repo_name
stringlengths 6
100
| path
stringlengths 4
294
| copies
stringlengths 1
5
| size
stringlengths 4
6
| content
stringlengths 606
896k
| license
stringclasses 15
values | var_hash
int64 -9,223,186,179,200,150,000
9,223,291,175B
| doc_hash
int64 -9,223,304,365,658,930,000
9,223,309,051B
| line_mean
float64 3.5
99.8
| line_max
int64 13
999
| alpha_frac
float64 0.25
0.97
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
patpatpatpatpat/digestus | digestus/users/migrations/0002_auto_20160411_0706.py | 1 | 1042 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-11 07:06
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9, 2016-04-11): re-attach the stock
    ``UserManager`` to the custom ``User`` model and realign the ``username``
    field with django.contrib.auth's standard definition."""

    # Must apply after the app's initial schema migration.
    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        # Restore Django's default UserManager as the 'objects' manager.
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Sync 'username' with upstream defaults: 30-char limit, unique,
        # validated by the standard @/./+/-/_ regex.
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
        ),
    ]
| bsd-3-clause | 3,592,190,506,129,512,400 | -6,751,428,395,168,490,000 | 36.214286 | 409 | 0.619962 | false |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_str.py | 51 | 15114 |
import struct
import sys
from test import test_support, string_tests
class StrTest(
    string_tests.CommonTest,
    string_tests.MixinStrUnicodeUserStringTest,
    string_tests.MixinStrUserStringTest,
    string_tests.MixinStrUnicodeTest,
    ):
    """8-bit str tests (Python 2); most coverage comes from the shared
    string_tests mixins, parametrized here via ``type2test = str``."""

    type2test = str

    # We don't need to propagate to str
    def fixtype(self, obj):
        return obj

    def test_basic_creation(self):
        # str() of scalars and empty containers.
        self.assertEqual(str(''), '')
        self.assertEqual(str(0), '0')
        self.assertEqual(str(0L), '0')
        self.assertEqual(str(()), '()')
        self.assertEqual(str([]), '[]')
        self.assertEqual(str({}), '{}')
        # Self-referential containers must render '...' instead of recursing.
        a = []
        a.append(a)
        self.assertEqual(str(a), '[[...]]')
        a = {}
        a[0] = a
        self.assertEqual(str(a), '{0: {...}}')

    def test_formatting(self):
        string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
        # '%c' cannot represent a code point above 255 in a byte string.
        self.assertRaises(OverflowError, '%c'.__mod__, 0x1234)

    def test_conversion(self):
        # Make sure __str__() behaves properly
        class Foo0:
            def __unicode__(self):
                return u"foo"

        class Foo1:
            def __str__(self):
                return "foo"

        class Foo2(object):
            def __str__(self):
                return "foo"

        class Foo3(object):
            def __str__(self):
                return u"foo"

        class Foo4(unicode):
            def __str__(self):
                return u"foo"

        class Foo5(str):
            def __str__(self):
                return u"foo"

        class Foo6(str):
            def __str__(self):
                return "foos"

            def __unicode__(self):
                return u"foou"

        class Foo7(unicode):
            def __str__(self):
                return "foos"

            def __unicode__(self):
                return u"foou"

        class Foo8(str):
            def __new__(cls, content=""):
                return str.__new__(cls, 2*content)

            def __str__(self):
                return self

        class Foo9(str):
            def __str__(self):
                return "string"

            def __unicode__(self):
                return "not unicode"

        self.assert_(str(Foo0()).startswith("<")) # this is different from __unicode__
        self.assertEqual(str(Foo1()), "foo")
        self.assertEqual(str(Foo2()), "foo")
        self.assertEqual(str(Foo3()), "foo")
        self.assertEqual(str(Foo4("bar")), "foo")
        self.assertEqual(str(Foo5("bar")), "foo")
        self.assertEqual(str(Foo6("bar")), "foos")
        self.assertEqual(str(Foo7("bar")), "foos")
        self.assertEqual(str(Foo8("foo")), "foofoo")
        self.assertEqual(str(Foo9("foo")), "string")
        self.assertEqual(unicode(Foo9("foo")), u"not unicode")

    def test_expandtabs_overflows_gracefully(self):
        # This test only affects 32-bit platforms because expandtabs can only take
        # an int as the max value, not a 64-bit C long. If expandtabs is changed
        # to take a 64-bit long, this test should apply to all platforms.
        if sys.maxint > (1 << 32) or struct.calcsize('P') != 4:
            return
        self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxint)

    def test__format__(self):
        def test(value, format, expected):
            # test both with and without the trailing 's'
            self.assertEqual(value.__format__(format), expected)
            self.assertEqual(value.__format__(format + 's'), expected)

        # Precision, width, fill/align, and very large widths.
        test('', '', '')
        test('abc', '', 'abc')
        test('abc', '.3', 'abc')
        test('ab', '.3', 'ab')
        test('abc', '3.3', 'abc')
        test('abcdef', '.3', 'abc')
        test('abcdef', '.0', '')
        test('abc', '2.3', 'abc')
        test('abc', '2.2', 'ab')
        test('abc', '3.2', 'ab ')
        test('result', 'x<0', 'result')
        test('result', 'x<5', 'result')
        test('result', 'x<6', 'result')
        test('result', 'x<7', 'resultx')
        test('result', 'x<8', 'resultxx')
        test('result', ' <7', 'result ')
        test('result', '<7', 'result ')
        test('result', '>7', ' result')
        test('result', '>8', '  result')
        test('result', '^8', ' result ')
        test('result', '^9', ' result  ')
        test('result', '^10', '  result  ')
        test('a', '10000', 'a' + ' ' * 9999)
        test('', '10000', ' ' * 10000)
        test('', '10000000', ' ' * 10000000)

    def test_format(self):
        # Brace escaping and literal text handling.
        self.assertEqual(''.format(), '')
        self.assertEqual('a'.format(), 'a')
        self.assertEqual('ab'.format(), 'ab')
        self.assertEqual('a{{'.format(), 'a{')
        self.assertEqual('a}}'.format(), 'a}')
        self.assertEqual('{{b'.format(), '{b')
        self.assertEqual('}}b'.format(), '}b')
        self.assertEqual('a{{b'.format(), 'a{b')

        # examples from the PEP:
        import datetime
        self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
        self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
                         "My name is Fred")
        self.assertEqual("My name is {0} :-{{}}".format('Fred'),
                         "My name is Fred :-{}")

        d = datetime.date(2007, 8, 18)
        self.assertEqual("The year is {0.year}".format(d),
                         "The year is 2007")

        # classes we'll use for testing
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec

        class D:
            def __init__(self, x):
                self.x = x
            def __format__(self, spec):
                return str(self.x)

        # class with __str__, but no __format__
        class E:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return 'E(' + self.x + ')'

        # class with __repr__, but no __format__ or __str__
        class F:
            def __init__(self, x):
                self.x = x
            def __repr__(self):
                return 'F(' + self.x + ')'

        # class with __format__ that forwards to string, for some format_spec's
        class G:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return object.__format__(self, format_spec)

        # class that returns a bad type from __format__
        class H:
            def __format__(self, format_spec):
                return 1.0

        class I(datetime.date):
            def __format__(self, format_spec):
                return self.strftime(format_spec)

        class J(int):
            def __format__(self, format_spec):
                return int.__format__(self * 2, format_spec)

        # Positional auto/explicit argument selection.
        self.assertEqual(''.format(), '')
        self.assertEqual('abc'.format(), 'abc')
        self.assertEqual('{0}'.format('abc'), 'abc')
        self.assertEqual('{0:}'.format('abc'), 'abc')
        self.assertEqual('X{0}'.format('abc'), 'Xabc')
        self.assertEqual('{0}X'.format('abc'), 'abcX')
        self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
        self.assertEqual('{1}'.format(1, 'abc'), 'abc')
        self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
        self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
        self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
        self.assertEqual('{0}'.format(-15), '-15')
        self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
        self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
        self.assertEqual('{{'.format(), '{')
        self.assertEqual('}}'.format(), '}')
        self.assertEqual('{{}}'.format(), '{}')
        self.assertEqual('{{x}}'.format(), '{x}')
        self.assertEqual('{{{0}}}'.format(123), '{123}')
        self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
        self.assertEqual('}}{{'.format(), '}{')
        self.assertEqual('}}x{{'.format(), '}x{')

        # weird field names
        self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
        self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
        self.assertEqual("{0[ ]}".format({' ':3}), '3')

        # Attribute and item access chains inside replacement fields.
        self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
        self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
        self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
        self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
        self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
        self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
        self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')

        # strings
        self.assertEqual('{0:.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:.3s}'.format('ab'), 'ab')
        self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
        self.assertEqual('{0:.0s}'.format('abcdef'), '')
        self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
        self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
        self.assertEqual('{0:x<0s}'.format('result'), 'result')
        self.assertEqual('{0:x<5s}'.format('result'), 'result')
        self.assertEqual('{0:x<6s}'.format('result'), 'result')
        self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
        self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
        self.assertEqual('{0: <7s}'.format('result'), 'result ')
        self.assertEqual('{0:<7s}'.format('result'), 'result ')
        self.assertEqual('{0:>7s}'.format('result'), ' result')
        self.assertEqual('{0:>8s}'.format('result'), '  result')
        self.assertEqual('{0:^8s}'.format('result'), ' result ')
        self.assertEqual('{0:^9s}'.format('result'), ' result  ')
        self.assertEqual('{0:^10s}'.format('result'), '  result  ')
        self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
        self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
        self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)

        # format specifiers for user defined type
        self.assertEqual('{0:abc}'.format(C()), 'abc')

        # !r and !s coersions
        self.assertEqual('{0!s}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:15}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')

        # test fallback to object.__format__
        self.assertEqual('{0}'.format({}), '{}')
        self.assertEqual('{0}'.format([]), '[]')
        self.assertEqual('{0}'.format([1]), '[1]')
        self.assertEqual('{0}'.format(E('data')), 'E(data)')
        self.assertEqual('{0:^10}'.format(E('data')), ' E(data)  ')
        self.assertEqual('{0:^10s}'.format(E('data')), ' E(data)  ')
        self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
        self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')
        self.assertEqual('{0!s}'.format(G('data')), 'string is data')

        self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
                                                       month=8,
                                                       day=27)),
                         "date: 2007-08-27")

        # test deriving from a builtin type and overriding __format__
        self.assertEqual("{0}".format(J(10)), "20")

        # string format specifiers
        self.assertEqual('{0:}'.format('a'), 'a')

        # computed format specifiers
        self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')

        # test various errors
        self.assertRaises(ValueError, '{'.format)
        self.assertRaises(ValueError, '}'.format)
        self.assertRaises(ValueError, 'a{'.format)
        self.assertRaises(ValueError, 'a}'.format)
        self.assertRaises(ValueError, '{a'.format)
        self.assertRaises(ValueError, '}a'.format)
        self.assertRaises(IndexError, '{0}'.format)
        self.assertRaises(IndexError, '{1}'.format, 'abc')
        self.assertRaises(KeyError, '{x}'.format)
        self.assertRaises(ValueError, "}{".format)
        self.assertRaises(ValueError, "{".format)
        self.assertRaises(ValueError, "}".format)
        self.assertRaises(ValueError, "abc{0:{}".format)
        self.assertRaises(ValueError, "{0".format)
        self.assertRaises(IndexError, "{0.}".format)
        self.assertRaises(ValueError, "{0.}".format, 0)
        self.assertRaises(IndexError, "{0[}".format)
        self.assertRaises(ValueError, "{0[}".format, [])
        self.assertRaises(KeyError, "{0]}".format)
        self.assertRaises(ValueError, "{0.[]}".format, 0)
        self.assertRaises(ValueError, "{0..foo}".format, 0)
        self.assertRaises(ValueError, "{0[0}".format, 0)
        self.assertRaises(ValueError, "{0[0:foo}".format, 0)
        self.assertRaises(KeyError, "{c]}".format)
        self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
        self.assertRaises(ValueError, "{0}}".format, 0)
        self.assertRaises(KeyError, "{foo}".format, bar=3)
        self.assertRaises(ValueError, "{0!x}".format, 3)
        self.assertRaises(ValueError, "{0!}".format, 0)
        self.assertRaises(ValueError, "{0!rs}".format, 0)
        self.assertRaises(ValueError, "{!}".format)
        self.assertRaises(ValueError, "{:}".format)
        self.assertRaises(ValueError, "{:s}".format)
        self.assertRaises(ValueError, "{}".format)

        # can't have a replacement on the field name portion
        self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)

        # exceed maximum recursion depth
        self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
        self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
                          0, 1, 2, 3, 4, 5, 6, 7)

        # string format spec errors
        self.assertRaises(ValueError, "{0:-s}".format, '')
        self.assertRaises(ValueError, format, "", "-")
        self.assertRaises(ValueError, "{0:=s}".format, '')

    def test_buffer_is_readonly(self):
        # readinto() must reject an immutable bytes buffer.
        self.assertRaises(TypeError, sys.stdin.readinto, b"")
def test_main():
    # Entry point used by CPython's regrtest machinery.
    test_support.run_unittest(StrTest)

if __name__ == "__main__":
    test_main()
| apache-2.0 | 7,531,468,449,491,639,000 | 6,113,730,734,959,349,000 | 39.304 | 115 | 0.515152 | false |
cchamanEE/pydare | test/dlyaptest.py | 2 | 1617 | from pydare.dlyap import dlyap_iterative, dlyap_schur, dlyap_slycot
import numpy
import unittest
class DlyapTestCase(unittest.TestCase):
def setUp(self):
self.a = numpy.matrix([[0.5,1.0],[-1.0,-1.0]])
self.q = numpy.matrix([[2.0,0.0],[0.0,0.5]])
def testIterative(self):
x = dlyap_iterative(self.a,self.q)
self.assertAlmostEqual(4.75,x[0,0],4)
self.assertAlmostEqual(4.1875,x[1,1],4)
for i in range(0,2):
for j in range(0,2):
if i != j:
self.assertAlmostEqual(-2.625,x[i,j],4)
def testDirect(self):
x = dlyap_schur(self.a,self.q)
self.assertAlmostEqual(4.75,x[0,0],4)
self.assertAlmostEqual(4.1875,x[1,1],4)
for i in range(0,2):
for j in range(0,2):
if i != j:
self.assertAlmostEqual(-2.625,x[i,j],4)
def testSLICOT(self):
x = dlyap_slycot(self.a,self.q)
self.assertAlmostEqual(4.75,x[0,0],4)
self.assertAlmostEqual(4.1875,x[1,1],4)
for i in range(0,2):
for j in range(0,2):
if i != j:
self.assertAlmostEqual(-2.625,x[i,j],4)
def suite():
    """Assemble and return the test suite for the dlyap solvers."""
    result = unittest.TestSuite()
    # One test per solver backend, in deterministic order.
    for case_name in ('testIterative', 'testDirect', 'testSLICOT'):
        result.addTest(DlyapTestCase(case_name))
    return result
if __name__ == '__main__':
    # Run the suite with per-test verbose output when executed directly.
    unittest.TextTestRunner(verbosity=2).run(suite())
mortada/numpy | tools/swig/test/testFlat.py | 108 | 6906 | #! /usr/bin/env python
from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
import os
import sys
import unittest
import struct
# Import NumPy
import numpy as np
# NumPy before 1.0 raised TypeError for malformed list input; later
# releases raise ValueError, so pick the exception class that matches
# the installed NumPy version.
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
import Flat
######################################################################
class FlatTestCase(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test the (type* INPLACE_ARRAY_FLAT, int DIM_FLAT) typemap
def testProcess1D(self):
"Test Process function 1D array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(10):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
y = x.copy()
process(y)
self.assertEquals(np.all((x+1)==y),True)
def testProcess3D(self):
"Test Process function 3D array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(24):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x.shape = (2,3,4)
y = x.copy()
process(y)
self.assertEquals(np.all((x+1)==y),True)
def testProcess3DTranspose(self):
"Test Process function 3D array, FORTRAN order"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(24):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x.shape = (2,3,4)
y = x.copy()
process(y.T)
self.assertEquals(np.all((x.T+1)==y.T),True)
def testProcessNoncontiguous(self):
"Test Process function with non-contiguous array, which should raise an error"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(24):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x.shape = (2,3,4)
self.assertRaises(TypeError, process, x[:,:,0])
######################################################################
class scharTestCase(FlatTestCase):
    """Flat tests for C signed char (struct code 'b')."""
    def __init__(self, methodName="runTest"):
        super(scharTestCase, self).__init__(methodName)
        self.typeStr = "schar"
        self.typeCode = "b"
######################################################################
class ucharTestCase(FlatTestCase):
    """Flat tests for C unsigned char (struct code 'B')."""
    def __init__(self, methodName="runTest"):
        super(ucharTestCase, self).__init__(methodName)
        self.typeStr = "uchar"
        self.typeCode = "B"
######################################################################
class shortTestCase(FlatTestCase):
    """Flat tests for C short (struct code 'h')."""
    def __init__(self, methodName="runTest"):
        super(shortTestCase, self).__init__(methodName)
        self.typeStr = "short"
        self.typeCode = "h"
######################################################################
class ushortTestCase(FlatTestCase):
    """Flat tests for C unsigned short (struct code 'H')."""
    def __init__(self, methodName="runTest"):
        super(ushortTestCase, self).__init__(methodName)
        self.typeStr = "ushort"
        self.typeCode = "H"
######################################################################
class intTestCase(FlatTestCase):
    """Flat tests for C int (struct code 'i')."""
    def __init__(self, methodName="runTest"):
        super(intTestCase, self).__init__(methodName)
        self.typeStr = "int"
        self.typeCode = "i"
######################################################################
class uintTestCase(FlatTestCase):
    """Flat tests for C unsigned int (struct code 'I')."""
    def __init__(self, methodName="runTest"):
        super(uintTestCase, self).__init__(methodName)
        self.typeStr = "uint"
        self.typeCode = "I"
######################################################################
class longTestCase(FlatTestCase):
    """Flat tests for C long (struct code 'l')."""
    def __init__(self, methodName="runTest"):
        super(longTestCase, self).__init__(methodName)
        self.typeStr = "long"
        self.typeCode = "l"
######################################################################
class ulongTestCase(FlatTestCase):
    """Flat tests for C unsigned long (struct code 'L')."""
    def __init__(self, methodName="runTest"):
        super(ulongTestCase, self).__init__(methodName)
        self.typeStr = "ulong"
        self.typeCode = "L"
######################################################################
class longLongTestCase(FlatTestCase):
    """Flat tests for C long long (struct code 'q')."""
    def __init__(self, methodName="runTest"):
        super(longLongTestCase, self).__init__(methodName)
        self.typeStr = "longLong"
        self.typeCode = "q"
######################################################################
class ulongLongTestCase(FlatTestCase):
    """Flat tests for C unsigned long long (struct code 'Q')."""
    def __init__(self, methodName="runTest"):
        super(ulongLongTestCase, self).__init__(methodName)
        self.typeStr = "ulongLong"
        self.typeCode = "Q"
######################################################################
class floatTestCase(FlatTestCase):
    """Flat tests for C float (struct code 'f')."""
    def __init__(self, methodName="runTest"):
        super(floatTestCase, self).__init__(methodName)
        self.typeStr = "float"
        self.typeCode = "f"
######################################################################
class doubleTestCase(FlatTestCase):
    """Flat tests for C double (struct code 'd')."""
    def __init__(self, methodName="runTest"):
        super(doubleTestCase, self).__init__(methodName)
        self.typeStr = "double"
        self.typeCode = "d"
######################################################################
if __name__ == "__main__":

    # Build the test suite
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(    scharTestCase))
    suite.addTest(unittest.makeSuite(    ucharTestCase))
    suite.addTest(unittest.makeSuite(    shortTestCase))
    suite.addTest(unittest.makeSuite(   ushortTestCase))
    suite.addTest(unittest.makeSuite(      intTestCase))
    suite.addTest(unittest.makeSuite(     uintTestCase))
    suite.addTest(unittest.makeSuite(     longTestCase))
    suite.addTest(unittest.makeSuite(    ulongTestCase))
    suite.addTest(unittest.makeSuite( longLongTestCase))
    suite.addTest(unittest.makeSuite(ulongLongTestCase))
    suite.addTest(unittest.makeSuite(    floatTestCase))
    suite.addTest(unittest.makeSuite(   doubleTestCase))

    # Execute the test suite
    print("Testing 1D Functions of Module Flat")
    print("NumPy version", np.__version__)
    print()
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit non-zero if any test errored or failed (for CI scripts).
    sys.exit(bool(result.errors + result.failures))
| bsd-3-clause | 6,214,743,420,752,028,000 | 5,895,308,844,020,806,000 | 33.53 | 86 | 0.532725 | false |
plaes/flask-sendmail | docs/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments style for the Flask docs theme, based on the Tango style
    (see the module header comment)."""

    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class: ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'

        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'

        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        Number:                    "#990000",                # class: 'm'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| bsd-3-clause | -2,250,901,400,850,255,400 | -5,377,451,328,696,894,000 | 55.686047 | 83 | 0.435897 | false |
jhamman/xarray | xarray/core/dask_array_ops.py | 1 | 3415 | import numpy as np
from . import dtypes, nputils
def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):
    """Wrapper to apply bottleneck moving window funcs on dask arrays

    Overlaps neighbouring chunks by half a window so that ``moving_func``
    (a bottleneck ``move_*`` function) can be applied blockwise, then trims
    the overlap off again.
    """
    import dask.array as da

    # Promote the dtype so the boundary fill value (e.g. NaN for integer
    # input) is representable.
    dtype, fill_value = dtypes.maybe_promote(a.dtype)
    a = a.astype(dtype)
    # inputs for overlap
    if axis < 0:
        axis = a.ndim + axis
    depth = {d: 0 for d in range(a.ndim)}
    # Half a window (rounded up) of context on each side of every chunk.
    depth[axis] = (window + 1) // 2
    boundary = {d: fill_value for d in range(a.ndim)}
    # Create overlap array.
    ag = da.overlap.overlap(a, depth=depth, boundary=boundary)
    # apply rolling func
    out = ag.map_blocks(
        moving_func, window, min_count=min_count, axis=axis, dtype=a.dtype
    )
    # trim array
    result = da.overlap.trim_internal(out, depth)
    return result
def rolling_window(a, axis, window, center, fill_value):
    """Dask's equivalence to np.utils.rolling_window

    Builds a dask array with a new trailing dimension of length ``window``
    containing the sliding windows along ``axis``; out-of-range positions
    are filled with ``fill_value``.
    """
    import dask.array as da

    orig_shape = a.shape
    if axis < 0:
        axis = a.ndim + axis
    depth = {d: 0 for d in range(a.ndim)}
    depth[axis] = int(window / 2)
    # For evenly sized window, we need to crop the first point of each block.
    offset = 1 if window % 2 == 0 else 0

    # Overlapping requires every chunk to be at least as large as the depth.
    if depth[axis] > min(a.chunks[axis]):
        raise ValueError(
            "For window size %d, every chunk should be larger than %d, "
            "but the smallest chunk size is %d. Rechunk your array\n"
            "with a larger chunk size or a chunk size that\n"
            "more evenly divides the shape of your array."
            % (window, depth[axis], min(a.chunks[axis]))
        )

    # Although da.overlap pads values to boundaries of the array,
    # the size of the generated array is smaller than what we want
    # if center == False.
    if center:
        start = int(window / 2)  # 10 -> 5,  9 -> 4
        end = window - 1 - start
    else:
        start, end = window - 1, 0
    pad_size = max(start, end) + offset - depth[axis]
    drop_size = 0
    # pad_size becomes more than 0 when the overlapped array is smaller than
    # needed. In this case, we need to enlarge the original array by padding
    # before overlapping.
    if pad_size > 0:
        if pad_size < depth[axis]:
            # overlapping requires each chunk larger than depth. If pad_size is
            # smaller than the depth, we enlarge this and truncate it later.
            drop_size = depth[axis] - pad_size
            pad_size = depth[axis]
        # Prepend a single chunk of fill values along the rolling axis.
        shape = list(a.shape)
        shape[axis] = pad_size
        chunks = list(a.chunks)
        chunks[axis] = (pad_size,)
        fill_array = da.full(shape, fill_value, dtype=a.dtype, chunks=chunks)
        a = da.concatenate([fill_array, a], axis=axis)

    boundary = {d: fill_value for d in range(a.ndim)}

    # create overlap arrays
    ag = da.overlap.overlap(a, depth=depth, boundary=boundary)

    # apply rolling func
    def func(x, window, axis=-1):
        # Per-block: materialize the strided window view and drop the
        # leading point for even-sized windows (see `offset` above).
        x = np.asarray(x)
        rolling = nputils._rolling_window(x, window, axis)
        return rolling[(slice(None),) * axis + (slice(offset, None),)]

    chunks = list(a.chunks)
    chunks.append(window)
    out = ag.map_blocks(
        func, dtype=a.dtype, new_axis=a.ndim, chunks=chunks, window=window, axis=axis
    )

    # crop boundary.
    index = (slice(None),) * axis + (slice(drop_size, drop_size + orig_shape[axis]),)
    return out[index]
| apache-2.0 | -1,509,723,400,812,049,200 | -1,469,610,345,778,420,700 | 34.206186 | 85 | 0.610542 | false |
jds2001/ocp-checkbox | plainbox/plainbox/impl/checkbox.py | 1 | 11485 | # This file is part of Checkbox.
#
# Copyright 2012, 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.checkbox` -- CheckBox integration
=====================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
import collections
import io
import logging
import os
from plainbox.impl import get_plainbox_dir
from plainbox.impl.applogic import RegExpJobQualifier, CompositeQualifier
from plainbox.impl.job import JobDefinition
from plainbox.impl.rfc822 import load_rfc822_records
logger = logging.getLogger("plainbox.checkbox")
# NOTE: using CompositeQualifier seems strange but it's a tested proven
# component so all we have to ensure is that we read the whitelist files
# correctly.
class WhiteList(CompositeQualifier):
    """
    A qualifier that understands checkbox whitelist files.

    A whitelist file is a plain text, line oriented file. Each line represents
    a regular expression pattern that can be matched against the name of a job.

    The file can contain simple shell-style comments that begin with the pound
    or hash key (#). Those are ignored. Comments can span both a fraction of a
    line as well as the whole line.

    For historical reasons each pattern has an implicit '^' and '$' prepended
    and appended (respectively) to the actual pattern specified in the file.
    """

    def __init__(self, pattern_list, name=None):
        """
        Initialize a whitelist object with the specified list of patterns.

        The patterns must be already mangled with '^' and '$'.
        """
        inclusive = [RegExpJobQualifier(pattern) for pattern in pattern_list]
        exclusive = ()
        super(WhiteList, self).__init__(inclusive, exclusive)
        self._name = name

    @property
    def name(self):
        """
        name of this WhiteList (might be None)
        """
        return self._name

    @classmethod
    def from_file(cls, pathname):
        """
        Load and initialize the WhiteList object from the specified file.

        :param pathname: file to load
        :returns: a fresh WhiteList object
        """
        pattern_list = cls._load_patterns(pathname)
        # The whitelist takes its name from the file's basename, sans suffix.
        name = os.path.splitext(os.path.basename(pathname))[0]
        return cls(pattern_list, name=name)

    @classmethod
    def _load_patterns(cls, pathname):
        # NOTE: first parameter renamed from the misleading 'self' to 'cls'
        # (this is a classmethod); no call sites are affected.
        """
        Load whitelist patterns from the specified file

        :param pathname: file to load patterns from
        :returns: a list of regular expression strings, each wrapped in ^...$
        """
        pattern_list = []
        # Load the file
        with open(pathname, "rt", encoding="UTF-8") as stream:
            for line in stream:
                # Strip shell-style comments if there are any
                try:
                    index = line.index("#")
                except ValueError:
                    pass
                else:
                    line = line[:index]
                # Strip whitespace
                line = line.strip()
                # Skip empty lines (especially after stripping comments)
                if line == "":
                    continue
                # Surround the pattern with ^ and $
                # so that it wont just match a part of the job name.
                regexp_pattern = r"^{pattern}$".format(pattern=line)
                # Accumulate patterns into the list
                pattern_list.append(regexp_pattern)
        return pattern_list
class CheckBoxNotFound(LookupError):
    """
    Raised when no CheckBox installation can be located on this system.
    """

    # Fixed message; this exception carries no arguments.
    _MESSAGE = "CheckBox cannot be found"

    def __repr__(self):
        # Mirror the zero-argument constructor call.
        return "{}()".format(type(self).__name__)

    def __str__(self):
        return self._MESSAGE
def _get_checkbox_dir():
    """
    Return the root directory of the checkbox source checkout.

    Historically plainbox used a git submodule with checkbox tree (converted
    to git). This ended with the merge of plainbox into the checkbox tree.
    Now it's the other way around and the checkbox tree can be located two
    directories "up" from the plainbox module, in a checkbox-old directory.
    """
    plainbox_dir = get_plainbox_dir()
    checkbox_old = os.path.join(plainbox_dir, "..", "..", "checkbox-old")
    return os.path.normpath(checkbox_old)
class CheckBox:
    """
    Helper class for interacting with CheckBox

    PlainBox relies on CheckBox for actual jobs, scripts and library features
    required by the scripts. This class allows one to interact with CheckBox
    without having to bother with knowing how to set up the environment.

    This class also abstracts away the differences between dealing with
    CheckBox that is installed from system packages and CheckBox that is
    available from a checkout directory.
    """

    # Helper for locating certain directories
    CheckBoxDirs = collections.namedtuple(
        "CheckBoxDirs", "SHARE_DIR SCRIPTS_DIR JOBS_DIR DATA_DIR")

    # Temporary helper to compute "src" value below
    source_dir = _get_checkbox_dir()

    # Known filesystem layouts, ordered so that a source checkout wins over
    # a system-wide installation during auto-detection in __init__().
    _DIRECTORY_MAP = collections.OrderedDict((
        # Layout for source checkout
        ("src", CheckBoxDirs(
            source_dir,
            os.path.join(source_dir, "scripts"),
            os.path.join(source_dir, "jobs"),
            os.path.join(source_dir, "data"))),
        # Layout for installed version
        ("deb", CheckBoxDirs(
            "/usr/share/checkbox/",
            "/usr/share/checkbox/scripts",
            "/usr/share/checkbox/jobs",
            "/usr/share/checkbox/data"))))

    # Remove temporary helper that was needed above
    del source_dir

    def __init__(self, mode=None):
        """
        Initialize checkbox integration.

        :param mode:
            If specified it determines which checkbox installation to use.
            None (default) enables auto-detection.  Applicable values are
            ``src`` (checkbox as present in the code repository) and
            ``deb`` (checkbox installed from the Ubuntu repository).
            NOTE(review): an earlier version of this docstring mentioned
            ``deb1``/``deb2`` modes, but only ``deb`` exists in
            ``_DIRECTORY_MAP``.
        :raises CheckBoxNotFound:
            if checkbox cannot be located anywhere
        :raises ValueError:
            if ``mode`` is not supported
        """
        # Auto-detect if not explicitly configured
        if mode is None:
            for possible_mode, dirs in self._DIRECTORY_MAP.items():
                # Accept the first layout whose directories all exist.
                if all(os.path.exists(dirname) for dirname in dirs):
                    logger.info("Using checkbox in mode %s", possible_mode)
                    mode = possible_mode
                    break
            else:
                # No known layout matched the filesystem.
                raise CheckBoxNotFound()
        # Ensure mode is known
        if mode not in self._DIRECTORY_MAP:
            raise ValueError("Unsupported mode")
        else:
            self._mode = mode
            self._dirs = self._DIRECTORY_MAP[mode]

    @property
    def CHECKBOX_SHARE(self):
        """
        Return the required value of CHECKBOX_SHARE environment variable.

        .. note::
            This variable is only required by one script.
            It would be nice to remove this later on.
        """
        return self._dirs.SHARE_DIR

    @property
    def extra_PYTHONPATH(self):
        """
        Return additional entry for PYTHONPATH, if needed.

        This entry is required for CheckBox scripts to import the correct
        CheckBox python libraries.

        .. note::
            The result may be None
        """
        # NOTE: When CheckBox is installed then all the scripts should not use
        # 'env' to locate the python interpreter (otherwise they might use
        # virtualenv which is not desirable for Debian packages). When we're
        # using CheckBox from source then the source directory (which contains
        # the 'checkbox' package) should be added to PYTHONPATH for all the
        # imports to work.
        if self._mode == "src":
            return _get_checkbox_dir()
        else:
            return None

    @property
    def extra_PATH(self):
        """
        Return additional entry for PATH

        This entry is required to lookup CheckBox scripts.
        """
        # NOTE: This is always the script directory. The actual logic for
        # locating it is implemented in the property accessors.
        return self.scripts_dir

    @property
    def jobs_dir(self):
        """
        Return an absolute path of the jobs directory
        """
        return self._dirs.JOBS_DIR

    @property
    def whitelists_dir(self):
        """
        Return an absolute path of the whitelist directory
        """
        return os.path.join(self._dirs.DATA_DIR, "whitelists")

    @property
    def scripts_dir(self):
        """
        Return an absolute path of the scripts directory

        .. note::
            The scripts may not work without setting PYTHONPATH and
            CHECKBOX_SHARE.
        """
        return self._dirs.SCRIPTS_DIR

    def get_builtin_whitelists(self):
        # Load every *.whitelist file shipped in the whitelists directory.
        logger.debug("Loading built-in whitelists...")
        whitelist_list = []
        for name in os.listdir(self.whitelists_dir):
            if name.endswith(".whitelist"):
                whitelist_list.append(
                    WhiteList.from_file(os.path.join(
                        self.whitelists_dir, name)))
        return whitelist_list

    def get_builtin_jobs(self):
        # Load job definitions from every *.txt / *.txt.in file in jobs_dir.
        logger.debug("Loading built-in jobs...")
        job_list = []
        for name in os.listdir(self.jobs_dir):
            if name.endswith(".txt") or name.endswith(".txt.in"):
                job_list.extend(
                    self.load_jobs(
                        os.path.join(self.jobs_dir, name)))
        return job_list

    def load_jobs(self, somewhere):
        """
        Load job definitions from somewhere

        ``somewhere`` may be a filename (str) or an open text stream
        (io.TextIOWrapper); other file-like objects raise TypeError.
        """
        if isinstance(somewhere, str):
            # Load data from a file with the given name
            filename = somewhere
            with open(filename, 'rt', encoding='UTF-8') as stream:
                return self.load_jobs(stream)
        if isinstance(somewhere, io.TextIOWrapper):
            stream = somewhere
            logger.debug("Loading jobs definitions from %r...", stream.name)
            record_list = load_rfc822_records(stream)
            job_list = []
            for record in record_list:
                job = JobDefinition.from_rfc822_record(record)
                # Remember which provider this job came from.
                job._checkbox = self
                logger.debug("Loaded %r", job)
                job_list.append(job)
            return job_list
        else:
            raise TypeError(
                "Unsupported type of 'somewhere': {!r}".format(
                    type(somewhere)))

    @property
    def name(self):
        """
        name of this provider (always checkbox)
        """
        return "checkbox"
| gpl-3.0 | 7,951,871,017,664,625,000 | -3,997,041,817,807,398,000 | 32.97929 | 79 | 0.60714 | false |
slarosa/QGIS | python/plugins/sextante/algs/SaveSelectedFeatures.py | 3 | 4504 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SaveSelectedFeatures.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.outputs.OutputVector import OutputVector
from sextante.parameters.ParameterVector import ParameterVector
from qgis.core import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sextante.core.QGisLayers import QGisLayers
class SaveSelectedFeatures(GeoAlgorithm):
    """Example SEXTANTE algorithm: copy the selected features of a vector
    layer into a brand new layer.

    It is meant as a template showing the methods and member variables
    used to write SEXTANTE algorithms.  Any class extending GeoAlgorithm
    is picked up automatically by every SEXTANTE element; no extra
    registration work is needed.
    """

    # Keys used to refer to the parameter and the output, both when this
    # algorithm is called from another algorithm and when SEXTANTE is
    # driven from the QGIS console.
    OUTPUT_LAYER = "OUTPUT_LAYER"
    INPUT_LAYER = "INPUT_LAYER"

    def defineCharacteristics(self):
        """Declare the name, toolbox group, inputs and outputs."""
        # Name shown to the user and the toolbox branch it appears under.
        self.name = "Save selected features"
        self.group = "Vector general tools"
        # Mandatory (not optional) input layer of any geometry type.
        self.addParameter(ParameterVector(self.INPUT_LAYER, "Input layer", ParameterVector.VECTOR_TYPE_ANY, False))
        # The resulting vector layer.
        self.addOutput(OutputVector(self.OUTPUT_LAYER, "Output layer with selected features"))

    def processAlgorithm(self, progress):
        """Copy every selected feature to the output layer."""
        # Retrieve the values entered by the user.  Input layers arrive as
        # a location string which QGisLayers turns into a QgsVectorLayer.
        source_location = self.getParameterValue(self.INPUT_LAYER)
        destination = self.getOutputFromName(self.OUTPUT_LAYER)
        layer = QGisLayers.getObjectFromUri(source_location)

        # Create the output writer with the same schema, geometry type and
        # CRS as the input layer.
        provider = layer.dataProvider()
        writer = destination.getVectorWriter(provider.fields(), provider.geometryType(), layer.crs())

        # Copy the selected features across, reporting progress as we go.
        selection = QGisLayers.features(layer)
        total = len(selection)
        for done, feature in enumerate(selection):
            writer.addFeature(feature)
            progress.setPercentage(100 * done / float(total))
        del writer
        # Nothing else to do: SEXTANTE itself opens the created layer, and
        # also handles execution inside a complex model.
| gpl-2.0 | 2,994,466,087,678,142,000 | -8,646,920,597,305,559,000 | 43.156863 | 115 | 0.623668 | false |
prds21/barrial-movie | barrial-movie/channels/quierodibujosanimados.py | 9 | 5799 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para quierodibujosanimados
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
DEBUG = config.get_setting("debug")
__category__ = "A"
__type__ = "generic"
__title__ = "Quiero dibujos animados"
__channel__ = "quierodibujosanimados"
__language__ = "ES"
__creationdate__ = "20121112"
def isGeneric():
    # Tell the plugin core that this channel implements the generic
    # channel interface.
    return True
def mainlist(item):
    """Channel entry point: delegate straight to the series listing."""
    logger.info("pelisalacarta.channels.quierodibujosanimados mainlist")
    root = Item(channel=__channel__, action="series", title="Series",
                url="http://www.quierodibujosanimados.com/")
    return series(root)
def series(item):
    """Scrape the category menu and return one Item per cartoon series."""
    logger.info("pelisalacarta.channels.quierodibujosanimados series")
    data = scrapertools.cache_page(item.url)
    # Restrict further matching to the category menu block.
    data = scrapertools.get_match(data,'<ul class="categorias">(.*?)</ul')
    link_re = re.compile('<a href="([^"]+)"[^>]+>([^<]+)</a>', re.DOTALL)
    itemlist = []
    for link_url, link_title in link_re.findall(data):
        title = link_title.strip()
        url = urlparse.urljoin(item.url, link_url)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg"))
    # Pagination link, if present in the scraped fragment.
    next_page_url = scrapertools.find_single_match(data,'</span[^<]+<a href="([^"]+)">')
    if next_page_url!="":
        itemlist.append( Item(channel=__channel__, action="episodios", title=">> Página siguiente" , url=urlparse.urljoin(item.url,next_page_url) , folder=True, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg") )
    return itemlist
def episodios(item):
    # List the episodes of one series.  The regexes below are tightly
    # coupled to the site's current markup (see the sample kept in the
    # string literal that follows), so the code is left byte-identical.
    logger.info("pelisalacarta.channels.quierodibujosanimados episodios")
    '''
    <li>
    <div class="info">
    <h2><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h2>
    <p>Caillou volvía con su hermanita Rosi y su mamá de la biblioteca y traían un montón de libros que Caillou quería leer, especialmente uno de piratas. Capítulo titulado "Caillou ratón de biblioteca".</p>
    <div class="pie">
    <div class="categoria">
    <span>Categoría:</span>
    <a href="http://www.quierodibujosanimados.com/cat/caillou/14" title="Caillou" class="categoria">Caillou</a>
    </div>
    <div class="puntuacion">
    <div class="rating_16 punt_0" data-noticia="954">
    <span>0.5</span>
    <span>1</span>
    <span>1.5</span>
    <span>2</span>
    <span>2.5</span>
    <span>3</span>
    <span>3.5</span>
    <span>4</span>
    <span>4.5</span>
    <span>5</span>
    </div>
    </div>
    </div>
    <span class="pico"></span>
    </div>
    <div class="dibujo">
    <a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca" class="thumb">
    <img src="http://www.quierodibujosanimados.com/i/thm-Caillou-raton-de-biblioteca.jpg" alt="Caillou ratón de biblioteca" width="137" height="174" />
    </a>
    <h4><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h4>
    </div>
    </li>
    '''
    # Download the page
    data = scrapertools.cache_page(item.url)
    patron = '<div class="dibujo"[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        title = scrapedtitle.strip()
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg"))
    # Pagination link, if present.
    next_page_url = scrapertools.find_single_match(data,'</span[^<]+<a href="([^"]+)">')
    if next_page_url!="":
        itemlist.append( Item(channel=__channel__, action="episodios", title=">> Página siguiente" , url=urlparse.urljoin(item.url,next_page_url) , folder=True, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg") )
    return itemlist
# Automatic channel check: must return True when the channel works
# end-to-end (scraping plus video-link extraction).
def test():
    from servers import servertools

    # Walk the series list until some episode yields at least one playable
    # mirror; that proves both scraping stages still match the site.
    for serie_item in mainlist(Item()):
        for episodio_item in episodios(serie_item):
            mirrors = servertools.find_video_items(item=episodio_item)
            if len(mirrors) > 0:
                return True
    return False
username115/FRCScouting | gen_scripts/SQLITEContractGen.py | 1 | 8061 | #! /usr/bin/python
_description = '''
This script take in a SQL file with INSERTS and CREATES and transforms
it into a SQLite contract in Java. Meant to be used with a phpmyadmin
exported sql file. Defaults assume the FRC 836 file structure.
'''
_defaultRun = '''
python SQLITEContractGen.py
--packagename=org.frc836.database
--classname=FRCScoutingContract
--infile=FRC_Scouting_Server/scouting.sql
--outfile=src/org/frc836/database/FRCScoutingContract.java
'''
__author__ = "Jonny"
__version__ = "2.0"
__copyright__ = ""
import SQLHelper
import autogeninfo
import os
import re
import argparse
# note to self. Look into the 'textwrap' class for functionality
class SqlToJava():
    """Parse CREATE/INSERT statements from a phpmyadmin-exported .sql file
    and render them as a Java SQLite contract class.

    The verbose-mode regex patterns below contain inline comments that are
    part of the pattern strings themselves; they are runtime data.
    """

    re_GetSqlVar = re.compile(r"[`](?P<var>\w+)[`]")
    re_CreateStatement = re.compile(r'''
        \s* CREATE \s+ TABLE \s+ IF \s+ NOT \s+ EXISTS \s+  # grabs the create statement
        [`] (?P<tablename>\w+) [`]                          # matches the table name
        (?P<body>[^;]+)                                     # matches the body
        [;]
        ''',re.VERBOSE)
    re_InsertStatement = re.compile(r'''
        \s* INSERT \s+ INTO \s+     # finds the insert statements
        [`] (?P<tablename>\w+) [`]  # matches the tablename
        \s+ [(] \s*
        (?P<colnames>[^)]+)
        [)] \s* VALUES [^(]*
        (?P<body>[^;]+) [;]
        ''',re.VERBOSE)
    re_GetColumn = re.compile(r'''
        (^|\n) \s+
        [`] (?P<name>\w+) [`]       # grabs the column name
        \s+ (?P<type>\S+) \s+       # grabs the type
        ''',re.VERBOSE)
    re_GetRow = re.compile(r'''
        [(]
        (?P<row>.+)
        [)]
        #[(] (?P<row>[^)]+) [)]     # matches everything in parens
        ''',re.VERBOSE)

    def __init__(self, packageName=None, className="DefaultJavaClassName",
                 baseClass=None, baseClassHeader=None):
        # Tables accumulate in parse order; output order follows it.
        self.tables = list()
        self.packageName = packageName
        self.className = className
        self.baseClass = baseClass
        self.baseClassHeader = baseClassHeader

    def findTableName(self, tableName):
        # Return the index of the named table, or None when absent.
        for i in range(0, len(self.tables)):
            if tableName == self.tables[i].name:
                return i
        return None

    def addTable(self, table):
        self.tables.append(table)

    def createStr_Header(self):
        # Emit the autogen banner, package line, optional import and the
        # opening of the public final class.
        _myscriptname = os.path.basename(__file__)
        ret = "/*\n"
        ret += autogeninfo._autogenScriptInfo_Str(__version__, _myscriptname) +"\n"
        ret += "*/\n\n"
        ret += "package "+ self.packageName +";\n"
        ret += "\n"
        if self.baseClassHeader:
            ret += "import "+ self.baseClassHeader +";\n"
            ret += "\n"
        ret += "public final class "+ self.className +" {\n"
        ret += "\tpublic "+ self.className +"() {}"
        return ret

    def createStr_Footer(self):
        ret = "}"
        return ret

    def createStr_Classes(self):
        # One nested Java class per table; trailing blank lines trimmed.
        s = ""
        for table in self.tables:
            s += table.createStr_Class(self.baseClass) +"\n\n"
        return s[0:-2]

    def createStr_DropStr(self):
        # Java array of DROP statements, one per table.
        s = "public static final String[] SQL_DELETE_ENTRIES = {\n"
        for table in self.tables:
            tmp = "\""+ table.createStr_DropStr() +"\""
            s += SQLHelper.indent(tmp) +",\n"
        return s[0:-2] +"\n};"

    def createStr_CreateStr(self):
        # Java array of CREATE statements, each optionally followed by the
        # table's INSERT seed data.
        s = "public static final String[] SQL_CREATE_ENTRIES = {\n"
        for table in self.tables:
            s += SQLHelper.indent( SQLHelper.toJavaString(table.createStr_CreateStr()))
            s += ",\n\n"
            tmp = table.createStr_InsertStr()
            if tmp:
                s += SQLHelper.indent( SQLHelper.toJavaString(tmp))
                s += ",\n\n"
        return s[0:-3] +"\n};"

    def createStr_JavaSqLite(self):
        # Assemble the whole output file: header, classes, arrays, footer.
        s = ""
        s += self.createStr_Header() +"\n"
        s += "\n"
        s += SQLHelper.indent(self.createStr_Classes()) +"\n"
        s += "\n"
        s += SQLHelper.indent(self.createStr_CreateStr()) +"\n"
        s += "\n"
        s += SQLHelper.indent(self.createStr_DropStr()) +"\n"
        s += "\n"
        s += self.createStr_Footer()
        return s

    def _parseStatement_Create(self, statement):
        # Parse one CREATE TABLE statement into a SqlTable with columns.
        match = self.re_CreateStatement.search(statement)
        if match:
            table = SQLHelper.SqlTable( match.group('tablename') )
            for ln in match.group('body').split(','):
                match = self.re_GetColumn.search(ln)
                if match:
                    name = match.group('name')
                    type = match.group('type')
                    if re.search("unsigned",ln): unsigned = True
                    else: unsigned = False
                    # NOTE(review): both branches set nullVal to False, so
                    # "NOT NULL" has no effect -- presumably the else branch
                    # was meant to be True; confirm against SQLHelper usage.
                    if re.search("NOT NULL",ln): nullVal = False
                    else: nullVal = False
                    if re.search("AUTO_INCREMENT",ln): autoInc = True
                    else: autoInc = False
                    match = re.search("DEFAULT\s+(?P<val>\S+)",ln)
                    if match: default = match.group('val')
                    else: default=None
                    table.addColumn( SQLHelper.SqlColumn(columnName=name, columnType=type,
                                                         isPrimary=False, defaultVal=default,
                                                         nullValid=nullVal, autoIncrement=autoInc,
                                                         isUnsigned=unsigned) )
                if re.search("PRIMARY\s+KEY",ln):
                    # Mark the named column as the primary key.
                    primaryKey = re.search("PRIMARY\s+KEY\s+[(][`](?P<key>\w+)[`][)]",ln).group('key')
                    for column in table.columns:
                        if column.name == primaryKey:
                            column.primary = True
            self.addTable(table)

    def _parseStatement_Insert(self, statement):
        # Attach the VALUES rows of one INSERT statement to its table.
        match = self.re_InsertStatement.search(statement)
        if match:
            tableName = match.group('tablename')
            colNames = match.group('colnames')
            body = match.group('body')
            i_table = self.findTableName(tableName)
            # NOTE(review): 'mapping' is never used afterwards; kept in case
            # getColumnMapping_csv has side effects on the table object.
            mapping = self.tables[i_table].getColumnMapping_csv(colNames)
            for row in self.re_GetRow.findall( body ):
                self.tables[i_table].addRow(row)

    def readFile(self, filename, verbose=False):
        # Split the file on ';' and feed each statement to the matching
        # parser (CREATE or INSERT); everything else is ignored.
        f = open(filename,'r')
        if verbose: print("Reading from \'"+ str(f.name) +"\' in mode \'"+ str(f.mode) +"\'")
        for ln in f.read().split(';'):
            ln += ';'
            if self.re_CreateStatement.search(ln):
                self._parseStatement_Create(ln)
            elif self.re_InsertStatement.search(ln):
                self._parseStatement_Insert(ln)
        f.close()

    def writeJavaSqLiteFile(self, filename, verbose=False):
        # Create the output directory on demand, then write the contract.
        directory = os.path.dirname(filename)
        if not os.path.exists(directory):
            if verbose: print("Creating output directory: " + directory)
            os.makedirs(directory)
        f = open(filename,'w')
        if verbose: print("Writing to \'"+ str(f.name) +"\' in mode \'"+ str(f.mode) +"\'")
        f.write( self.createStr_JavaSqLite() )
        f.close()

    def printCreates(self):
        # Debug helper: dump the CREATE strings to stdout.
        for table in self.tables:
            print( table.createStr_CreateStr() +"\n")

    def printInserts(self):
        # Debug helper: dump the INSERT strings to stdout.
        for table in self.tables:
            print( table.createStr_InsertStr() +"\n")
#===============================================================================
# init_args()
# Sets up the command line parsing logic. Any changes to cmd line input should
# take place here.
# ------------------------------------------
# return
# args : the list of parsed arguments
#===============================================================================
def init_args():
    """Configure the command-line parser and parse sys.argv.

    :returns: the parsed argument namespace
    """
    parser = argparse.ArgumentParser(description=_description)
    parser.add_argument('-i', '--infile', dest='infilename', required=False,
                        help='The .sql file that you want to parse from')
    parser.add_argument('-o', '--outfile', dest='outfilename', required=False,
                        help='The Java file you want to write out to')
    parser.add_argument('--classname', '-cn', required=False,
                        help='The name of the Java class')
    parser.add_argument('--packagename', '-pn', required=False,
                        help='The database package to use')
    parser.add_argument('--baseclass', '-bc', required=False,
                        help='The class that all of the generated classes will implement')
    parser.add_argument('--baseclassHeader', '-bch', required=False,
                        help='The file that needs to be imported to use the baseclass')
    # Defaults assume the FRC 836 repository layout.
    defaults = dict(
        infilename='FRC_Scouting_Server/scouting.sql',
        outfilename='src/org/frc836/database/FRCScoutingContract.java',
        packagename='org.frc836.database',
        classname='FRCScoutingContract',
        baseclass='BaseColumns',
        baseclassHeader='android.provider.BaseColumns',
    )
    parser.set_defaults(**defaults)
    return parser.parse_args()
if __name__ == "__main__":
    # Parse the command line, then translate the SQL dump into the Java
    # SQLite contract file.
    args = init_args()
    SqlCreator = SqlToJava(packageName = args.packagename,
                           className = args.classname,
                           baseClass = args.baseclass,
                           baseClassHeader = args.baseclassHeader
                           )
    SqlCreator.readFile(args.infilename, verbose=True)
    SqlCreator.writeJavaSqLiteFile(args.outfilename, verbose=True)
| apache-2.0 | -2,963,673,393,153,700,400 | 4,995,846,724,419,183,000 | 32.5875 | 87 | 0.62945 | false |
grlee77/scipy | scipy/spatial/tests/test_slerp.py | 11 | 15434 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.spatial import geometric_slerp
def _generate_spherical_points(ndim=3, n_pts=2):
# generate uniform points on sphere
# see: https://stackoverflow.com/a/23785326
# tentatively extended to arbitrary dims
# for 0-sphere it will always produce antipodes
np.random.seed(123)
points = np.random.normal(size=(n_pts, ndim))
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
return points[0], points[1]
class TestGeometricSlerp:
# Test various properties of the geometric slerp code
    @pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
    @pytest.mark.parametrize("n_pts", [0, 3, 17])
    def test_shape_property(self, n_dims, n_pts):
        # geometric_slerp output shape should match
        # input dimensionality & requested number
        # of interpolation points
        # (n_pts == 0 exercises the empty-result edge case)
        start, end = _generate_spherical_points(n_dims, 2)

        actual = geometric_slerp(start=start,
                                 end=end,
                                 t=np.linspace(0, 1, n_pts))

        assert actual.shape == (n_pts, n_dims)
    @pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
    @pytest.mark.parametrize("n_pts", [3, 17])
    def test_include_ends(self, n_dims, n_pts):
        # geometric_slerp should return a data structure
        # that includes the start and end coordinates
        # when t includes 0 and 1 ends
        # this is convenient for plotting surfaces represented
        # by interpolations for example

        # the generator doesn't work so well for the unit
        # sphere (it always produces antipodes), so use
        # custom values there
        start, end = _generate_spherical_points(n_dims, 2)

        actual = geometric_slerp(start=start,
                                 end=end,
                                 t=np.linspace(0, 1, n_pts))

        # first/last interpolants must reproduce the endpoints exactly
        assert_allclose(actual[0], start)
        assert_allclose(actual[-1], end)
    @pytest.mark.parametrize("start, end", [
        # both arrays are not flat
        (np.zeros((1, 3)), np.ones((1, 3))),
        # only start array is not flat
        (np.zeros((1, 3)), np.ones(3)),
        # only end array is not flat
        (np.zeros(1), np.ones((3, 1))),
        ])
    def test_input_shape_flat(self, start, end):
        # geometric_slerp should handle input arrays that are
        # not flat appropriately (reject them with a clear error)
        with pytest.raises(ValueError, match='one-dimensional'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 10))
    @pytest.mark.parametrize("start, end", [
        # 7-D and 3-D ends
        (np.zeros(7), np.ones(3)),
        # 2-D and 1-D ends
        (np.zeros(2), np.ones(1)),
        # empty, "3D" will also get caught this way
        (np.array([]), np.ones(3)),
        ])
    def test_input_dim_mismatch(self, start, end):
        # geometric_slerp must appropriately handle cases where
        # an interpolation is attempted across two different
        # dimensionalities
        with pytest.raises(ValueError, match='dimensions'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 10))
    @pytest.mark.parametrize("start, end", [
        # both empty
        (np.array([]), np.array([])),
        ])
    def test_input_at_least1d(self, start, end):
        # empty inputs to geometric_slerp must
        # be handled appropriately when not detected
        # by mismatch (both sides empty slips past that check)
        with pytest.raises(ValueError, match='at least two-dim'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 10))
    @pytest.mark.parametrize("start, end, expected", [
        # North and South Poles are definitely antipodes
        # but should be handled gracefully now
        (np.array([0, 0, 1.0]), np.array([0, 0, -1.0]), "warning"),
        # this case will issue a warning & be handled
        # gracefully as well;
        # North Pole was rotated very slightly
        # using r = R.from_euler('x', 0.035, degrees=True)
        # to achieve Euclidean distance offset from diameter by
        # 9.328908379124812e-08, within the default tol
        (np.array([0.00000000e+00,
                   -6.10865200e-04,
                   9.99999813e-01]), np.array([0, 0, -1.0]), "warning"),
        # this case should succeed without warning because a
        # sufficiently large
        # rotation was applied to North Pole point to shift it
        # to a Euclidean distance of 2.3036691931821451e-07
        # from South Pole, which is larger than tol
        (np.array([0.00000000e+00,
                   -9.59930941e-04,
                   9.99999539e-01]), np.array([0, 0, -1.0]), "success"),
        ])
    def test_handle_antipodes(self, start, end, expected):
        # antipodal points must be handled appropriately;
        # there are an infinite number of possible geodesic
        # interpolations between them in higher dims
        if expected == "warning":
            with pytest.warns(UserWarning, match='antipodes'):
                res = geometric_slerp(start=start,
                                      end=end,
                                      t=np.linspace(0, 1, 10))
        else:
            res = geometric_slerp(start=start,
                                  end=end,
                                  t=np.linspace(0, 1, 10))

        # antipodes or near-antipodes should still produce
        # slerp paths on the surface of the sphere (but they
        # may be ambiguous): unit norm is the only invariant we can check
        assert_allclose(np.linalg.norm(res, axis=1), 1.0)
    @pytest.mark.parametrize("start, end, expected", [
        # 2-D with n_pts=4 (two new interpolation points)
        # this is an actual circle
        (np.array([1, 0]),
         np.array([0, 1]),
         np.array([[1, 0],
                   [np.sqrt(3) / 2, 0.5],  # 30 deg on unit circle
                   [0.5, np.sqrt(3) / 2],  # 60 deg on unit circle
                   [0, 1]])),
        # likewise for 3-D (add z = 0 plane)
        # this is an ordinary sphere
        (np.array([1, 0, 0]),
         np.array([0, 1, 0]),
         np.array([[1, 0, 0],
                   [np.sqrt(3) / 2, 0.5, 0],
                   [0.5, np.sqrt(3) / 2, 0],
                   [0, 1, 0]])),
        # for 5-D, pad more columns with constants
        # zeros are easiest--non-zero values on unit
        # circle are more difficult to reason about
        # at higher dims
        (np.array([1, 0, 0, 0, 0]),
         np.array([0, 1, 0, 0, 0]),
         np.array([[1, 0, 0, 0, 0],
                   [np.sqrt(3) / 2, 0.5, 0, 0, 0],
                   [0.5, np.sqrt(3) / 2, 0, 0, 0],
                   [0, 1, 0, 0, 0]])),
        ])
    def test_straightforward_examples(self, start, end, expected):
        # some straightforward interpolation tests, sufficiently
        # simple to use the unit circle to deduce expected values;
        # for larger dimensions, pad with constants so that the
        # data is N-D but simpler to reason about
        actual = geometric_slerp(start=start,
                                 end=end,
                                 t=np.linspace(0, 1, 4))
        assert_allclose(actual, expected, atol=1e-16)
    @pytest.mark.parametrize("t", [
        # both interval ends clearly violate limits
        np.linspace(-20, 20, 300),
        # only one interval end violating limit slightly
        np.linspace(-0.0001, 0.0001, 17),
        ])
    def test_t_values_limits(self, t):
        # geometric_slerp() should appropriately handle
        # interpolation parameters < 0 and > 1 (reject them)
        with pytest.raises(ValueError, match='interpolation parameter'):
            _ = geometric_slerp(start=np.array([1, 0]),
                                end=np.array([0, 1]),
                                t=t)
    @pytest.mark.parametrize("start, end", [
        (np.array([1]),
         np.array([0])),
        (np.array([0]),
         np.array([1])),
        (np.array([-17.7]),
         np.array([165.9])),
        ])
    def test_0_sphere_handling(self, start, end):
        # it does not make sense to interpolate the set of
        # two points that is the 0-sphere (1-D inputs are rejected)
        with pytest.raises(ValueError, match='at least two-dim'):
            _ = geometric_slerp(start=start,
                                end=end,
                                t=np.linspace(0, 1, 4))
    @pytest.mark.parametrize("tol", [
        # an integer currently raises
        5,
        # string raises
        "7",
        # list and arrays also raise
        [5, 6, 7], np.array(9.0),
        ])
    def test_tol_type(self, tol):
        # geometric_slerp() should raise if tol is not
        # a suitable float type
        with pytest.raises(ValueError, match='must be a float'):
            _ = geometric_slerp(start=np.array([1, 0]),
                                end=np.array([0, 1]),
                                t=np.linspace(0, 1, 5),
                                tol=tol)
    @pytest.mark.parametrize("tol", [
        -5e-6,
        -7e-10,
        ])
    def test_tol_sign(self, tol):
        # geometric_slerp() currently handles negative
        # tol values, as long as they are floats
        # (only checks that no exception is raised)
        _ = geometric_slerp(start=np.array([1, 0]),
                            end=np.array([0, 1]),
                            t=np.linspace(0, 1, 5),
                            tol=tol)
    @pytest.mark.parametrize("start, end", [
        # 1-sphere (circle) with one point at origin
        # and the other on the circle
        (np.array([1, 0]), np.array([0, 0])),
        # 2-sphere (normal sphere) with both points
        # just slightly off sphere by the same amount
        # in different directions
        (np.array([1 + 1e-6, 0, 0]),
         np.array([0, 1 - 1e-6, 0])),
        # same thing in 4-D
        (np.array([1 + 1e-6, 0, 0, 0]),
         np.array([0, 1 - 1e-6, 0, 0])),
        ])
    def test_unit_sphere_enforcement(self, start, end):
        # geometric_slerp() should raise on input that clearly
        # cannot be on an n-sphere of radius 1
        with pytest.raises(ValueError, match='unit n-sphere'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 5))

    @pytest.mark.parametrize("start, end", [
        # 1-sphere 45 degree case
        (np.array([1, 0]),
         np.array([np.sqrt(2) / 2.,
                   np.sqrt(2) / 2.])),
        # 2-sphere 135 degree case
        (np.array([1, 0]),
         np.array([-np.sqrt(2) / 2.,
                   np.sqrt(2) / 2.])),
        ])
    @pytest.mark.parametrize("t_func", [
        np.linspace, np.logspace])
    def test_order_handling(self, start, end, t_func):
        # geometric_slerp() should handle scenarios with
        # ascending and descending t value arrays gracefully;
        # results should simply be reversed

        # for scrambled / unsorted parameters, the same values
        # should be returned, just in scrambled order

        num_t_vals = 20
        np.random.seed(789)
        forward_t_vals = t_func(0, 10, num_t_vals)
        # normalize to max of 1
        forward_t_vals /= forward_t_vals.max()
        reverse_t_vals = np.flipud(forward_t_vals)
        shuffled_indices = np.arange(num_t_vals)
        np.random.shuffle(shuffled_indices)
        scramble_t_vals = forward_t_vals.copy()[shuffled_indices]

        forward_results = geometric_slerp(start=start,
                                          end=end,
                                          t=forward_t_vals)
        reverse_results = geometric_slerp(start=start,
                                          end=end,
                                          t=reverse_t_vals)
        scrambled_results = geometric_slerp(start=start,
                                            end=end,
                                            t=scramble_t_vals)

        # check fidelity to input order
        assert_allclose(forward_results, np.flipud(reverse_results))
        assert_allclose(forward_results[shuffled_indices],
                        scrambled_results)
    @pytest.mark.parametrize("t", [
        # string:
        "15, 5, 7",
        # complex numbers currently produce a warning
        # but not sure we need to worry about it too much:
        # [3 + 1j, 5 + 2j],
        ])
    def test_t_values_conversion(self, t):
        # non-numeric t values must be rejected with ValueError
        with pytest.raises(ValueError):
            _ = geometric_slerp(start=np.array([1]),
                                end=np.array([0]),
                                t=t)
def test_accept_arraylike(self):
# array-like support requested by reviewer
# in gh-10380
actual = geometric_slerp([1, 0], [0, 1], [0, 1/3, 0.5, 2/3, 1])
# expected values are based on visual inspection
# of the unit circle for the progressions along
# the circumference provided in t
expected = np.array([[1, 0],
[np.sqrt(3) / 2, 0.5],
[np.sqrt(2) / 2,
np.sqrt(2) / 2],
[0.5, np.sqrt(3) / 2],
[0, 1]], dtype=np.float64)
# Tyler's original Cython implementation of geometric_slerp
# can pass at atol=0 here, but on balance we will accept
# 1e-16 for an implementation that avoids Cython and
# makes up accuracy ground elsewhere
assert_allclose(actual, expected, atol=1e-16)
def test_scalar_t(self):
# when t is a scalar, return value is a single
# interpolated point of the appropriate dimensionality
# requested by reviewer in gh-10380
actual = geometric_slerp([1, 0], [0, 1], 0.5)
expected = np.array([np.sqrt(2) / 2,
np.sqrt(2) / 2], dtype=np.float64)
assert actual.shape == (2,)
assert_allclose(actual, expected)
@pytest.mark.parametrize('start', [
np.array([1, 0, 0]),
np.array([0, 1]),
])
def test_degenerate_input(self, start):
# handle start == end with repeated value
# like np.linspace
expected = [start] * 5
actual = geometric_slerp(start=start,
end=start,
t=np.linspace(0, 1, 5))
assert_allclose(actual, expected)
@pytest.mark.parametrize('k', np.logspace(-10, -1, 10))
def test_numerical_stability_pi(self, k):
# geometric_slerp should have excellent numerical
# stability for angles approaching pi between
# the start and end points
angle = np.pi - k
ts = np.linspace(0, 1, 100)
P = np.array([1, 0, 0, 0])
Q = np.array([np.cos(angle), np.sin(angle), 0, 0])
# the test should only be enforced for cases where
# geometric_slerp determines that the input is actually
# on the unit sphere
with np.testing.suppress_warnings() as sup:
sup.filter(UserWarning)
result = geometric_slerp(P, Q, ts, 1e-18)
norms = np.linalg.norm(result, axis=1)
error = np.max(np.abs(norms - 1))
assert error < 4e-15
| bsd-3-clause | -2,335,777,806,024,847,400 | 1,660,766,680,576,800,000 | 39.29765 | 72 | 0.527601 | false |
mariopro/youtube-dl | youtube_dl/extractor/npo.py | 18 | 17167 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
)
class NPOBaseIE(InfoExtractor):
    # Shared helper base for the npo.nl family of extractors.

    def _get_token(self, video_id):
        """Download the npoplayer token and unscramble it.

        The descrambling mirrors the site's JavaScript: the first two
        digit characters found in the token's interior are swapped.
        """
        token_page = self._download_webpage(
            'http://ida.omroep.nl/npoplayer/i.js',
            video_id, note='Downloading token')
        token = self._search_regex(
            r'npoplayer\.token = "(.+?)"', token_page, 'token')
        # Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js
        token_l = list(token)
        first = second = None
        # scan only positions 5 .. len-5 for the two digits to swap
        for i in range(5, len(token_l) - 4):
            if token_l[i].isdigit():
                if first is None:
                    first = i
                elif second is None:
                    second = i
        if first is None or second is None:
            # fallback positions when fewer than two digits are present
            first = 12
            second = 13
        token_l[first], token_l[second] = token_l[second], token_l[first]
        return ''.join(token_l)
class NPOIE(NPOBaseIE):
    # Extractor for regular (non-live, non-radio) npo.nl / ntr.nl /
    # omroepwnl.nl video pages.
    IE_NAME = 'npo'
    IE_DESC = 'npo.nl and ntr.nl'
    _VALID_URL = r'''(?x)
                    (?:
                        npo:|
                        https?://
                            (?:www\.)?
                            (?:
                                npo\.nl/(?!live|radio)(?:[^/]+/){2}|
                                ntr\.nl/(?:[^/]+/){2,}|
                                omroepwnl\.nl/video/fragment/[^/]+__
                            )
                        )
                        (?P<id>[^/?#]+)
                '''

    _TESTS = [
        {
            'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
            'md5': '4b3f9c429157ec4775f2c9cb7b911016',
            'info_dict': {
                'id': 'VPWON_1220719',
                'ext': 'm4v',
                'title': 'Nieuwsuur',
                'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
                'upload_date': '20140622',
            },
        },
        {
            'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
            'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
            'info_dict': {
                'id': 'VARA_101191800',
                'ext': 'm4v',
                'title': 'De Mega Mike & Mega Thomas show: The best of.',
                'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
                'upload_date': '20090227',
                'duration': 2400,
            },
        },
        {
            'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'Tegenlicht: De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
                'duration': 3000,
            },
        },
        {
            'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
            'info_dict': {
                'id': 'WO_VPRO_043706',
                'ext': 'wmv',
                'title': 'De nieuwe mens - Deel 1',
                'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
                'duration': 4680,
            },
            'params': {
                # mplayer mms download
                'skip_download': True,
            }
        },
        # non asf in streams
        {
            'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
            'md5': 'b3da13de374cbe2d5332a7e910bef97f',
            'info_dict': {
                'id': 'WO_NOS_762771',
                'ext': 'mp4',
                'title': 'Hoe gaat Europa verder na Parijs?',
            },
        },
        {
            'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
            'md5': '01c6a2841675995da1f0cf776f03a9c3',
            'info_dict': {
                'id': 'VPWON_1233944',
                'ext': 'm4v',
                'title': 'Aap, poot, pies',
                'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
                'upload_date': '20150508',
                'duration': 599,
            },
        },
        {
            'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
            'md5': 'd30cd8417b8b9bca1fdff27428860d08',
            'info_dict': {
                'id': 'POW_00996502',
                'ext': 'm4v',
                'title': '''"Dit is wel een 'landslide'..."''',
                'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
                'upload_date': '20150508',
                'duration': 462,
            },
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._get_info(video_id)

    def _get_info(self, video_id):
        """Fetch metadata and formats for *video_id* from the omroep.nl
        metadata/IDA services and build the info dict."""
        metadata = self._download_json(
            'http://e.omroep.nl/metadata/%s' % video_id,
            video_id,
            # We have to remove the javascript callback
            transform_source=strip_jsonp,
        )

        # For some videos actual video id (prid) is different (e.g. for
        # http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
        # video id is POMS_WNL_853698 but prid is POW_00996502)
        video_id = metadata.get('prid') or video_id

        # titel is too generic in some cases so utilize aflevering_titel as well
        # when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html)
        title = metadata['titel']
        sub_title = metadata.get('aflevering_titel')
        if sub_title and sub_title != title:
            title += ': %s' % sub_title

        token = self._get_token(video_id)

        formats = []

        # "pubopties" lists the publication options (format ids) served
        # through the IDA API; 'adaptive' maps to an HLS manifest.
        pubopties = metadata.get('pubopties')
        if pubopties:
            quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
            for format_id in pubopties:
                format_info = self._download_json(
                    'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
                    % (video_id, format_id, token),
                    video_id, 'Downloading %s JSON' % format_id)
                if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
                    continue
                streams = format_info.get('streams')
                if streams:
                    video_info = self._download_json(
                        streams[0] + '&type=json',
                        video_id, 'Downloading %s stream JSON' % format_id)
                else:
                    video_info = format_info
                video_url = video_info.get('url')
                if not video_url:
                    continue
                if format_id == 'adaptive':
                    formats.extend(self._extract_m3u8_formats(video_url, video_id))
                else:
                    formats.append({
                        'url': video_url,
                        'format_id': format_id,
                        'quality': quality(format_id),
                    })

        # legacy direct streams: plain URLs, or ASX playlists that must
        # be resolved to the referenced media URL
        streams = metadata.get('streams')
        if streams:
            for i, stream in enumerate(streams):
                stream_url = stream.get('url')
                if not stream_url:
                    continue
                if '.asf' not in stream_url:
                    formats.append({
                        'url': stream_url,
                        'quality': stream.get('kwaliteit'),
                    })
                    continue
                asx = self._download_xml(
                    stream_url, video_id,
                    'Downloading stream %d ASX playlist' % i,
                    transform_source=fix_xml_ampersands)
                ref = asx.find('./ENTRY/Ref')
                if ref is None:
                    continue
                video_url = ref.get('href')
                if not video_url:
                    continue
                formats.append({
                    'url': video_url,
                    'ext': stream.get('formaat', 'asf'),
                    'quality': stream.get('kwaliteit'),
                })

        self._sort_formats(formats)

        subtitles = {}
        # tt888 == 'ja' means teletext-888 subtitles are available
        if metadata.get('tt888') == 'ja':
            subtitles['nl'] = [{
                'ext': 'vtt',
                'url': 'http://e.omroep.nl/tt888/%s' % video_id,
            }]

        return {
            'id': video_id,
            'title': title,
            'description': metadata.get('info'),
            # NOTE(review): assumes 'images' is non-empty when present --
            # an empty list would raise IndexError here; verify upstream.
            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
            'upload_date': unified_strdate(metadata.get('gidsdatum')),
            'duration': parse_duration(metadata.get('tijdsduur')),
            'formats': formats,
            'subtitles': subtitles,
        }
class NPOLiveIE(NPOBaseIE):
    # Extractor for npo.nl live TV channel streams.
    IE_NAME = 'npo.nl:live'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'

    _TEST = {
        'url': 'http://www.npo.nl/live/npo-1',
        'info_dict': {
            'id': 'LI_NEDERLAND1_136692',
            'display_id': 'npo-1',
            'ext': 'mp4',
            'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Livestream',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        """Resolve the channel page to its live id, then collect HDS/HLS
        (and fallback) formats via the IDA API."""
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # the real id is embedded in the page as data-prid
        live_id = self._search_regex(
            r'data-prid="([^"]+)"', webpage, 'live id')

        metadata = self._download_json(
            'http://e.omroep.nl/metadata/%s' % live_id,
            display_id, transform_source=strip_jsonp)

        token = self._get_token(display_id)

        formats = []

        streams = metadata.get('streams')
        if streams:
            for stream in streams:
                stream_type = stream.get('type').lower()
                # smooth streaming is not supported
                if stream_type in ['ss', 'ms']:
                    continue

                stream_info = self._download_json(
                    'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
                    % (stream.get('url'), token),
                    display_id, 'Downloading %s JSON' % stream_type)
                if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
                    continue
                # the API returns a JSONP document whose payload is the
                # actual stream URL
                stream_url = self._download_json(
                    stream_info['stream'], display_id,
                    'Downloading %s URL' % stream_type,
                    'Unable to download %s URL' % stream_type,
                    transform_source=strip_jsonp, fatal=False)
                if not stream_url:
                    continue
                if stream_type == 'hds':
                    f4m_formats = self._extract_f4m_formats(stream_url, display_id)
                    # f4m downloader downloads only piece of live stream
                    for f4m_format in f4m_formats:
                        f4m_format['preference'] = -1
                    formats.extend(f4m_formats)
                elif stream_type == 'hls':
                    formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
                else:
                    formats.append({
                        'url': stream_url,
                        'preference': -10,
                    })

        self._sort_formats(formats)

        return {
            'id': live_id,
            'display_id': display_id,
            'title': self._live_title(metadata['titel']),
            'description': metadata['info'],
            # NOTE(review): assumes 'images', when present, is non-empty
            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
            'formats': formats,
            'is_live': True,
        }
class NPORadioIE(InfoExtractor):
    # Extractor for npo.nl live radio channel pages.
    IE_NAME = 'npo.nl:radio'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'

    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-1',
        'info_dict': {
            'id': 'radio-1',
            'ext': 'mp3',
            'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    @staticmethod
    def _html_get_attribute_regex(attribute):
        # regex for a single-quoted HTML attribute value: attr = 'value'
        return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)

    def _real_extract(self, url):
        """Read channel title and stream description straight out of the
        page's data-channel / data-streams attributes."""
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            self._html_get_attribute_regex('data-channel'), webpage, 'title')

        # data-streams holds a JSON object with 'url' and 'codec'
        stream = self._parse_json(
            self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
            video_id)

        codec = stream.get('codec')

        return {
            'id': video_id,
            'url': stream['url'],
            'title': self._live_title(title),
            'acodec': codec,
            # the codec name doubles as the file extension (e.g. mp3)
            'ext': codec,
            'is_live': True,
        }
class NPORadioFragmentIE(InfoExtractor):
    # Extractor for individual npo.nl radio fragments (recorded clips).
    IE_NAME = 'npo.nl:radio:fragment'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
        'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
        'info_dict': {
            'id': '174356',
            'ext': 'mp3',
            'title': 'Jubileumconcert Willeke Alberti',
        },
    }

    def _real_extract(self, url):
        """Scrape the fragment title and direct audio URL from the page."""
        audio_id = self._match_id(url)

        webpage = self._download_webpage(url, audio_id)

        # the fragment link back to itself carries the title attribute
        title = self._html_search_regex(
            r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
            webpage, 'title')

        audio_url = self._search_regex(
            r"data-streams='([^']+)'", webpage, 'audio url')

        return {
            'id': audio_id,
            'url': audio_url,
            'title': title,
        }
class VPROIE(NPOIE):
    # vpro.nl pages embed one or more NPO media ids; single ids are
    # delegated to NPOIE via the npo: scheme, multiple become a playlist.
    _VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
    _TESTS = [
        {
            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
            },
        },
        {
            'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
            'info_dict': {
                'id': 'sergio-herman',
                'title': 'Sergio Herman: Fucking perfect',
            },
            'playlist_count': 2,
        },
        {
            # playlist with youtube embed
            'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
            'info_dict': {
                'id': 'education-education',
                'title': '2Doc',
            },
            'playlist_count': 2,
        }
    ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # data-media-id may hold either a bare NPO id or a full URL
        # (e.g. a YouTube embed); only bare ids get the npo: prefix
        entries = [
            self.url_result('npo:%s' % video_id if not video_id.startswith('http') else video_id)
            for video_id in re.findall(r'data-media-id="([^"]+)"', webpage)
        ]

        playlist_title = self._search_regex(
            r'<title>\s*([^>]+?)\s*-\s*Teledoc\s*-\s*VPRO\s*</title>',
            webpage, 'playlist title', default=None) or self._og_search_title(webpage)

        return self.playlist_result(entries, playlist_id, playlist_title)
class WNLIE(InfoExtractor):
    # omroepwnl.nl detail pages list their parts ("Deel N") as links;
    # each part is delegated to the NPO extractor.
    _VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'

    _TEST = {
        'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
        'info_dict': {
            'id': 'vandaag-de-dag-6-mei',
            'title': 'Vandaag de Dag 6 mei',
        },
        'playlist_count': 4,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # NOTE(review): the first capture group is the link's href, not a
        # bare media id -- confirm that 'npo:<href>' is what NPOIE expects.
        entries = [
            self.url_result('npo:%s' % video_id, 'NPO')
            for video_id, part in re.findall(
                r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>(Deel \d+)', webpage)
        ]

        playlist_title = self._html_search_regex(
            r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>',
            webpage, 'playlist title')

        return self.playlist_result(entries, playlist_id, playlist_title)
w1ll1am23/home-assistant | tests/components/sql/test_sensor.py | 3 | 2727 | """The test for the sql sensor platform."""
import pytest
import voluptuous as vol
from homeassistant.components.sql.sensor import validate_sql_select
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
async def test_query(hass):
    """A literal SELECT should surface as the sensor state and attribute."""
    query_conf = {
        "name": "count_tables",
        "query": "SELECT 5 as value",
        "column": "value",
    }
    config = {
        "sensor": {
            "platform": "sql",
            "db_url": "sqlite://",
            "queries": [query_conf],
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()

    state = hass.states.get("sensor.count_tables")
    assert state.state == "5"
    assert state.attributes["value"] == 5
async def test_invalid_query(hass):
    """Non-SELECT statements are rejected; a broken SELECT leaves the
    sensor in the unknown state."""
    with pytest.raises(vol.Invalid):
        validate_sql_select("DROP TABLE *")

    bad_query_conf = {
        "name": "count_tables",
        "query": "SELECT * value FROM sqlite_master;",
        "column": "value",
    }
    config = {
        "sensor": {
            "platform": "sql",
            "db_url": "sqlite://",
            "queries": [bad_query_conf],
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()

    state = hass.states.get("sensor.count_tables")
    assert state.state == STATE_UNKNOWN
@pytest.mark.parametrize(
    "url,expected_patterns,not_expected_patterns",
    [
        (
            "sqlite://homeassistant:hunter2@homeassistant.local",
            ["sqlite://****:****@homeassistant.local"],
            ["sqlite://homeassistant:hunter2@homeassistant.local"],
        ),
        (
            "sqlite://homeassistant.local",
            ["sqlite://homeassistant.local"],
            [],
        ),
    ],
)
async def test_invalid_url(hass, caplog, url, expected_patterns, not_expected_patterns):
    """Credentials embedded in the db_url must be redacted in the log."""
    config = {
        "sensor": {
            "platform": "sql",
            "db_url": url,
            "queries": [
                {
                    "name": "count_tables",
                    "query": "SELECT 5 as value",
                    "column": "value",
                }
            ],
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()

    log_text = caplog.text
    for fragment in not_expected_patterns:
        assert fragment not in log_text
    for fragment in expected_patterns:
        assert fragment in log_text
| apache-2.0 | -2,971,388,094,644,024,300 | -3,793,041,403,761,608,700 | 27.113402 | 88 | 0.520719 | false |
cloakedcode/CouchPotatoServer | libs/suds/bindings/document.py | 204 | 5792 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for the (WS) SOAP I{document/literal}.
"""
from logging import getLogger
from suds import *
from suds.bindings.binding import Binding
from suds.sax.element import Element
log = getLogger(__name__)
class Document(Binding):
    """
    The document/literal style.  Literal is the only (@use) supported
    since document/encoded is pretty much dead.
    Although the soap specification supports multiple documents within the soap
    <body/>, it is very uncommon.  As such, suds presents an I{RPC} view of
    service methods defined with a single document parameter.  This is done so
    that the user can pass individual parameters instead of one, single document.
    To support the complete specification, service methods defined with multiple documents
    (multiple message parts), must present a I{document} view for that method.
    """

    def bodycontent(self, method, args, kwargs):
        """Build the content of the soap <body/> for an outbound request
        from positional I{args} and keyword I{kwargs}."""
        #
        # The I{wrapped} vs I{bare} style is detected in 2 ways.
        # If there is 2+ parts in the message then it is I{bare}.
        # If there is only (1) part and that part resolves to a builtin then
        # it is I{bare}.  Otherwise, it is I{wrapped}.
        #
        if not len(method.soap.input.body.parts):
            return ()
        wrapped = method.soap.input.body.wrapped
        if wrapped:
            pts = self.bodypart_types(method)
            root = self.document(pts[0])
        else:
            root = []
        n = 0
        for pd in self.param_defs(method):
            # positional args are consumed first, then keywords by name
            if n < len(args):
                value = args[n]
            else:
                value = kwargs.get(pd[0])
            n += 1
            p = self.mkparam(method, pd, value)
            if p is None:
                continue
            if not wrapped:
                # bare style: each part element is qualified itself
                ns = pd[1].namespace('ns0')
                p.setPrefix(ns[0], ns[1])
            root.append(p)
        return root

    def replycontent(self, method, body):
        """Return the payload child elements of the reply <body/>,
        unwrapping the single wrapper element for wrapped style."""
        wrapped = method.soap.output.body.wrapped
        if wrapped:
            return body[0].children
        else:
            return body.children

    def document(self, wrapper):
        """
        Get the document root.  For I{document/literal}, this is the
        name of the wrapper element qualifed by the schema tns.
        @param wrapper: The method name.
        @type wrapper: L{xsd.sxbase.SchemaObject}
        @return: A root element.
        @rtype: L{Element}
        """
        tag = wrapper[1].name
        ns = wrapper[1].namespace('ns0')
        d = Element(tag, ns=ns)
        return d

    def mkparam(self, method, pdef, object):
        """Build the parameter element(s) for I{object}; lists expand to
        one element per item (document arrays are unbounded elements)."""
        #
        # Expand list parameters into individual parameters
        # each with the type information.  This is because in document
        # arrays are simply unbounded elements.
        #
        if isinstance(object, (list, tuple)):
            tags = []
            for item in object:
                tags.append(self.mkparam(method, pdef, item))
            return tags
        else:
            return Binding.mkparam(self, method, pdef, object)

    def param_defs(self, method):
        """Return (name, type) parameter definitions for I{method};
        wrapped style is flattened to the wrapper's child elements."""
        #
        # Get parameter definitions for document literal.
        # The I{wrapped} vs I{bare} style is detected in 2 ways.
        # If there is 2+ parts in the message then it is I{bare}.
        # If there is only (1) part and that part resolves to a builtin then
        # it is I{bare}.  Otherwise, it is I{wrapped}.
        #
        pts = self.bodypart_types(method)
        wrapped = method.soap.input.body.wrapped
        if not wrapped:
            return pts
        result = []
        # wrapped
        for p in pts:
            resolved = p[1].resolve()
            for child, ancestry in resolved:
                if child.isattr():
                    continue
                if self.bychoice(ancestry):
                    # children under <choice/> cannot be plain parameters
                    log.debug(
                        '%s\ncontained by <choice/>, excluded as param for %s()',
                        child,
                        method.name)
                    continue
                result.append((child.name, child))
        return result

    def returned_types(self, method):
        """Return the schema types produced by I{method}'s reply;
        wrapped style reports the children of the (single) wrapper."""
        result = []
        wrapped = method.soap.output.body.wrapped
        rts = self.bodypart_types(method, input=False)
        if wrapped:
            for pt in rts:
                resolved = pt.resolve(nobuiltin=True)
                for child, ancestry in resolved:
                    result.append(child)
                # only the first part is considered for wrapped replies
                break
        else:
            result += rts
        return result

    def bychoice(self, ancestry):
        """
        The ancestry contains a <choice/>
        @param ancestry: A list of ancestors.
        @type ancestry: list
        @return: True if contains <choice/>
        @rtype: boolean
        """
        for x in ancestry:
            if x.choice():
                return True
        return False
williamfeng323/py-web | flask/lib/python3.6/site-packages/sqlalchemy/dialects/mssql/pymssql.py | 32 | 3143 | # mssql/pymssql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
class _MSNumeric_pymssql(sqltypes.Numeric):
    """Numeric variant that skips Decimal conversion when asdecimal is off."""

    def result_processor(self, dialect, type_):
        # pymssql hands numerics back in a form that only needs a float
        # coercion; defer to the generic Decimal-producing processor only
        # when the type was configured with asdecimal=True.
        if self.asdecimal:
            return sqltypes.Numeric.result_processor(self, dialect, type_)
        return processors.to_float
class MSDialect_pymssql(MSDialect):
    # pymssql cannot report reliable rowcounts for all statements
    supports_sane_rowcount = False
    driver = 'pymssql'

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pymssql,
            sqltypes.Float: sqltypes.Float,
        }
    )

    @classmethod
    def dbapi(cls):
        """Import and return the pymssql DBAPI module, patching very old
        client versions where needed."""
        module = __import__('pymssql')
        # pymmsql < 2.1.1 doesn't have a Binary method.  we use string
        client_ver = tuple(int(x) for x in module.__version__.split("."))
        if client_ver < (2, 1, 1):
            # TODO: monkeypatching here is less than ideal
            module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)

        if client_ver < (1, ):
            util.warn("The pymssql dialect expects at least "
                      "the 1.0 series of the pymssql DBAPI.")
        return module

    def __init__(self, **params):
        super(MSDialect_pymssql, self).__init__(**params)
        # SCOPE_IDENTITY() works with pymssql, prefer it for lastrowid
        self.use_scope_identity = True

    def _get_server_version_info(self, connection):
        """Parse 'select @@version' into a (major, minor, build, rev)
        tuple, or None when the banner doesn't match."""
        vers = connection.scalar("select @@version")
        m = re.match(
            r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
        if m:
            return tuple(int(x) for x in m.group(1, 2, 3, 4))
        else:
            return None

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)
        port = opts.pop('port', None)
        # pymssql takes host:port combined rather than a separate port kwarg
        if port and 'host' in opts:
            opts['host'] = "%s:%s" % (opts['host'], port)
        return [[], opts]

    def is_disconnect(self, e, connection, cursor):
        """Heuristically classify an error as a dropped connection by
        matching known message fragments."""
        for msg in (
            "Adaptive Server connection timed out",
            "Net-Lib error during Connection reset by peer",
            "message 20003",  # connection timeout
            "Error 10054",
            "Not connected to any MS SQL server",
            "Connection is closed",
            "message 20006",  # Write to the server failed
            "message 20017",  # Unexpected EOF from the server
        ):
            if msg in str(e):
                return True
        else:
            return False

dialect = MSDialect_pymssql
| mit | -1,751,375,249,134,686,000 | -149,637,974,728,079,360 | 31.402062 | 75 | 0.593382 | false |
jkonecny12/anaconda | tests/nosetests/pyanaconda_tests/core/signal_test.py | 5 | 5383 | #
# Martin Kolman <mkolman@redhat.com>
#
# Copyright 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
# Test the Python-based signal and slot implementation.
#
import unittest
from pyanaconda.core.signal import Signal
class FooClass(object):
    """Minimal target object for the signal tests: ``set_var`` is used
    as a slot and ``var`` exposes the last value it received."""

    def __init__(self):
        # backing storage for the read-only ``var`` property
        self._var = None

    @property
    def var(self):
        """The value most recently stored via set_var(), or None."""
        return self._var

    def set_var(self, value):
        """Store *value*; connected to signals in the tests."""
        self._var = value
class SignalTestCase(unittest.TestCase):
    """Exercise Signal connect/emit/disconnect/clear with different kinds
    of callables (bound methods, local functions, lambdas) and chaining.

    NOTE(review): method names follow a ``*_test`` suffix convention
    (this file lives under tests/nosetests) rather than pytest's
    ``test_*`` prefix -- confirm the runner picks them up.
    """

    def setUp(self):
        # written by the local-function/lambda callbacks below
        self.var = None

    def method_test(self):
        """Test if a method can be correctly connected to a signal."""
        signal = Signal()
        foo = FooClass()
        self.assertIsNone(foo.var)
        # connect the signal
        signal.connect(foo.set_var)
        # trigger the signal
        signal.emit("bar")
        # check if the callback triggered correctly
        self.assertEqual(foo.var, "bar")
        # try to trigger the signal again
        signal.emit("baz")
        self.assertEqual(foo.var, "baz")
        # now try to disconnect the signal
        signal.disconnect(foo.set_var)
        # check that calling the signal again
        # no longer triggers the callback
        signal.emit("anaconda")
        self.assertEqual(foo.var, "baz")

    def function_test(self):
        """Test if a local function can be correctly connected to a signal."""
        # create a local function
        def set_var(value):
            self.var = value
        signal = Signal()
        self.assertIsNone(self.var)
        # connect the signal
        signal.connect(set_var)
        # trigger the signal
        signal.emit("bar")
        # check if the callback triggered correctly
        self.assertEqual(self.var, "bar")
        # try to trigger the signal again
        signal.emit("baz")
        self.assertEqual(self.var, "baz")
        # now try to disconnect the signal
        signal.disconnect(set_var)
        # check that calling the signal again
        # no longer triggers the callback
        signal.emit("anaconda")
        self.assertEqual(self.var, "baz")

    def lambda_test(self):
        """Test if a lambda can be correctly connected to a signal."""
        foo = FooClass()
        signal = Signal()
        self.assertIsNone(foo.var)
        # connect the signal
        # pylint: disable=unnecessary-lambda
        lambda_instance = lambda x: foo.set_var(x)
        signal.connect(lambda_instance)
        # trigger the signal
        signal.emit("bar")
        # check if the callback triggered correctly
        self.assertEqual(foo.var, "bar")
        # try to trigger the signal again
        signal.emit("baz")
        self.assertEqual(foo.var, "baz")
        # now try to disconnect the signal
        signal.disconnect(lambda_instance)
        # check that calling the signal again
        # no longer triggers the callback
        signal.emit("anaconda")
        self.assertEqual(foo.var, "baz")

    def clear_test(self):
        """Test if the clear() method correctly clears any connected callbacks."""
        def set_var(value):
            self.var = value
        signal = Signal()
        foo = FooClass()
        lambda_foo = FooClass()
        self.assertIsNone(foo.var)
        self.assertIsNone(lambda_foo.var)
        self.assertIsNone(self.var)
        # connect the callbacks
        signal.connect(set_var)
        signal.connect(foo.set_var)
        # pylint: disable=unnecessary-lambda
        signal.connect(lambda x: lambda_foo.set_var(x))
        # trigger the signal
        signal.emit("bar")
        # check that the callbacks were triggered
        self.assertEqual(self.var, "bar")
        self.assertEqual(foo.var, "bar")
        self.assertEqual(lambda_foo.var, "bar")
        # clear the callbacks
        signal.clear()
        # trigger the signal again
        signal.emit("anaconda")
        # check that the callbacks were not triggered
        self.assertEqual(self.var, "bar")
        self.assertEqual(foo.var, "bar")
        self.assertEqual(lambda_foo.var, "bar")

    def signal_chain_test(self):
        """Check if signals can be chained together."""
        foo = FooClass()
        self.assertIsNone(foo.var)
        signal1 = Signal()
        signal1.connect(foo.set_var)
        signal2 = Signal()
        signal2.connect(signal1.emit)
        signal3 = Signal()
        signal3.connect(signal2.emit)
        # trigger the chain
        signal3.emit("bar")
        # check if the initial callback was triggered
        self.assertEqual(foo.var, "bar")
| gpl-2.0 | 2,912,752,023,466,530,300 | -2,520,813,132,844,501,500 | 33.50641 | 82 | 0.631618 | false |
PXke/invenio | invenio/ext/logging/wrappers.py | 1 | 19138 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
""" Error handling library """
__revision__ = "$Id$"
import traceback
import os
import sys
import time
import datetime
import re
import inspect
from flask import current_app
from six import iteritems, StringIO
from invenio.base.globals import cfg
from .models import HstEXCEPTION
## Regular expression matching variable names that look password-related;
## the values of such variables are masked (i.e. NOT disclosed) when
## frames are analyzed for error reports.
RE_PWD = re.compile(r"pwd|pass|p_pw", re.I)
def get_pretty_wide_client_info(req):
    """Return a pretty, multi-line dump of all available information
    about the current user/client, one aligned "key: value" per line.
    Password-looking keys are skipped entirely."""
    if not req:
        return "No client information available"
    from invenio.legacy.webuser import collect_user_info
    user_info = collect_user_info(req)
    sorted_keys = sorted(user_info.keys())
    widest = max(len(key) for key in sorted_keys)
    # right-align every key to the widest one
    line_fmt = "%% %is: %%s\n" % widest
    lines = []
    for key in sorted_keys:
        if RE_PWD.search(key):
            # never print anything that might be a password
            continue
        value = user_info[key]
        if key in ('uri', 'referer'):
            value = "<%s>" % value
        lines.append(line_fmt % (key, value))
    out = "".join(lines)
    return out[:-1] if out.endswith('\n') else out
def get_tracestack():
    """
    Return a pretty-printed traceback: the active exception's traceback
    when one is being handled, otherwise a forced dump of the current
    call stack (excluding this function itself).
    """
    exc_tb_lines = traceback.format_tb(sys.exc_info()[2])
    if exc_tb_lines:
        return "Traceback: \n%s" % "\n".join(exc_tb_lines)
    ## force traceback except for this call
    frames = traceback.extract_stack()[:-1]
    pretty = "%sForced traceback (most recent call last)" % (' ' * 4, )
    for frame in frames:
        pretty += """
    File "%(file)s", line %(line)s, in %(function)s
        %(text)s""" % {
            'file': frame[0],
            'line': frame[1],
            'function': frame[2],
            'text': str(frame[3]) if frame[3] is not None else ""}
    return pretty
def register_emergency(msg, recipients=None):
    """Launch an emergency: mail *msg* to every address in *recipients*
    (by default those returned by get_emergency_recipients(), which reads
    CFG_SITE_EMERGENCY_EMAIL_ADDRESSES), always including the site admin.
    """
    from invenio.ext.email import send_email
    targets = set(recipients or get_emergency_recipients())
    # the site admin is always notified
    targets.add(cfg['CFG_SITE_ADMIN_EMAIL'])
    sender = cfg['CFG_SITE_SUPPORT_EMAIL']
    for address_str in targets:
        send_email(sender, address_str, "Emergency notification", msg)
def get_emergency_recipients(recipient_cfg=None):
    """Parse a list of appropriate emergency email recipients from
    CFG_SITE_EMERGENCY_EMAIL_ADDRESSES, or from a provided dictionary
    comprised of 'time constraint' => 'comma separated list of addresses'

    CFG_SITE_EMERGENCY_EMAIL_ADDRESSES format example:

    CFG_SITE_EMERGENCY_EMAIL_ADDRESSES = {
        'Sunday 22:00-06:00': '0041761111111@email2sms.foo.com',
        '06:00-18:00': 'team-in-europe@foo.com,0041762222222@email2sms.foo.com',
        '18:00-06:00': 'team-in-usa@foo.com',
        '*': 'john.doe.phone@foo.com'}

    @param recipient_cfg: optional configuration dict; defaults to
        CFG_SITE_EMERGENCY_EMAIL_ADDRESSES.
    @return: list of address strings whose time constraint matches now
        (entries keyed '*' or by an empty key always match).
    """
    from invenio.utils.date import parse_runtime_limit
    if recipient_cfg is None:
        recipient_cfg = cfg['CFG_SITE_EMERGENCY_EMAIL_ADDRESSES']
    recipients = set()
    for time_condition, address_str in recipient_cfg.items():
        # BUG FIX: the original tested "time_condition is not '*'", an
        # identity comparison with a string literal that only worked by
        # accident of CPython string interning; use real equality.
        if time_condition and time_condition != '*':
            (current_range, future_range) = parse_runtime_limit(time_condition)
            if not current_range[0] <= datetime.datetime.now() <= current_range[1]:
                continue
        recipients.add(address_str)
    return list(recipients)
def find_all_values_to_hide(local_variables, analyzed_stack=None):
    """Return the set of string values that must be masked in the output.

    Recursively scans ``local_variables`` (a mapping of variable name to
    value) for keys matching RE_PWD and collects the string form of their
    values; nested dicts are followed.  ``analyzed_stack`` tracks the
    ``id()`` of already-visited values to avoid infinite loops on cyclic
    structures.  On the top-level call the DB password is always included.
    """
    ## Let's add at least the DB password.
    if analyzed_stack is None:
        ret = set([cfg['CFG_DATABASE_PASS']])
        analyzed_stack = set()
    else:
        ret = set()
    for key, value in iteritems(local_variables):
        if id(value) in analyzed_stack:
            ## Let's avoid loops
            continue
        analyzed_stack.add(id(value))
        if RE_PWD.search(key):
            ret.add(str(value))
        if isinstance(value, dict):
            ret |= find_all_values_to_hide(value, analyzed_stack)
    if '' in ret:
        ## Let's discard the empty string in case there is an empty password,
        ## or otherwise anything will be separated by '<*****>' in the output
        ## :-)
        ret.remove('')
    return ret
def get_pretty_traceback(req=None, exc_info=None, skip_frames=0):
    """
    Given an optional request object and an optional exc_info,
    returns a text string representing many details about an exception:
    header line (time/type/value/location), client details, the regular
    traceback, and a per-frame dump of surrounding source and local
    variables with password-like values masked.

    @param req: optional request object used to collect client details.
    @param exc_info: optional (type, value, traceback) triple; defaults
        to sys.exc_info().
    @param skip_frames: currently unused — the line that consumed it is
        commented out below (NOTE(review): confirm before relying on it).
    @return: the pretty traceback string, or '' if no exception is pending.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    if exc_info[0]:
        ## We found an exception.
        ## We want to extract the name of the Exception
        exc_name = exc_info[0].__name__
        exc_value = str(exc_info[1])
        filename, line_no, function_name = _get_filename_and_line(exc_info)
        ## Let's record when and where and what
        www_data = "%(time)s -> %(name)s: %(value)s (%(file)s:%(line)s:%(function)s)" % {
            'time': time.strftime("%Y-%m-%d %H:%M:%S"),
            'name': exc_name,
            'value': exc_value,
            'file': filename,
            'line': line_no,
            'function': function_name }
        ## Let's retrieve contextual user related info, if any
        try:
            client_data = get_pretty_wide_client_info(req)
        except Exception as err:
            client_data = "Error in retrieving " \
                "contextual information: %s" % err
        ## Let's extract the traceback:
        tracestack_data_stream = StringIO()
        print("\n** Traceback details \n", file=tracestack_data_stream)
        traceback.print_exc(file=tracestack_data_stream)
        stack = [frame[0] for frame in inspect.trace()]
        #stack = [frame[0] for frame in inspect.getouterframes(exc_info[2])][skip_frames:]
        try:
            stack.reverse()
            print("\n** Stack frame details", file=tracestack_data_stream)
            values_to_hide = set()
            for frame in stack:
                try:
                    print(file=tracestack_data_stream)
                    print("Frame %s in %s at line %s" % (
                        frame.f_code.co_name,
                        frame.f_code.co_filename,
                        frame.f_lineno), file=tracestack_data_stream)
                    ## Dereferencing f_locals
                    ## See: http://utcc.utoronto.ca/~cks/space/blog/python/FLocalsAndTraceFunctions
                    local_values = frame.f_locals
                    try:
                        values_to_hide |= find_all_values_to_hide(local_values)
                        # Show +/- 3 lines of source context around the
                        # frame's current line; best-effort only.
                        code = open(frame.f_code.co_filename).readlines()
                        first_line = max(1, frame.f_lineno-3)
                        last_line = min(len(code), frame.f_lineno+3)
                        print("-" * 79, file=tracestack_data_stream)
                        for line in xrange(first_line, last_line+1):
                            code_line = code[line-1].rstrip()
                            if line == frame.f_lineno:
                                print("----> %4i %s" % (line, code_line), file=tracestack_data_stream)
                            else:
                                print("      %4i %s" % (line, code_line), file=tracestack_data_stream)
                        print("-" * 79, file=tracestack_data_stream)
                    except:
                        pass
                    for key, value in local_values.items():
                        print("\t%20s = " % key, end=' ', file=tracestack_data_stream)
                        try:
                            value = repr(value)
                        except Exception as err:
                            ## We shall gracefully accept errors when repr() of
                            ## a value fails (e.g. when we are trying to repr() a
                            ## variable that was not fully initialized as the
                            ## exception was raised during its __init__ call).
                            value = "ERROR: when representing the value: %s" % (err)
                        try:
                            print(_truncate_dynamic_string(value), file=tracestack_data_stream)
                        except:
                            print("<ERROR WHILE PRINTING VALUE>", file=tracestack_data_stream)
                finally:
                    # Break the frame reference cycle explicitly so locals
                    # captured in the traceback can be collected.
                    del frame
        finally:
            del stack
        tracestack_data = tracestack_data_stream.getvalue()
        for to_hide in values_to_hide:
            ## Let's hide passwords
            tracestack_data = tracestack_data.replace(to_hide, '<*****>')
        ## Okay, start printing:
        output = StringIO()
        print("* %s" % www_data, file=output)
        print("\n** User details", file=output)
        print(client_data, file=output)
        if tracestack_data:
            print(tracestack_data, file=output)
        return output.getvalue()
    else:
        return ""
def register_exception(stream='error',
                       req=None,
                       prefix='',
                       suffix='',
                       alert_admin=False,
                       subject=''):
    """
    Log error exception to invenio.err and warning exception to invenio.log.
    Errors will be logged together with client information (if req is
    given).
    Note: For sanity reasons, dynamic params such as PREFIX, SUFFIX and
    local stack variables are checked for length, and only first 500
    chars of their values are printed.
    @param stream: 'error' or 'warning'
    @param req: mod_python request
    @param prefix: a message to be printed before the exception in
    the log
    @param suffix: a message to be printed after the exception in
    the log
    @param alert_admin: whether to send the exception to the administrator via
    email. Note this parameter is bypassed when
    CFG_SITE_ADMIN_EMAIL_EXCEPTIONS is set to a value different than 1
    @param subject: overrides the email subject
    @return: 1 if successfully wrote to stream, 0 if not
    """
    try:
        ## Let's extract exception information
        exc_info = sys.exc_info()
        exc_name = exc_info[0].__name__
        output = get_pretty_traceback(
            req=req, exc_info=exc_info, skip_frames=2)
        if output:
            ## Okay, start printing:
            log_stream = StringIO()
            email_stream = StringIO()
            print('\n', end=' ', file=email_stream)
            ## If a prefix was requested let's print it
            if prefix:
                #prefix = _truncate_dynamic_string(prefix)
                print(prefix + '\n', file=log_stream)
                print(prefix + '\n', file=email_stream)
            print(output, file=log_stream)
            print(output, file=email_stream)
            ## If a suffix was requested let's print it
            if suffix:
                #suffix = _truncate_dynamic_string(suffix)
                print(suffix, file=log_stream)
                print(suffix, file=email_stream)
            log_text = log_stream.getvalue()
            email_text = email_stream.getvalue()
            if email_text.endswith('\n'):
                email_text = email_text[:-1]
            ## Preparing the exception dump
            if stream=='error':
                logger_method = current_app.logger.error
            else:
                logger_method = current_app.logger.info
            ## We now have the whole trace
            written_to_log = False
            try:
                ## Let's try to write into the log.
                logger_method(log_text)
                written_to_log = True
            except:
                written_to_log = False
            filename, line_no, function_name = _get_filename_and_line(exc_info)
            ## let's log the exception and see whether we should report it.
            log = HstEXCEPTION.get_or_create(exc_name, filename, line_no)
            # Email the admin when the exception history says it should be
            # notified AND (emailing is forced by config, requested via
            # alert_admin, or the log write above failed).
            if log.exception_should_be_notified and (
                cfg['CFG_SITE_ADMIN_EMAIL_EXCEPTIONS'] > 1 or
                (alert_admin and
                 cfg['CFG_SITE_ADMIN_EMAIL_EXCEPTIONS'] > 0) or
                not written_to_log):
                ## If requested or if it's impossible to write in the log
                from invenio.ext.email import send_email
                if not subject:
                    subject = 'Exception (%s:%s:%s)' % (
                        filename, line_no, function_name)
                subject = '%s at %s' % (subject, cfg['CFG_SITE_URL'])
                email_text = "\n%s\n%s" % (log.pretty_notification_info,
                                           email_text)
                if not written_to_log:
                    email_text += """\
Note that this email was sent to you because it has been impossible to log
this exception into %s""" % os.path.join(cfg['CFG_LOGDIR'], 'invenio.' + stream)
                send_email(
                    cfg['CFG_SITE_ADMIN_EMAIL'],
                    cfg['CFG_SITE_ADMIN_EMAIL'],
                    subject=subject,
                    content=email_text)
            return 1
        else:
            return 0
    except Exception as err:
        # Last-ditch: never let error reporting itself raise.
        print("Error in registering exception to '%s': '%s'" % (
            cfg['CFG_LOGDIR'] + '/invenio.' + stream, err), file=sys.stderr)
        return 0
def raise_exception(exception_type=Exception, msg='', stream='error',
                    req=None, prefix='', suffix='', alert_admin=False,
                    subject=''):
    """
    Log an error exception to invenio.err or a warning to invenio.log,
    without requiring a previously risen exception: one of type
    ``exception_type`` carrying ``msg`` is raised and immediately
    registered via register_exception().

    Errors are logged together with client information (if req is given).
    Note: for sanity reasons, dynamic params such as PREFIX, SUFFIX and
    local stack variables are truncated to their first 500 chars.

    @param exception_type: exception type to be used internally
    @param msg: error message
    @param stream: 'error' or 'warning'
    @param req: mod_python request
    @param prefix: a message to be printed before the exception in the log
    @param suffix: a message to be printed after the exception in the log
    @param alert_admin: whether to email the exception to the administrator.
        Bypassed when CFG_SITE_ADMIN_EMAIL_EXCEPTIONS differs from 1.
    @param subject: overrides the email subject
    @return: 1 if successfully wrote to stream, 0 if not
    """
    try:
        raise exception_type(msg)
    except:
        return register_exception(
            stream=stream, req=req, prefix=prefix, suffix=suffix,
            alert_admin=alert_admin, subject=subject)
def send_error_report_to_admin(header, url, time_msg,
                               browser, client, error,
                               sys_error, traceback_msg):
    """
    Sends an email to the admin with client info and tracestack.

    All parameters are pre-formatted strings that are interpolated
    verbatim into the report body.
    """
    from_addr = '%s Alert Engine <%s>' % (
        cfg['CFG_SITE_NAME'], cfg['CFG_WEBALERT_ALERT_ENGINE_EMAIL'])
    to_addr = cfg['CFG_SITE_ADMIN_EMAIL']
    body = """
The following error was seen by a user and sent to you.
%(contact)s
%(header)s
%(url)s
%(time)s
%(browser)s
%(client)s
%(error)s
%(sys_error)s
%(traceback)s
Please see the %(logdir)s/invenio.err for traceback details.""" % {
        'header': header,
        'url': url,
        'time': time_msg,
        'browser': browser,
        'client': client,
        'error': error,
        'sys_error': sys_error,
        'traceback': traceback_msg,
        'logdir': cfg['CFG_LOGDIR'],
        'contact': "Please contact %s quoting the following information:" %
            (cfg['CFG_SITE_SUPPORT_EMAIL'], )}
    from invenio.ext.email import send_email
    send_email(from_addr, to_addr, subject="Error notification", content=body)
def _get_filename_and_line(exc_info):
"""Return the filename, the line and the function_name where
the exception happened."""
tb = exc_info[2]
exception_info = traceback.extract_tb(tb)[-1]
filename = os.path.basename(exception_info[0])
line_no = exception_info[1]
function_name = exception_info[2]
return filename, line_no, function_name
def _truncate_dynamic_string(val, maxlength=500):
"""
Return at most MAXLENGTH characters of VAL. Useful for
sanitizing dynamic variable values in the output.
"""
out = repr(val)
if len(out) > maxlength:
out = out[:maxlength] + ' [...]'
return out
def wrap_warn():
    """Redirect Python warnings into the Flask application logger.

    Replaces warnings.showwarning with a version that logs the warning
    plus a formatted stack through current_app.logger.warning().
    NOTE(review): the replacement never invokes the original showwarning
    and ignores the ``file``/``line`` arguments — confirm this is the
    intended behavior.
    """
    import warnings
    from functools import wraps
    def wrapper(showwarning):
        @wraps(showwarning)
        def new_showwarning(message=None, category=None, filename=None,
                            lineno=None, file=None, line=None):
            current_app.logger.warning("* %(time)s -> WARNING: %(category)s: %(message)s (%(file)s:%(line)s)\n" % {
                'time': time.strftime("%Y-%m-%d %H:%M:%S"),
                'category': category,
                'message': message,
                'file': filename,
                'line': lineno} + "** Traceback details\n" +
                str(traceback.format_stack()) + "\n")
        return new_showwarning
    warnings.showwarning = wrapper(warnings.showwarning)
| gpl-2.0 | 736,123,673,270,958,700 | -469,470,828,461,297,000 | 36.452055 | 115 | 0.564845 | false |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/test/test_isinstance.py | 14 | 10083 | # Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import test_support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
    """Corner cases for isinstance() when __class__/__bases__ raise."""
    # Test to make sure that an AttributeError when accessing the instance's
    # class's bases is masked. This was actually a bug in Python 2.2 and
    # 2.2.1 where the exception wasn't caught but it also wasn't being cleared
    # (leading to an "undetected error" in the debug build). Set up is,
    # isinstance(inst, cls) where:
    #
    # - inst isn't an InstanceType
    # - cls isn't a ClassType, a TypeType, or a TupleType
    # - cls has a __bases__ attribute
    # - inst has a __class__ attribute
    # - inst.__class__ as no __bases__ attribute
    #
    # Sounds complicated, I know, but this mimics a situation where an
    # extension type raises an AttributeError when its __bases__ attribute is
    # gotten. In that case, isinstance() should return False.
    def test_class_has_no_bases(self):
        class I(object):
            def getclass(self):
                # This must return an object that has no __bases__ attribute
                return None
            __class__ = property(getclass)
        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)
        self.assertEqual(False, isinstance(I(), C()))
    # Like above except that inst.__class__.__bases__ raises an exception
    # other than AttributeError
    def test_bases_raises_other_than_attribute_error(self):
        class E(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        class I(object):
            def getclass(self):
                return E()
            __class__ = property(getclass)
        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, isinstance, I(), C())
    # Here's a situation where getattr(cls, '__bases__') raises an exception.
    # If that exception is not AttributeError, it should not get masked
    def test_dont_mask_non_attribute_error(self):
        class I: pass
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, isinstance, I(), C())
    # Like above, except that getattr(cls, '__bases__') raises an
    # AttributeError, which /should/ get masked as a TypeError
    def test_mask_attribute_error(self):
        class I: pass
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        self.assertRaises(TypeError, isinstance, I(), C())
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
    """Same corner cases as above, but for issubclass() /
    PyObject_IsSubclass() instead of isinstance()."""
    def test_dont_mask_non_attribute_error(self):
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        class S(C): pass
        self.assertRaises(RuntimeError, issubclass, C(), S())
    def test_mask_attribute_error(self):
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        class S(C): pass
        self.assertRaises(TypeError, issubclass, C(), S())
    # Like above, but test the second branch, where the __bases__ of the
    # second arg (the cls arg) is tested. This means the first arg must
    # return a valid __bases__, and it's okay for it to be a normal --
    # unrelated by inheritance -- class.
    def test_dont_mask_non_attribute_error_in_cls_arg(self):
        class B: pass
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)
        self.assertRaises(RuntimeError, issubclass, B, C())
    def test_mask_attribute_error_in_cls_arg(self):
        class B: pass
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)
        self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
    """Stand-in for an extension 'class': exposes a __bases__ property and
    yields AbstractInstance objects when called like a class."""
    def __init__(self, bases):
        self.bases = bases
    def __call__(self):
        # "Instantiating" the fake class produces a fake instance.
        return AbstractInstance(self)
    def getbases(self):
        # Getter backing the fake __bases__ attribute below.
        return self.bases
    __bases__ = property(getbases)
class AbstractInstance(object):
    """Stand-in for an extension-type instance: exposes __class__ via a
    property returning the fake class it was built from."""
    def __init__(self, klass):
        self.klass = klass
    def getclass(self):
        # Getter backing the fake __class__ attribute below.
        return self.klass
    __class__ = property(getclass)
# abstract classes (fake classes built from the helpers above)
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal (old-style) classes
class Super:
    pass
class Child(Super):
    pass
# new-style classes
class NewSuper(object):
    pass
class NewChild(NewSuper):
    pass
class TestIsInstanceIsSubclass(unittest.TestCase):
    """All combinations of normal/abstract classes and instances with
    isinstance() and issubclass()."""
    # Tests to ensure that isinstance and issubclass work on abstract
    # classes and instances. Before the 2.2 release, TypeErrors were
    # raised when boolean values should have been returned. The bug was
    # triggered by mixing 'normal' classes and instances were with
    # 'abstract' classes and instances. This case tries to test all
    # combinations.
    def test_isinstance_normal(self):
        # normal instances
        self.assertEqual(True, isinstance(Super(), Super))
        self.assertEqual(False, isinstance(Super(), Child))
        self.assertEqual(False, isinstance(Super(), AbstractSuper))
        self.assertEqual(False, isinstance(Super(), AbstractChild))
        self.assertEqual(True, isinstance(Child(), Super))
        self.assertEqual(False, isinstance(Child(), AbstractSuper))
    def test_isinstance_abstract(self):
        # abstract instances
        self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
        self.assertEqual(False, isinstance(AbstractSuper(), Super))
        self.assertEqual(False, isinstance(AbstractSuper(), Child))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractChild(), Super))
        self.assertEqual(False, isinstance(AbstractChild(), Child))
    def test_subclass_normal(self):
        # normal classes
        self.assertEqual(True, issubclass(Super, Super))
        self.assertEqual(False, issubclass(Super, AbstractSuper))
        self.assertEqual(False, issubclass(Super, Child))
        self.assertEqual(True, issubclass(Child, Child))
        self.assertEqual(True, issubclass(Child, Super))
        self.assertEqual(False, issubclass(Child, AbstractSuper))
    def test_subclass_abstract(self):
        # abstract classes
        self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
        self.assertEqual(False, issubclass(AbstractSuper, Child))
        self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
        self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractChild, Super))
        self.assertEqual(False, issubclass(AbstractChild, Child))
    def test_subclass_tuple(self):
        # test with a tuple as the second argument classes
        self.assertEqual(True, issubclass(Child, (Child,)))
        self.assertEqual(True, issubclass(Child, (Super,)))
        self.assertEqual(False, issubclass(Super, (Child,)))
        self.assertEqual(True, issubclass(Super, (Child, Super)))
        self.assertEqual(False, issubclass(Child, ()))
        self.assertEqual(True, issubclass(Super, (Child, (Super,))))
        self.assertEqual(True, issubclass(NewChild, (NewChild,)))
        self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
        self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
        self.assertEqual(False, issubclass(NewChild, ()))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
        self.assertEqual(True, issubclass(int, (long, (float, int))))
        if test_support.have_unicode:
            self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))
    def test_subclass_recursion_limit(self):
        # make sure that issubclass raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
    def test_isinstance_recursion_limit(self):
        # make sure that issubclass raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
    """Call fxn(arg, nested) with a tuple nested more deeply than the
    recursion limit, so fxn must raise RuntimeError instead of blowing
    the C stack."""
    nested = (compare_to,)
    depth = sys.getrecursionlimit() + 5
    for _ in xrange(depth):
        nested = (nested,)
    fxn(arg, nested)
def test_main():
    """Entry point used by the regression-test driver."""
    test_support.run_unittest(
        TestIsInstanceExceptions,
        TestIsSubclassExceptions,
        TestIsInstanceIsSubclass
    )
if __name__ == '__main__':
    test_main()
| bsd-2-clause | 1,566,068,520,075,555,600 | -448,324,297,075,740,800 | 33.77305 | 93 | 0.63255 | false |
parkrrr/skybot | plugins/snopes.py | 22 | 1057 | import re
from util import hook, http
search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000"
@hook.command
def snopes(inp):
    ".snopes <topic> -- searches snopes for an urban legend about <topic>"
    # Query the site-search service and take the first on-site result.
    search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
    result_urls = search_page.xpath("//a[@target='_self']/@href")
    if not result_urls:
        return "no matching pages found"
    # Scrape the article text and pull out the "Claim:" and "Status:" lines.
    snopes_page = http.get_html(result_urls[0])
    snopes_text = snopes_page.text_content()
    claim = re.search(r"Claim: .*", snopes_text).group(0).strip()
    status = re.search(r"Status: .*", snopes_text)
    if status is not None:
        status = status.group(0).strip()
    else: # new-style statuses
        # Newer pages carry a bare verdict word instead of a Status line.
        status = "Status: %s." % re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED",
                                           snopes_text).group(0).title()
    claim = re.sub(r"[\s\xa0]+", " ", claim) # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", status)
    return "%s %s %s" % (claim, status, result_urls[0])
| unlicense | -7,031,072,641,661,046,000 | -1,127,114,956,645,852,500 | 30.088235 | 78 | 0.600757 | false |
ldoktor/autotest | frontend/afe/model_logic.py | 4 | 43717 | """
Extensions to Django's model logic.
"""
import re
import django.core.exceptions
from django.db import models as dbmodels, backend, connection
from django.db.models.sql import query
import django.db.models.sql.where
from django.utils import datastructures
from autotest.frontend.afe import readonly_connection
class ValidationError(Exception):
    """\
    Data validation error in adding or updating an object.

    The associated value is a dictionary mapping field names to error
    strings.
    """
def _wrap_with_readonly(method):
    """Return a wrapper around ``method`` that runs it with Django pointed
    at the read-only database connection, restoring the normal connection
    afterwards."""
    def wrapper_method(*args, **kwargs):
        readonly_connection.connection().set_django_connection()
        try:
            return method(*args, **kwargs)
        finally:
            readonly_connection.connection().unset_django_connection()
    # Preserve the wrapped method's name for introspection/debugging.
    wrapper_method.__name__ = method.__name__
    return wrapper_method
def _quote_name(name):
    """Shorthand for connection.ops.quote_name(): quote ``name`` with the
    current database backend's identifier quoting."""
    return connection.ops.quote_name(name)
def _wrap_generator_with_readonly(generator):
    """
    We have to wrap generators specially. Assume it performs
    the query on the first call to next().

    Only the first next() call is executed under the read-only
    connection; subsequent values are yielded without switching
    connections again.
    """
    def wrapper_generator(*args, **kwargs):
        generator_obj = generator(*args, **kwargs)
        readonly_connection.connection().set_django_connection()
        try:
            # The underlying DB query happens here, on the first next().
            first_value = generator_obj.next()
        finally:
            readonly_connection.connection().unset_django_connection()
        yield first_value
        while True:
            yield generator_obj.next()
    # Preserve the wrapped generator's name for introspection/debugging.
    wrapper_generator.__name__ = generator.__name__
    return wrapper_generator
def _make_queryset_readonly(queryset):
    """
    Wrap all methods that do database queries with a readonly connection.
    The queryset is patched in place.
    """
    for method_name in ('count', 'get', 'get_or_create', 'latest',
                        'in_bulk', 'delete'):
        original = getattr(queryset, method_name)
        setattr(queryset, method_name, _wrap_with_readonly(original))
    # iterator() is a generator, so it needs the generator-aware wrapper.
    queryset.iterator = _wrap_generator_with_readonly(queryset.iterator)
class ReadonlyQuerySet(dbmodels.query.QuerySet):
    """
    QuerySet object that performs all database queries with the read-only
    connection.
    """
    def __init__(self, model=None, *args, **kwargs):
        super(ReadonlyQuerySet, self).__init__(model, *args, **kwargs)
        _make_queryset_readonly(self)
    def values(self, *fields):
        # Ensure values() results stay on the read-only connection too.
        return self._clone(klass=ReadonlyValuesQuerySet,
                           setup=True, _fields=fields)
class ReadonlyValuesQuerySet(dbmodels.query.ValuesQuerySet):
    """ValuesQuerySet counterpart of ReadonlyQuerySet: all database
    queries run on the read-only connection."""
    def __init__(self, model=None, *args, **kwargs):
        super(ReadonlyValuesQuerySet, self).__init__(model, *args, **kwargs)
        _make_queryset_readonly(self)
class ExtendedManager(dbmodels.Manager):
    """\
    Extended manager supporting subquery filtering via custom SQL joins
    and injected WHERE clauses.
    """
    class CustomQuery(query.Query):
        """Query subclass that carries a list of hand-built join
        specifications (``_custom_joins``) alongside Django's own."""
        def __init__(self, *args, **kwargs):
            super(ExtendedManager.CustomQuery, self).__init__(*args, **kwargs)
            self._custom_joins = []
        def clone(self, klass=None, **kwargs):
            # Copy the custom join list so clones don't share state.
            obj = super(ExtendedManager.CustomQuery, self).clone(klass)
            obj._custom_joins = list(self._custom_joins)
            return obj
        def combine(self, rhs, connector):
            # Merge custom joins when two queries are combined with & or |.
            super(ExtendedManager.CustomQuery, self).combine(rhs, connector)
            if hasattr(rhs, '_custom_joins'):
                self._custom_joins.extend(rhs._custom_joins)
        def add_custom_join(self, table, condition, join_type,
                            condition_values=(), alias=None):
            """Record a join of ``join_type`` to ``table`` (AS ``alias``)
            with the given ON ``condition`` and substitution values."""
            if alias is None:
                alias = table
            join_dict = dict(table=table,
                             condition=condition,
                             condition_values=condition_values,
                             join_type=join_type,
                             alias=alias)
            self._custom_joins.append(join_dict)
        @classmethod
        def convert_query(self, query_set):
            """
            Convert the query set's "query" attribute to a CustomQuery.
            """
            # Make a copy of the query set
            query_set = query_set.all()
            query_set.query = query_set.query.clone(
                klass=ExtendedManager.CustomQuery,
                _custom_joins=[])
            return query_set
    class _WhereClause(object):
        """Object allowing us to inject arbitrary SQL into Django queries.
        By using this instead of extra(where=...), we can still freely combine
        queries with & and |.
        """
        def __init__(self, clause, values=()):
            self._clause = clause
            self._values = values
        def as_sql(self, qn=None, connection=None):
            # Emit the raw clause and its substitution values verbatim.
            return self._clause, self._values
        def relabel_aliases(self, change_map):
            # Raw SQL is not alias-aware; nothing to relabel.
            return
    def add_join(self, query_set, join_table, join_key, join_condition='',
                 join_condition_values=(), join_from_key=None, alias=None,
                 suffix='', exclude=False, force_left_join=False):
        """Add a join to query_set.
        Join looks like this:
                (INNER|LEFT) JOIN <join_table> AS <alias>
                    ON (<this table>.<join_from_key> = <join_table>.<join_key>
                        and <join_condition>)
        @param join_table table to join to
        @param join_key field referencing back to this model to use for the join
        @param join_condition extra condition for the ON clause of the join
        @param join_condition_values values to substitute into join_condition
        @param join_from_key column on this model to join from.
        @param alias alias to use for for join
        @param suffix suffix to add to join_table for the join alias, if no
                alias is provided
        @param exclude if true, exclude rows that match this join (will use a
        LEFT OUTER JOIN and an appropriate WHERE condition)
        @param force_left_join - if true, a LEFT OUTER JOIN will be used
        instead of an INNER JOIN regardless of other options
        @return a new query set with the join applied (the input query_set
        is not modified).
        """
        join_from_table = query_set.model._meta.db_table
        if join_from_key is None:
            # Default to joining from this model's primary key.
            join_from_key = self.model._meta.pk.name
        if alias is None:
            alias = join_table + suffix
        full_join_key = _quote_name(alias) + '.' + _quote_name(join_key)
        full_join_condition = '%s = %s.%s' % (full_join_key,
                                              _quote_name(join_from_table),
                                              _quote_name(join_from_key))
        if join_condition:
            full_join_condition += ' AND (' + join_condition + ')'
        if exclude or force_left_join:
            join_type = query_set.query.LOUTER
        else:
            join_type = query_set.query.INNER
        query_set = self.CustomQuery.convert_query(query_set)
        query_set.query.add_custom_join(join_table,
                                        full_join_condition,
                                        join_type,
                                        condition_values=join_condition_values,
                                        alias=alias)
        if exclude:
            # Anti-join: keep only rows with no match on the joined table.
            query_set = query_set.extra(where=[full_join_key + ' IS NULL'])
        return query_set
    def _info_for_many_to_one_join(self, field, join_to_query, alias):
        """
        Build the join description dict used by join_custom_field() for a
        many-to-one (ForeignKey) relationship.

        @param field: the ForeignKey field on the related model
        @param join_to_query: the query over the related model that we're
                joining to
        @param alias: alias of joined table
        @return: dict with keys rhs_table, rhs_column, lhs_column,
                where_clause and values.
        """
        info = {}
        rhs_table = join_to_query.model._meta.db_table
        info['rhs_table'] = rhs_table
        info['rhs_column'] = field.column
        info['lhs_column'] = field.rel.get_related_field().column
        # Fold join_to_query's WHERE into the join condition, rewriting
        # table references to use the chosen alias.
        rhs_where = join_to_query.query.where
        rhs_where.relabel_aliases({rhs_table: alias})
        compiler = join_to_query.query.get_compiler(using=join_to_query.db)
        where_clause, values = rhs_where.as_sql(
            compiler.quote_name_unless_alias,
            compiler.connection)
        info['where_clause'] = where_clause
        info['values'] = values
        return info
    def _info_for_many_to_many_join(self, m2m_field, join_to_query, alias,
                                    m2m_is_on_this_model):
        """
        Build the join description dict used by join_custom_field() for a
        many-to-many relationship (the join targets the pivot table).

        @param m2m_field: a Django field representing the M2M relationship.
                It uses a pivot table with the following structure:
                this model table <---> M2M pivot table <---> joined model table
        @param join_to_query: the query over the related model that we're
                joining to.
        @param alias: alias of joined table
        @param m2m_is_on_this_model: True when the M2M field is declared on
                this manager's model rather than on the related model.
        @return: dict with keys rhs_table, rhs_column, lhs_column,
                where_clause and values.
        """
        if m2m_is_on_this_model:
            # referenced field on this model
            lhs_id_field = self.model._meta.pk
            # foreign key on the pivot table referencing lhs_id_field
            m2m_lhs_column = m2m_field.m2m_column_name()
            # foreign key on the pivot table referencing rhd_id_field
            m2m_rhs_column = m2m_field.m2m_reverse_name()
            # referenced field on related model
            rhs_id_field = m2m_field.rel.get_related_field()
        else:
            lhs_id_field = m2m_field.rel.get_related_field()
            m2m_lhs_column = m2m_field.m2m_reverse_name()
            m2m_rhs_column = m2m_field.m2m_column_name()
            rhs_id_field = join_to_query.model._meta.pk
        info = {}
        info['rhs_table'] = m2m_field.m2m_db_table()
        info['rhs_column'] = m2m_lhs_column
        info['lhs_column'] = lhs_id_field.column
        # select the ID of related models relevant to this join. we can only do
        # a single join, so we need to gather this information up front and
        # include it in the join condition.
        rhs_ids = join_to_query.values_list(rhs_id_field.attname, flat=True)
        assert len(rhs_ids) == 1, ('Many-to-many custom field joins can only '
                                   'match a single related object.')
        rhs_id = rhs_ids[0]
        info['where_clause'] = '%s.%s = %s' % (_quote_name(alias),
                                               _quote_name(m2m_rhs_column),
                                               rhs_id)
        info['values'] = ()
        return info
    def join_custom_field(self, query_set, join_to_query, alias,
                          left_join=True):
        """Join to a related model to create a custom field in the given query.
        This method is used to construct a custom field on the given query based
        on a many-valued relationsip. join_to_query should be a simple query
        (no joins) on the related model which returns at most one related row
        per instance of this model.
        For many-to-one relationships, the joined table contains the matching
        row from the related model it one is related, NULL otherwise.
        For many-to-many relationships, the joined table contains the matching
        row if it's related, NULL otherwise.
        @return: a new query set with the join applied.
        """
        # Pick the join recipe based on how the related model connects
        # to this one; determine_relationship() raises for no relation.
        relationship_type, field = self.determine_relationship(
                join_to_query.model)
        if relationship_type == self.MANY_TO_ONE:
            info = self._info_for_many_to_one_join(field, join_to_query, alias)
        elif relationship_type == self.M2M_ON_RELATED_MODEL:
            info = self._info_for_many_to_many_join(
                    m2m_field=field, join_to_query=join_to_query, alias=alias,
                    m2m_is_on_this_model=False)
        elif relationship_type ==self.M2M_ON_THIS_MODEL:
            info = self._info_for_many_to_many_join(
                    m2m_field=field, join_to_query=join_to_query, alias=alias,
                    m2m_is_on_this_model=True)
        return self.add_join(query_set, info['rhs_table'], info['rhs_column'],
                             join_from_key=info['lhs_column'],
                             join_condition=info['where_clause'],
                             join_condition_values=info['values'],
                             alias=alias,
                             force_left_join=left_join)
    def key_on_joined_table(self, join_to_query):
        """Get a non-null column on the table joined for the given query.
        This analyzes the join that would be produced if join_to_query were
        passed to join_custom_field.
        """
        relationship_type, field = self.determine_relationship(
                join_to_query.model)
        if relationship_type == self.MANY_TO_ONE:
            # The related table's primary key is always non-null.
            return join_to_query.model._meta.pk.column
        return field.m2m_column_name() # any column on the M2M table will do
    def add_where(self, query_set, where, values=()):
        """Return a copy of ``query_set`` with raw SQL ``where`` (and its
        substitution ``values``) ANDed into its WHERE clause."""
        query_set = query_set.all()
        query_set.query.where.add(self._WhereClause(where, values),
                                  django.db.models.sql.where.AND)
        return query_set
def _get_quoted_field(self, table, field):
return _quote_name(table) + '.' + _quote_name(field)
    def get_key_on_this_table(self, key_field=None):
        """Return the fully-qualified, quoted 'table.column' string for
        ``key_field`` on this model (defaults to the primary key)."""
        if key_field is None:
            # default to primary key
            key_field = self.model._meta.pk.column
        return self._get_quoted_field(self.model._meta.db_table, key_field)
def escape_user_sql(self, sql):
return sql.replace('%', '%%')
def _custom_select_query(self, query_set, selects):
    """Execute query_set's SQL with a custom SELECT column list.

    @param selects: list of raw SQL select expressions.
    @returns all result rows (list of tuples) from the read-only connection.
    """
    compiler = query_set.query.get_compiler(using=query_set.db)
    sql, params = compiler.as_sql()
    # keep everything from FROM onwards, replacing the SELECT clause.
    # NOTE(review): assumes the first ' FROM' in the SQL begins the main
    # FROM clause -- a subquery inside the select list would break this.
    from_ = sql[sql.find(' FROM'):]
    if query_set.query.distinct:
        distinct = 'DISTINCT '
    else:
        distinct = ''
    sql_query = ('SELECT ' + distinct + ','.join(selects) + from_)
    cursor = readonly_connection.connection().cursor()
    cursor.execute(sql_query, params)
    return cursor.fetchall()
def _is_relation_to(self, field, model_class):
return field.rel and field.rel.to is model_class
# Sentinel objects identifying the kind of relationship found by
# determine_relationship(); callers compare against these by identity.
MANY_TO_ONE = object()
M2M_ON_RELATED_MODEL = object()
M2M_ON_THIS_MODEL = object()
def determine_relationship(self, related_model):
    """Classify how related_model is linked to this manager's model.

    related_model must have some sort of many-valued relationship to
    self.model.

    @returns (relationship_type, field), where relationship_type is one of
        MANY_TO_ONE, M2M_ON_RELATED_MODEL, M2M_ON_THIS_MODEL, and field is
        the Django field object implementing the relationship.
    @raises ValueError when no relationship exists.
    """
    # a foreign key on related_model pointing back at self.model?
    for candidate in related_model._meta.fields:
        if self._is_relation_to(candidate, self.model):
            return self.MANY_TO_ONE, candidate
    # an M2M field declared on related_model?
    for candidate in related_model._meta.many_to_many:
        if self._is_relation_to(candidate, self.model):
            return self.M2M_ON_RELATED_MODEL, candidate
    # maybe the M2M field lives on this model instead
    for candidate in self.model._meta.many_to_many:
        if self._is_relation_to(candidate, related_model):
            return self.M2M_ON_THIS_MODEL, candidate
    raise ValueError('%s has no relation to %s' %
                     (related_model, self.model))
def _get_pivot_iterator(self, base_objects_by_id, related_model):
    """
    Determine the relationship between this model and related_model, and
    return a pivot iterator.
    @param base_objects_by_id: dict of instances of this model indexed by
    their IDs
    @returns a pivot iterator, which yields a tuple (base_object,
    related_object) for each relationship between a base object and a
    related object.  all base_object instances come from base_objects_by_id.
    Note -- this depends on Django model internals.
    """
    relationship_type, field = self.determine_relationship(related_model)
    if relationship_type == self.MANY_TO_ONE:
        return self._many_to_one_pivot(base_objects_by_id,
                                       related_model, field)
    elif relationship_type == self.M2M_ON_RELATED_MODEL:
        # the M2M table's "reverse" column points at our model, the plain
        # column at the related model
        return self._many_to_many_pivot(
                base_objects_by_id, related_model, field.m2m_db_table(),
                field.m2m_reverse_name(), field.m2m_column_name())
    else:
        assert relationship_type == self.M2M_ON_THIS_MODEL
        # M2M declared on this model: column roles are swapped relative to
        # the branch above
        return self._many_to_many_pivot(
                base_objects_by_id, related_model, field.m2m_db_table(),
                field.m2m_column_name(), field.m2m_reverse_name())
def _many_to_one_pivot(self, base_objects_by_id, related_model,
                       foreign_key_field):
    """
    @returns a pivot iterator - see _get_pivot_iterator()
    """
    lookup = {foreign_key_field.name + '__pk__in':
              base_objects_by_id.keys()}
    for related_object in related_model.objects.filter(**lookup):
        # return instances from the supplied dict rather than fetching the
        # base object through the related object: the latter would yield
        # fresh model instances and cost one DB query per row.
        base_pk = getattr(related_object, foreign_key_field.attname)
        yield base_objects_by_id[base_pk], related_object
def _query_pivot_table(self, base_objects_by_id, pivot_table,
                       pivot_from_field, pivot_to_field):
    """
    @param base_objects_by_id dict of self.model instances indexed by ID;
            only these IDs are included in the query
    @param pivot_table the name of the pivot table
    @param pivot_from_field a field name on pivot_table referencing
    self.model
    @param pivot_to_field a field name on pivot_table referencing the
    related model.
    @returns pivot list of IDs (base_id, related_id)
    """
    # NOTE(review): the IDs are interpolated directly into the SQL; they
    # come from model primary keys (integers), which looks injection-safe,
    # but confirm no caller passes untrusted keys.
    query = """
    SELECT %(from_field)s, %(to_field)s
    FROM %(table)s
    WHERE %(from_field)s IN (%(id_list)s)
    """ % dict(from_field=pivot_from_field,
               to_field=pivot_to_field,
               table=pivot_table,
               id_list=','.join(str(id_) for id_
                                in base_objects_by_id.iterkeys()))
    cursor = readonly_connection.connection().cursor()
    cursor.execute(query)
    return cursor.fetchall()
def _many_to_many_pivot(self, base_objects_by_id, related_model,
                        pivot_table, pivot_from_field, pivot_to_field):
    """
    @param pivot_table: see _query_pivot_table
    @param pivot_from_field: see _query_pivot_table
    @param pivot_to_field: see _query_pivot_table
    @returns a pivot iterator - see _get_pivot_iterator()
    """
    id_pivot = self._query_pivot_table(base_objects_by_id, pivot_table,
                                       pivot_from_field, pivot_to_field)
    # bulk-fetch every distinct related row with a single query
    unique_related_ids = set(related_id for _, related_id in id_pivot)
    related_objects_by_id = related_model.objects.in_bulk(
            list(unique_related_ids))
    for base_id, related_id in id_pivot:
        yield (base_objects_by_id[base_id],
               related_objects_by_id[related_id])
def populate_relationships(self, base_objects, related_model,
                           related_list_name):
    """Attach a list of related objects to every instance in base_objects.

    After the call, each object in base_objects has an attribute named
    related_list_name containing all related_model instances related to
    it.  related_model must be in a many-to-one or many-to-many
    relationship with this model.

    @param base_objects - list of instances of this model
    @param related_model - model class related to this model
    @param related_list_name - attribute name in which to store the
            related object list.
    """
    if not base_objects:
        # bail early; an empty IN () clause would be a SQL error later
        return
    by_id = dict((obj._get_pk_val(), obj) for obj in base_objects)
    # start every base object off with an empty list...
    for obj in base_objects:
        setattr(obj, related_list_name, [])
    # ...then append each related object to its base object's list
    pivot = self._get_pivot_iterator(by_id, related_model)
    for base_obj, related_obj in pivot:
        getattr(base_obj, related_list_name).append(related_obj)
class ModelWithInvalidQuerySet(dbmodels.query.QuerySet):
    """QuerySet whose delete() respects models carrying an "invalid" bit.

    A bulk SQL delete would bypass the model-level delete() that merely
    marks rows invalid, so delete each instance individually instead.
    """
    def delete(self):
        for instance in iter(self):
            instance.delete()
class ModelWithInvalidManager(ExtendedManager):
    """Manager for models that carry an "invalid" bit."""
    def get_query_set(self):
        # hand back the specialized QuerySet so bulk delete() is safe
        return ModelWithInvalidQuerySet(self.model)
class ValidObjectsManager(ModelWithInvalidManager):
    """Manager that filters out rows marked invalid."""
    def get_query_set(self):
        base = super(ValidObjectsManager, self).get_query_set()
        return base.filter(invalid=False)
class ModelExtensions(object):
    """\
    Mixin with convenience functions for models, built on top of the
    default Django model functions.
    """
    # TODO: at least some of these functions really belong in a custom
    # Manager class
    # lazily-built cache of field name -> field object; see get_field_dict()
    field_dict = None
    # subclasses should override if they want to support smart_get() by name
    name_field = None
@classmethod
def get_field_dict(cls):
    """Return (building lazily on first use) a map of field name -> field."""
    if cls.field_dict is None:
        cls.field_dict = dict((field.name, field)
                              for field in cls._meta.fields)
    return cls.field_dict
@classmethod
def clean_foreign_keys(cls, data):
    """\
    Normalize foreign key entries in `data`, in place:
    - rename <field>_id keys to plain <field>
    - replace model instances with their primary key values
    """
    for fk_field in cls._meta.fields:
        if not fk_field.rel:
            continue
        attname, name = fk_field.attname, fk_field.name
        if attname != name and attname in data:
            # move the value from the DB column name to the field name
            data[name] = data.pop(attname)
        if name in data and isinstance(data[name], dbmodels.Model):
            data[name] = data[name]._get_pk_val()
@classmethod
def _convert_booleans(cls, data):
    """
    Coerce values for BooleanFields in `data` to real bools, in place.
    The Django MySQL backend returns ints for BooleanFields, which is
    almost never a problem but can be annoying in certain situations.
    """
    for model_field in cls._meta.fields:
        is_bool_field = type(model_field) == dbmodels.BooleanField
        if is_bool_field and model_field.name in data:
            data[model_field.name] = bool(data[model_field.name])
# TODO(showard) - is there a way to not have to do this?
@classmethod
def provide_default_values(cls, data):
    """\
    Return a copy of `data` with declared defaults filled in for fields
    the caller left unset (or set to None).
    For CharField and TextField fields with "blank=True", an empty string
    is supplied even when the field declares no default.
    """
    filled = dict(data)
    for name, field_obj in cls.get_field_dict().iteritems():
        if data.get(name) is not None:
            continue
        if field_obj.default is not dbmodels.fields.NOT_PROVIDED:
            filled[name] = field_obj.default
        elif isinstance(field_obj, (dbmodels.CharField,
                                    dbmodels.TextField)):
            filled[name] = ''
    return filled
@classmethod
def convert_human_readable_values(cls, data, to_human_readable=False):
    """\
    Performs conversions on user-supplied field data, to make it
    easier for users to pass human-readable data.
    For all fields that have choice sets, convert their values
    from human-readable strings to enum values, if necessary.  This
    allows users to pass strings instead of the corresponding
    integer values.
    For all foreign key fields, call smart_get with the supplied
    data.  This allows the user to pass either an ID value or
    the name of the object as a string.
    If to_human_readable=True, perform the inverse - i.e. convert
    numeric values to human readable values.
    This method modifies data in-place.
    """
    field_dict = cls.get_field_dict()
    for field_name in data:
        # skip unknown fields and explicit Nones untouched
        if field_name not in field_dict or data[field_name] is None:
            continue
        field_obj = field_dict[field_name]
        # convert enum values
        if field_obj.choices:
            for choice_data in field_obj.choices:
                # choice_data is (value, name)
                if to_human_readable:
                    from_val, to_val = choice_data
                else:
                    to_val, from_val = choice_data
                if from_val == data[field_name]:
                    data[field_name] = to_val
                    break
        # convert foreign key values
        elif field_obj.rel:
            dest_obj = field_obj.rel.to.smart_get(data[field_name],
                                                  valid_only=False)
            if to_human_readable:
                # if the related model has no name_field, the original
                # value is left as-is
                if dest_obj.name_field is not None:
                    data[field_name] = getattr(dest_obj,
                                               dest_obj.name_field)
            else:
                data[field_name] = dest_obj
@classmethod
def validate_field_names(cls, data):
    """Return {field_name: error message} for keys in `data` that do not
    correspond to any field on this model."""
    known_fields = cls.get_field_dict()
    return dict((name, 'No field of this name')
                for name in data if name not in known_fields)
@classmethod
def prepare_data_args(cls, data, kwargs):
    """Common preparation for add_object() and update_object().

    Merges kwargs into a copy of data, rejects unknown field names and
    converts human-readable values.  Returns the prepared dict.
    """
    merged = dict(data)  # never mutate the caller's (possibly default) dict
    merged.update(kwargs)
    # extraneous field names must be caught here, while the values are
    # still held in a plain dict
    bad_fields = cls.validate_field_names(merged)
    if bad_fields:
        raise ValidationError(bad_fields)
    cls.convert_human_readable_values(merged)
    return merged
def _validate_unique(self):
    """\
    Validate that unique fields are unique.  Django manipulators do
    this too, but they're a huge pain to use manually.  Trust me.

    @returns dict of field_name -> error message (empty when valid).
    """
    errors = {}
    cls = type(self)
    field_dict = self.get_field_dict()
    manager = cls.get_valid_manager()
    for field_name, field_obj in field_dict.iteritems():
        if not field_obj.unique:
            continue
        value = getattr(self, field_name)
        if value is None and field_obj.auto_created:
            # don't bother checking autoincrement fields about to be
            # generated
            continue
        existing_objs = manager.filter(**{field_name : value})
        num_existing = existing_objs.count()
        if num_existing == 0:
            continue
        # a single match that is this very object is fine (update case)
        if num_existing == 1 and existing_objs[0].id == self.id:
            continue
        errors[field_name] = (
                'This value must be unique (%s)' % (value))
    return errors
def _validate(self):
    """
    First coerces all fields on this instance to their proper Python types.
    Then runs validation on every field.  Returns a dictionary of
    field_name -> error_list.
    Based on validate() from django.db.models.Model in Django 0.96, which
    was removed in Django 1.0.  It should reappear in a later version.  See:
    http://code.djangoproject.com/ticket/6845
    """
    error_dict = {}
    for f in self._meta.fields:
        try:
            # coerce to the field's Python type, falling back to the
            # field default when the attribute is unset
            python_value = f.to_python(
                    getattr(self, f.attname, f.get_default()))
        except django.core.exceptions.ValidationError, e:
            error_dict[f.name] = str(e)
            continue
        # non-blank fields must have a truthy coerced value
        if not f.blank and not python_value:
            error_dict[f.name] = 'This field is required.'
            continue
        # write the coerced value back onto the instance
        setattr(self, f.attname, python_value)
    return error_dict
def do_validate(self):
    """Run field and uniqueness validation; raise ValidationError if any
    check fails."""
    errors = self._validate()
    # field-level errors take precedence over uniqueness errors
    for field_name, error in self._validate_unique().iteritems():
        errors.setdefault(field_name, error)
    if errors:
        raise ValidationError(errors)
# actually (externally) useful methods follow
@classmethod
def add_object(cls, data={}, **kwargs):
    """\
    Create, validate, save and return a new object.

    `data` maps field names to values; any extra keyword args are merged
    in.  (The mutable default is safe here: prepare_data_args() copies it.)
    """
    prepared = cls.prepare_data_args(data, kwargs)
    prepared = cls.provide_default_values(prepared)
    new_obj = cls(**prepared)
    new_obj.do_validate()
    new_obj.save()
    return new_obj
def update_object(self, data={}, **kwargs):
    """\
    Update this object from `data` (a dict of field name -> value),
    merging any extra keyword args into data, then validate and save.
    """
    prepared = self.prepare_data_args(data, kwargs)
    for field_name, value in prepared.iteritems():
        setattr(self, field_name, value)
    self.do_validate()
    self.save()
# see query_objects()
_SPECIAL_FILTER_KEYS = ('query_start', 'query_limit', 'sort_by',
                        'extra_args', 'extra_where', 'no_distinct')

@classmethod
def _extract_special_params(cls, filter_data):
    """
    @returns a tuple of dicts (special_params, regular_filters), where
    special_params contains the parameters we handle specially and
    regular_filters is the remaining data to be handled by Django.
    """
    regular_filters = dict(filter_data)
    special_params = dict((key, regular_filters.pop(key))
                          for key in cls._SPECIAL_FILTER_KEYS
                          if key in regular_filters)
    return special_params, regular_filters
@classmethod
def apply_presentation(cls, query, filter_data):
    """
    Apply presentation parameters -- sorting and paging -- from
    filter_data to the given query.
    @returns a new query with ordering and slicing applied
    """
    special, _ = cls._extract_special_params(filter_data)
    ordering = special.get('sort_by', None)
    if ordering:
        assert isinstance(ordering, (list, tuple))
        query = query.extra(order_by=ordering)
    start = special.get('query_start', None)
    limit = special.get('query_limit', None)
    if start is not None:
        if limit is None:
            raise ValueError('Cannot pass query_start without query_limit')
        # query_limit arrives as a page size; convert it to an end index
        limit += start
    return query[start:limit]
@classmethod
def query_objects(cls, filter_data, valid_only=True, initial_query=None,
                  apply_presentation=True):
    """\
    Returns a QuerySet object for querying the given model_class
    with the given filter_data.  Optional special arguments in
    filter_data include:
    -query_start: index of first return to return
    -query_limit: maximum number of results to return
    -sort_by: list of fields to sort on.  prefixing a '-' onto a
    field name changes the sort to descending order.
    -extra_args: keyword args to pass to query.extra() (see Django
    DB layer documentation)
    -extra_where: extra WHERE clause to append
    -no_distinct: if True, a DISTINCT will not be added to the SELECT
    """
    special_params, regular_filters = cls._extract_special_params(
            filter_data)
    if initial_query is None:
        if valid_only:
            initial_query = cls.get_valid_manager()
        else:
            initial_query = cls.objects
    query = initial_query.filter(**regular_filters)
    use_distinct = not special_params.get('no_distinct', False)
    if use_distinct:
        query = query.distinct()
    extra_args = special_params.get('extra_args', {})
    extra_where = special_params.get('extra_where', None)
    if extra_where:
        # escape %'s
        extra_where = cls.objects.escape_user_sql(extra_where)
        # NOTE(review): setdefault/append mutates the caller-supplied
        # extra_args dict -- confirm no caller reuses it across queries.
        extra_args.setdefault('where', []).append(extra_where)
    if extra_args:
        query = query.extra(**extra_args)
    # clone into the read-only QuerySet class so results come from the
    # read-only connection
    query = query._clone(klass=ReadonlyQuerySet)
    if apply_presentation:
        query = cls.apply_presentation(query, filter_data)
    return query
@classmethod
def query_count(cls, filter_data, initial_query=None):
    """\
    Like query_objects, but retrieve only the count of results.

    Paging parameters are irrelevant to a count, so they are dropped.
    """
    # BUG FIX: copy before popping so the caller's filter_data dict is
    # not mutated as a side effect of counting.
    filter_data = dict(filter_data)
    filter_data.pop('query_start', None)
    filter_data.pop('query_limit', None)
    query = cls.query_objects(filter_data, initial_query=initial_query)
    return query.count()
@classmethod
def clean_object_dicts(cls, field_dicts):
    """\
    Normalize a list of object dicts (as returned by query.values()) in
    place, making the data more suitable for returning to the user.
    """
    for one_dict in field_dicts:
        cls.clean_foreign_keys(one_dict)
        cls._convert_booleans(one_dict)
        cls.convert_human_readable_values(one_dict,
                                          to_human_readable=True)
@classmethod
def list_objects(cls, filter_data, initial_query=None):
    """\
    Like query_objects, but return a list of field dictionaries.
    """
    query = cls.query_objects(filter_data, initial_query=initial_query)
    # include any extra SELECT columns added via extra_args
    extra = query.query.extra_select.keys()
    return [model_object.get_object_dict(extra_fields=extra)
            for model_object in query]
@classmethod
def smart_get(cls, id_or_name, valid_only=True):
    """\
    smart_get(integer) -> get object by ID
    smart_get(string) -> get object by name_field
    """
    manager = cls.get_valid_manager() if valid_only else cls.objects
    if isinstance(id_or_name, (int, long)):
        return manager.get(pk=id_or_name)
    if isinstance(id_or_name, basestring) and hasattr(cls, 'name_field'):
        return manager.get(**{cls.name_field : id_or_name})
    raise ValueError(
            'Invalid positional argument: %s (%s)' % (id_or_name,
                                                      type(id_or_name)))
@classmethod
def smart_get_bulk(cls, id_or_name_list):
    """smart_get() every entry; raise DoesNotExist naming all failures."""
    missing, found = [], []
    for identifier in id_or_name_list:
        try:
            found.append(cls.smart_get(identifier))
        except cls.DoesNotExist:
            missing.append(identifier)
    if missing:
        raise cls.DoesNotExist('The following %ss do not exist: %s'
                               % (cls.__name__.lower(),
                                  ', '.join(missing)))
    return found
def get_object_dict(self, extra_fields=None):
    """\
    Return a dictionary mapping fields to this object's values.
    @param extra_fields: list of extra attribute names to include, in
    addition to the fields defined on this object.
    """
    field_names = self.get_field_dict().keys()
    if extra_fields:
        field_names += extra_fields
    object_dict = dict((name, getattr(self, name))
                       for name in field_names)
    self.clean_object_dicts([object_dict])
    self._postprocess_object_dict(object_dict)
    return object_dict
def _postprocess_object_dict(self, object_dict):
"""For subclasses to override."""
pass
@classmethod
def get_valid_manager(cls):
    """Return the manager for "valid" objects; the default manager here."""
    return cls.objects
def _record_attributes(self, attributes):
    """
    Snapshot the current values of `attributes` for change detection.
    See on_attribute_changed.
    """
    # a bare string would be iterated character-by-character; require a
    # real sequence of attribute names
    assert not isinstance(attributes, basestring)
    self._recorded_attributes = dict((name, getattr(self, name))
                                     for name in attributes)
def _check_for_updated_attributes(self):
    """
    Fire on_attribute_changed() for every watched attribute whose value
    differs from the recorded snapshot, then re-snapshot.
    See on_attribute_changed.
    """
    for name, old_value in self._recorded_attributes.iteritems():
        current = getattr(self, name)
        if old_value != current:
            self.on_attribute_changed(name, old_value)
    # re-record so later saves compare against the new values
    self._record_attributes(self._recorded_attributes.keys())
def on_attribute_changed(self, attribute, old_value):
    """
    Called whenever a watched attribute is updated.  To be overridden.
    To use this method, you must:
    * call _record_attributes() from __init__() (after making the super
    call) with a list of attributes for which you want to be notified upon
    change.
    * call _check_for_updated_attributes() from save().
    """
    pass
class ModelWithInvalid(ModelExtensions):
    """
    Overrides model methods save() and delete() to support invalidation in
    place of actual deletion.  Subclasses must have a boolean "invalid"
    field.
    """

    def save(self, *args, **kwargs):
        # only the first save (no PK yet) may resurrect an invalidated row
        first_time = (self.id is None)
        if first_time:
            # see if this object was previously added and invalidated
            my_name = getattr(self, self.name_field)
            filters = {self.name_field : my_name, 'invalid' : True}
            try:
                old_object = self.__class__.objects.get(**filters)
                self.resurrect_object(old_object)
            except self.DoesNotExist:
                # no existing object
                pass
        super(ModelWithInvalid, self).save(*args, **kwargs)

    def resurrect_object(self, old_object):
        """
        Called when self is about to be saved for the first time and is actually
        "undeleting" a previously deleted object.  Can be overridden by
        subclasses to copy data as desired from the deleted entry (but this
        superclass implementation must normally be called).
        """
        # take over the old row's primary key so save() updates it in place
        self.id = old_object.id

    def clean_object(self):
        """
        This method is called when an object is marked invalid.
        Subclasses should override this to clean up relationships that
        should no longer exist if the object were deleted.
        """
        pass

    def delete(self):
        # NOTE(review): this self-assignment looks like a no-op;
        # presumably it forces a (deferred) attribute fetch before the
        # assert below -- confirm before removing.
        self.invalid = self.invalid
        assert not self.invalid
        self.invalid = True
        self.save()
        self.clean_object()

    @classmethod
    def get_valid_manager(cls):
        # subclasses are expected to define a ValidObjectsManager named
        # valid_objects
        return cls.valid_objects

    class Manipulator(object):
        """
        Force default manipulators to look only at valid objects -
        otherwise they will match against invalid objects when checking
        uniqueness.
        """
        @classmethod
        def _prepare(cls, model):
            super(ModelWithInvalid.Manipulator, cls)._prepare(model)
            cls.manager = model.valid_objects
class ModelWithAttributes(object):
    """
    Mixin class for models that have an attribute model associated with
    them.  The attribute model is assumed to have its value field named
    "value".
    """

    def _get_attribute_model_and_args(self, attribute):
        """
        Return a tuple (attribute_model, keyword_args) where
        attribute_model.objects.get(**keyword_args) fetches the given
        attribute for this object.  Subclasses must override this.
        """
        raise NotImplementedError

    def set_attribute(self, attribute, value):
        """Create the attribute row if needed and set it to value."""
        model, lookup_args = self._get_attribute_model_and_args(attribute)
        attribute_object, _ = model.objects.get_or_create(**lookup_args)
        attribute_object.value = value
        attribute_object.save()

    def delete_attribute(self, attribute):
        """Delete the attribute row; silently no-op when absent."""
        model, lookup_args = self._get_attribute_model_and_args(attribute)
        try:
            model.objects.get(**lookup_args).delete()
        except model.DoesNotExist:
            pass

    def set_or_delete_attribute(self, attribute, value):
        """Set the attribute to value, or delete it when value is None."""
        if value is None:
            self.delete_attribute(attribute)
        else:
            self.set_attribute(attribute, value)
class ModelWithHashManager(dbmodels.Manager):
    """Manager for use with the ModelWithHash abstract model class"""

    def create(self, **kwargs):
        # plain create() would bypass hash computation; force callers
        # through get_or_create() below
        raise Exception('ModelWithHash manager should use get_or_create() '
                        'instead of create()')

    def get_or_create(self, **kwargs):
        # compute the hash from the supplied kwargs and include it in the
        # lookup/creation arguments
        kwargs['the_hash'] = self.model._compute_hash(**kwargs)
        return super(ModelWithHashManager, self).get_or_create(**kwargs)
class ModelWithHash(dbmodels.Model):
    """Superclass with methods for dealing with a hash column"""

    # 40 chars fits a hex SHA-1 digest
    the_hash = dbmodels.CharField(max_length=40, unique=True)

    objects = ModelWithHashManager()

    class Meta:
        abstract = True

    @classmethod
    def _compute_hash(cls, **kwargs):
        # subclasses define how the row's hash is derived from its fields
        raise NotImplementedError('Subclasses must override _compute_hash()')

    def save(self, force_insert=False, **kwargs):
        """Prevents saving the model in most cases
        We want these models to be immutable, so the generic save() operation
        will not work. These models should be instantiated through the
        model.objects.get_or_create() method instead.
        The exception is that save(force_insert=True) will be allowed, since
        that creates a new row. However, the preferred way to make instances of
        these models is through the get_or_create() method.
        """
        if not force_insert:
            # Allow a forced insert to happen; if it's a duplicate, the unique
            # constraint will catch it later anyways
            raise Exception('ModelWithHash is immutable')
        super(ModelWithHash, self).save(force_insert=force_insert, **kwargs)
| gpl-2.0 | 7,465,274,727,495,590,000 | 5,448,120,751,785,805,000 | 36.39692 | 80 | 0.587918 | false |
djangosporti/python-oauth2 | tests/test_oauth.py | 301 | 53269 | # -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
import mock
import httplib2
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
class TestError(unittest.TestCase):
def test_message(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEqual(e.message, 'OAuth error occurred.')
msg = 'OMG THINGS BROKE!!!!'
try:
raise oauth.Error(msg)
except oauth.Error, e:
self.assertEqual(e.message, msg)
def test_str(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEquals(str(e), 'OAuth error occurred.')
class TestGenerateFunctions(unittest.TestCase):
    """Tests for the module-level helper/generator functions in oauth2."""

    def test_build_auth_header(self):
        # default realm is empty
        header = oauth.build_authenticate_header()
        self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
        self.assertEqual(len(header), 1)
        # an explicit realm is quoted into the header
        realm = 'http://example.myrealm.com/'
        header = oauth.build_authenticate_header(realm)
        self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
                         realm)
        self.assertEqual(len(header), 1)

    def test_build_xoauth_string(self):
        consumer = oauth.Consumer('consumer_token', 'consumer_secret')
        token = oauth.Token('user_token', 'user_secret')
        url = "https://mail.google.com/mail/b/joe@example.com/imap/"
        xoauth_string = oauth.build_xoauth_string(url, consumer, token)
        # format is: METHOD URL comma-separated-oauth-params
        method, oauth_url, oauth_string = xoauth_string.split(' ')
        self.assertEqual("GET", method)
        self.assertEqual(url, oauth_url)
        # parse the name="value" pairs into a dict for inspection
        returned = {}
        parts = oauth_string.split(',')
        for part in parts:
            var, val = part.split('=')
            returned[var] = val.strip('"')
        self.assertEquals('HMAC-SHA1', returned['oauth_signature_method'])
        self.assertEquals('user_token', returned['oauth_token'])
        self.assertEquals('consumer_token', returned['oauth_consumer_key'])
        self.assertTrue('oauth_signature' in returned, 'oauth_signature')

    def test_escape(self):
        # '~' must survive percent-encoding (RFC 3986 unreserved)
        string = 'http://whatever.com/~someuser/?test=test&other=other'
        self.assert_('~' in oauth.escape(string))
        # path traversal sequences must be encoded away
        string = '../../../../../../../etc/passwd'
        self.assert_('../' not in oauth.escape(string))

    def test_gen_nonce(self):
        # default length is 8; an explicit length is honored
        nonce = oauth.generate_nonce()
        self.assertEqual(len(nonce), 8)
        nonce = oauth.generate_nonce(20)
        self.assertEqual(len(nonce), 20)

    def test_gen_verifier(self):
        # default length is 8; an explicit length is honored
        verifier = oauth.generate_verifier()
        self.assertEqual(len(verifier), 8)
        verifier = oauth.generate_verifier(16)
        self.assertEqual(len(verifier), 16)

    def test_gen_timestamp(self):
        # NOTE(review): this can flake if the wall clock ticks over a
        # second between the two calls below.
        exp = int(time.time())
        now = oauth.generate_timestamp()
        self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
    """Tests for oauth.Consumer construction and serialization."""

    def setUp(self):
        self.key = 'my-key'
        self.secret = 'my-secret'
        self.consumer = oauth.Consumer(key=self.key, secret=self.secret)

    def test_init(self):
        # constructor stores key and secret verbatim
        self.assertEqual(self.consumer.key, self.key)
        self.assertEqual(self.consumer.secret, self.secret)

    def test_basic(self):
        # both key and secret are mandatory
        for key, secret in ((None, None), ('asf', None), (None, 'dasf')):
            self.assertRaises(ValueError, oauth.Consumer, key, secret)

    def test_str(self):
        # str() serializes the consumer to a urlencoded query string
        res = dict(parse_qsl(str(self.consumer)))
        self.assertTrue('oauth_consumer_key' in res)
        self.assertTrue('oauth_consumer_secret' in res)
        self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
        self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
    """Tests for oauth.Token: construction, callback/verifier handling and
    (de)serialization via to_string()/from_string()."""

    def setUp(self):
        self.key = 'my-key'
        self.secret = 'my-secret'
        self.token = oauth.Token(self.key, self.secret)

    def test_basic(self):
        # both key and secret are required
        self.assertRaises(ValueError, lambda: oauth.Token(None, None))
        self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
        self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))

    def test_init(self):
        # key/secret stored verbatim; callback/verifier start unset
        self.assertEqual(self.token.key, self.key)
        self.assertEqual(self.token.secret, self.secret)
        self.assertEqual(self.token.callback, None)
        self.assertEqual(self.token.callback_confirmed, None)
        self.assertEqual(self.token.verifier, None)

    def test_set_callback(self):
        self.assertEqual(self.token.callback, None)
        self.assertEqual(self.token.callback_confirmed, None)
        cb = 'http://www.example.com/my-callback'
        self.token.set_callback(cb)
        self.assertEqual(self.token.callback, cb)
        self.assertEqual(self.token.callback_confirmed, 'true')
        self.token.set_callback(None)
        self.assertEqual(self.token.callback, None)
        # TODO: The following test should probably not pass, but it does
        # To fix this, check for None and unset 'true' in set_callback
        # Additionally, should a confirmation truly be done of the callback?
        self.assertEqual(self.token.callback_confirmed, 'true')

    def test_set_verifier(self):
        self.assertEqual(self.token.verifier, None)
        v = oauth.generate_verifier()
        self.token.set_verifier(v)
        self.assertEqual(self.token.verifier, v)
        # calling with no argument generates a fresh verifier
        self.token.set_verifier()
        self.assertNotEqual(self.token.verifier, v)
        # an explicit empty string is stored as-is
        self.token.set_verifier('')
        self.assertEqual(self.token.verifier, '')

    def test_get_callback_url(self):
        self.assertEqual(self.token.get_callback_url(), None)
        self.token.set_verifier()
        self.assertEqual(self.token.get_callback_url(), None)
        # verifier is appended with '&' when a query string already exists
        cb = 'http://www.example.com/my-callback?save=1&return=true'
        v = oauth.generate_verifier()
        self.token.set_callback(cb)
        self.token.set_verifier(v)
        url = self.token.get_callback_url()
        verifier_str = '&oauth_verifier=%s' % v
        self.assertEqual(url, '%s%s' % (cb, verifier_str))
        # ...and with '?' when the callback has no query string
        cb = 'http://www.example.com/my-callback-no-query'
        v = oauth.generate_verifier()
        self.token.set_callback(cb)
        self.token.set_verifier(v)
        url = self.token.get_callback_url()
        verifier_str = '?oauth_verifier=%s' % v
        self.assertEqual(url, '%s%s' % (cb, verifier_str))

    def test_to_string(self):
        # BUG FIX: this test was previously shadowed by a second method of
        # the same name (now renamed test_to_string_simple), so it never ran.
        string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
                                                           self.key)
        self.assertEqual(self.token.to_string(), string)
        # setting a callback adds the confirmation flag to the output
        self.token.set_callback('http://www.example.com/my-callback')
        string += '&oauth_callback_confirmed=true'
        self.assertEqual(self.token.to_string(), string)

    def _compare_tokens(self, new):
        # helper: check round-tripped token fields against self.token
        self.assertEqual(self.token.key, new.key)
        self.assertEqual(self.token.secret, new.secret)
        # TODO: What about copying the callback to the new token?
        # self.assertEqual(self.token.callback, new.callback)
        self.assertEqual(self.token.callback_confirmed,
                         new.callback_confirmed)
        # TODO: What about copying the verifier to the new token?
        # self.assertEqual(self.token.verifier, new.verifier)

    def test_to_string_simple(self):
        # renamed from test_to_string so it no longer shadows the fuller
        # test above; str() and to_string() agree
        tok = oauth.Token('tooken', 'seecret')
        self.assertEqual(str(tok),
                         'oauth_token_secret=seecret&oauth_token=tooken')

    def test_from_string(self):
        # malformed inputs are rejected
        self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
        # a serialized token round-trips
        string = self.token.to_string()
        new = oauth.Token.from_string(string)
        self._compare_tokens(new)
        # ...including when a callback has been set
        self.token.set_callback('http://www.example.com/my-callback')
        string = self.token.to_string()
        new = oauth.Token.from_string(string)
        self._compare_tokens(new)
class ReallyEqualMixin:
    """TestCase mix-in adding an equality assertion that also requires
    both operands to have the same concrete type."""

    def failUnlessReallyEqual(self, a, b, msg=None):
        """Assert a == b and type(a) is type(b)."""
        self.failUnlessEqual(a, b, msg=msg)
        type_msg = "a :: %r, b :: %r, %r" % (a, b, msg)
        self.failUnlessEqual(type(a), type(b), msg=type_msg)
class TestFuncs(unittest.TestCase):
    """Tests for the module-level unicode coercion helpers."""

    def test_to_unicode(self):
        """Non-UTF-8 byte strings are rejected; UTF-8 bytes are decoded."""
        # '\xae' on its own is not valid UTF-8, so coercion must fail loudly.
        self.failUnlessRaises(TypeError, oauth.to_unicode, '\xae')
        self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, '\xae')
        self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, ['\xae'])

        self.failUnlessEqual(oauth.to_unicode(':-)'), u':-)')
        self.failUnlessEqual(oauth.to_unicode(u'\u00ae'), u'\u00ae')
        # '\xc2\xae' is the UTF-8 encoding of U+00AE.
        self.failUnlessEqual(oauth.to_unicode('\xc2\xae'), u'\u00ae')
        self.failUnlessEqual(oauth.to_unicode_optional_iterator([':-)']), [u':-)'])
        self.failUnlessEqual(oauth.to_unicode_optional_iterator([u'\u00ae']), [u'\u00ae'])
class TestRequest(unittest.TestCase, ReallyEqualMixin):
    """Tests for oauth.Request: URL normalization, parameter access,
    serialization (header / postdata / url), signing, and parsing."""

    def test_setter(self):
        """A Request created without a URL exposes no url/normalized_url."""
        # NOTE(review): `url` is unused here; the request below deliberately
        # gets no URL.
        url = "http://example.com"
        method = "GET"
        req = oauth.Request(method)
        self.assertTrue(not hasattr(req, 'url') or req.url is None)
        self.assertTrue(not hasattr(req, 'normalized_url') or req.normalized_url is None)

    def test_deleter(self):
        """Reading req.url after deleting it raises AttributeError."""
        url = "http://example.com"
        method = "GET"
        req = oauth.Request(method, url)

        try:
            del req.url
            url = req.url
            self.fail("AttributeError should have been raised on empty url.")
        except AttributeError:
            pass
        except Exception, e:
            self.fail(str(e))

    def test_url(self):
        """Default ports (80/443) are stripped from normalized_url but the
        original url is kept verbatim."""
        url1 = "http://example.com:80/foo.php"
        url2 = "https://example.com:443/foo.php"
        exp1 = "http://example.com/foo.php"
        exp2 = "https://example.com/foo.php"
        method = "GET"

        req = oauth.Request(method, url1)
        self.assertEquals(req.normalized_url, exp1)
        self.assertEquals(req.url, url1)

        req = oauth.Request(method, url2)
        self.assertEquals(req.normalized_url, exp2)
        self.assertEquals(req.url, url2)

    def test_bad_url(self):
        """Only http/https URL schemes are accepted."""
        request = oauth.Request()
        try:
            request.url = "ftp://example.com"
            self.fail("Invalid URL scheme was accepted.")
        except ValueError:
            pass

    def test_unset_consumer_and_token(self):
        """sign_request() fills in the consumer key and token parameters."""
        consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
        token = oauth.Token('my_key', 'my_secret')
        request = oauth.Request("GET", "http://example.com/fetch.php")

        request.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer,
            token)
        self.assertEquals(consumer.key, request['oauth_consumer_key'])
        self.assertEquals(token.key, request['oauth_token'])

    def test_no_url_set(self):
        """Signing a request with no URL raises ValueError, not TypeError."""
        consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
        token = oauth.Token('my_key', 'my_secret')
        request = oauth.Request()

        try:
            try:
                request.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
                    consumer, token)
            except TypeError:
                self.fail("Signature method didn't check for a normalized URL.")
        except ValueError:
            pass

    def test_url_query(self):
        """The query string is preserved in url but absent from normalized_url."""
        url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
        # Same URL with params/query/fragment dropped.
        normalized_url = urlparse.urlunparse(urlparse.urlparse(url)[:3] + (None, None, None))

        method = "GET"

        req = oauth.Request(method, url)
        self.assertEquals(req.url, url)
        self.assertEquals(req.normalized_url, normalized_url)

    def test_get_parameter(self):
        """get_parameter() returns the value or raises oauth.Error if absent."""
        url = "http://example.com"
        method = "GET"
        params = {'oauth_consumer' : 'asdf'}
        req = oauth.Request(method, url, parameters=params)

        self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
        self.assertRaises(oauth.Error, req.get_parameter, 'blah')

    def test_get_nonoauth_parameters(self):
        """get_nonoauth_parameters() strips every oauth_* parameter and
        keeps the rest (including non-ASCII keys/values) intact."""
        oauth_params = {
            'oauth_consumer': 'asdfasdfasdf'
        }

        other_params = {
            u'foo': u'baz',
            u'bar': u'foo',
            u'multi': [u'FOO',u'BAR'],
            u'uni_utf8': u'\xae',
            u'uni_unicode': u'\u00ae',
            u'uni_unicode_2': u'åÅøØ',
        }

        params = oauth_params
        params.update(other_params)

        req = oauth.Request("GET", "http://example.com", params)
        self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
self.assertTrue(len(vars), (len(params) + 1))
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
self.assertTrue(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
    def test_to_postdata_nonascii(self):
        """Non-ASCII parameter values are UTF-8 encoded then percent-escaped
        in the postdata."""
        realm = "http://sp.example.com/"
        params = {
            'nonasciithing': u'q\xbfu\xe9 ,aasp u?..a.s',
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", realm, params)
        self.failUnlessReallyEqual(req.to_postdata(), 'nonasciithing=q%C2%BFu%C3%A9%20%2Caasp%20u%3F..a.s&oauth_nonce=4572616e48616d6d65724c61686176&oauth_timestamp=137131200&oauth_consumer_key=0685bd9184jfhq22&oauth_signature_method=HMAC-SHA1&oauth_version=1.0&oauth_token=ad180jjd733klru7&oauth_signature=wOJIO9A2W5mFwDgiDvZbTSMK%252FPY%253D')

    def test_to_postdata(self):
        """Multi-valued parameters appear once per value in the postdata."""
        realm = "http://sp.example.com/"
        params = {
            'multi': ['FOO','BAR'],
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", realm, params)
        # Compare as sorted key/value pair lists so ordering doesn't matter.
        flat = [('multi','FOO'),('multi','BAR')]
        del params['multi']
        flat.extend(params.items())
        kf = lambda x: x[0]
        self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))

    def test_to_url(self):
        """to_url() reproduces the base URL with all parameters in the query."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
        res = urlparse.urlparse(req.to_url())
        self.assertEquals(exp.scheme, res.scheme)
        self.assertEquals(exp.netloc, res.netloc)
        self.assertEquals(exp.path, res.path)

        a = parse_qs(exp.query)
        b = parse_qs(res.query)
        self.assertEquals(a, b)

    def test_to_url_with_query(self):
        """Pre-existing query parameters survive alongside the OAuth ones."""
        url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        # Note: the url above already has query parameters, so append new ones with &
        exp = urlparse.urlparse("%s&%s" % (url, urllib.urlencode(params)))
        res = urlparse.urlparse(req.to_url())
        self.assertEquals(exp.scheme, res.scheme)
        self.assertEquals(exp.netloc, res.netloc)
        self.assertEquals(exp.path, res.path)

        a = parse_qs(exp.query)
        b = parse_qs(res.query)
        self.assertTrue('alt' in b)
        self.assertTrue('max-contacts' in b)
        self.assertEquals(b['alt'], ['json'])
        self.assertEquals(b['max-contacts'], ['10'])
        self.assertEquals(a, b)

    def test_signature_base_string_nonascii_nonutf8(self):
        """The same non-ASCII URL yields the same signature whether given as
        unicode, UTF-8 bytes, or percent-encoded UTF-8 bytes."""
        consumer = oauth.Consumer('consumer_token', 'consumer_secret')

        url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\u2766,+CA'
        req = oauth.Request("GET", url)
        self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')

        # Same URL as UTF-8 bytes.
        url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\xe2\x9d\xa6,+CA'
        req = oauth.Request("GET", url)
        self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')

        # Same URL percent-encoded, as a byte string.
        url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
        req = oauth.Request("GET", url)
        self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')

        # Same URL percent-encoded, as a unicode string.
        url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
        req = oauth.Request("GET", url)
        self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
    def test_signature_base_string_with_query(self):
        """Query-string parameters participate in parameter normalization."""
        url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }
        req = oauth.Request("GET", url, params)
        self.assertEquals(req.normalized_url, 'https://www.google.com/m8/feeds/contacts/default/full/')
        self.assertEquals(req.url, 'https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10')
        normalized_params = parse_qsl(req.get_normalized_parameters())
        # NOTE(review): assertTrue's second argument is the failure message,
        # so this only checks the list is non-empty. An equality was likely
        # intended, but normalized parameters exclude oauth_signature, so
        # the expected count would need confirming before tightening this.
        self.assertTrue(len(normalized_params), len(params) + 2)
        normalized_params = dict(normalized_params)
        for key, value in params.iteritems():
            if key == 'oauth_signature':
                continue
            self.assertEquals(value, normalized_params[key])
        self.assertEquals(normalized_params['alt'], 'json')
        self.assertEquals(normalized_params['max-contacts'], '10')
    def test_get_normalized_parameters_empty(self):
        """A parameter with an empty value is kept as 'name='."""
        url = "http://sp.example.com/?empty="
        req = oauth.Request("GET", url)
        res = req.get_normalized_parameters()
        expected='empty='
        self.assertEquals(expected, res)

    def test_get_normalized_parameters_duplicate(self):
        """Parameters that appear in the URL are not duplicated in the
        normalized string."""
        url = "http://example.com/v2/search/videos?oauth_nonce=79815175&oauth_timestamp=1295397962&oauth_consumer_key=mykey&oauth_signature_method=HMAC-SHA1&q=car&oauth_version=1.0&offset=10&oauth_signature=spWLI%2FGQjid7sQVd5%2FarahRxzJg%3D"
        req = oauth.Request("GET", url)
        res = req.get_normalized_parameters()
        expected='oauth_consumer_key=mykey&oauth_nonce=79815175&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1295397962&oauth_version=1.0&offset=10&q=car'
        self.assertEquals(expected, res)

    def test_get_normalized_parameters_from_url(self):
        """Normalization sorts parameters taken from the URL query string."""
        # example copied from
        # https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
        # which in turns says that it was copied from
        # http://oauth.net/core/1.0/#sig_base_example .
        url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
        req = oauth.Request("GET", url)
        res = req.get_normalized_parameters()
        expected = 'file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original'
        self.assertEquals(expected, res)

    def test_signing_base(self):
        """The signature base string is METHOD & encoded-URL & encoded-params."""
        # example copied from
        # https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
        # which in turns says that it was copied from
        # http://oauth.net/core/1.0/#sig_base_example .
        url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
        req = oauth.Request("GET", url)
        sm = oauth.SignatureMethod_HMAC_SHA1()
        consumer = oauth.Consumer('dpf43f3p2l4k3l03', 'foo')
        key, raw = sm.signing_base(req, consumer, None)
        expected = 'GET&http%3A%2F%2Fphotos.example.net%2Fphotos&file%3Dvacation.jpg%26oauth_consumer_key%3Ddpf43f3p2l4k3l03%26oauth_nonce%3Dkllo9940pd9333jh%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1191242096%26oauth_token%3Dnnch734d00sl2jdk%26oauth_version%3D1.0%26size%3Doriginal'
        self.assertEquals(expected, raw)

    def test_get_normalized_parameters(self):
        """Multi-valued and non-ASCII parameters are sorted and UTF-8
        percent-encoded in the normalized string."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'multi': ['FOO','BAR', u'\u00ae', '\xc2\xae'],
            'multi_same': ['FOO','FOO'],
            'uni_utf8_bytes': '\xc2\xae',
            'uni_unicode_object': u'\u00ae'
        }
        req = oauth.Request("GET", url, params)
        res = req.get_normalized_parameters()
        expected='multi=BAR&multi=FOO&multi=%C2%AE&multi=%C2%AE&multi_same=FOO&multi_same=FOO&oauth_consumer_key=0685bd9184jfhq22&oauth_nonce=4572616e48616d6d65724c61686176&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131200&oauth_token=ad180jjd733klru7&oauth_version=1.0&uni_unicode_object=%C2%AE&uni_utf8_bytes=%C2%AE'
        self.assertEquals(expected, res)

    def test_get_normalized_parameters_ignores_auth_signature(self):
        """oauth_signature itself is excluded from normalization."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
            'oauth_token': "ad180jjd733klru7",
        }
        req = oauth.Request("GET", url, params)
        res = req.get_normalized_parameters()
        self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
        foo = params.copy()
        del foo["oauth_signature"]
        self.assertEqual(urllib.urlencode(sorted(foo.items())), res)

    def test_set_signature_method(self):
        """set_signature_method() only accepts SignatureMethod instances."""
        consumer = oauth.Consumer('key', 'secret')
        client = oauth.Client(consumer)

        class Blah:
            pass

        try:
            client.set_signature_method(Blah())
            self.fail("Client.set_signature_method() accepted invalid method.")
        except ValueError:
            pass

        m = oauth.SignatureMethod_HMAC_SHA1()
        client.set_signature_method(m)
        self.assertEquals(m, client.method)

    def test_get_normalized_string_escapes_spaces_properly(self):
        """Spaces are encoded as %20, never '+'."""
        url = "http://sp.example.com/"
        params = {
            "some_random_data": random.randint(100, 1000),
            "data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
        }
        req = oauth.Request("GET", url, params)
        res = req.get_normalized_parameters()
        expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
        self.assertEqual(expected, res)

    @mock.patch('oauth2.Request.make_timestamp')
    @mock.patch('oauth2.Request.make_nonce')
    def test_request_nonutf8_bytes(self, mock_make_nonce, mock_make_timestamp):
        """Non-UTF-8 byte strings in URLs or parameters raise TypeError;
        unicode and UTF-8 byte equivalents produce identical signatures."""
        mock_make_nonce.return_value = 5
        mock_make_timestamp.return_value = 6

        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_token': tok.key,
            'oauth_consumer_key': con.key
        }

        # If someone passes a sequence of bytes which is not ascii for
        # url, we'll raise an exception as early as possible.
        url = "http://sp.example.com/\x92" # It's actually cp1252-encoding...
        self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)

        # And if they pass an unicode, then we'll use it.
        url = u'http://sp.example.com/\u2019'
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'cMzvCkhvLL57+sTIxLITTHfkqZk=')

        # And if it is a utf-8-encoded-then-percent-encoded non-ascii
        # thing, we'll decode it and use it.
        url = "http://sp.example.com/%E2%80%99"
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'yMLKOyNKC/DkyhUOb8DLSvceEWE=')

        # Same thing with the params.
        url = "http://sp.example.com/"

        # If someone passes a sequence of bytes which is not ascii in
        # params, we'll raise an exception as early as possible.
        # NOTE(review): the trailing comma makes this a one-element tuple —
        # likely unintended, though the TypeError still fires for the
        # contained byte string.
        params['non_oauth_thing'] = '\xae', # It's actually cp1252-encoding...
        self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)

        # And if they pass a unicode, then we'll use it.
        params['non_oauth_thing'] = u'\u2019'
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        self.failUnlessReallyEqual(req['oauth_signature'], '0GU50m0v60CVDB5JnoBXnvvvKx4=')

        # And if it is a utf-8-encoded non-ascii thing, we'll decode
        # it and use it.
        params['non_oauth_thing'] = '\xc2\xae'
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        self.failUnlessReallyEqual(req['oauth_signature'], 'pqOCu4qvRTiGiXB8Z61Jsey0pMM=')

        # Also if there are non-utf8 bytes in the query args.
        url = "http://sp.example.com/?q=\x92" # cp1252
        self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)

    def test_request_hash_of_body(self):
        """Non-form-encoded bodies get an oauth_body_hash parameter
        (OAuth Request Body Hash extension)."""
        tok = oauth.Token(key="token", secret="tok-test-secret")
        con = oauth.Consumer(key="consumer", secret="con-test-secret")

        # Example 1a from Appendix A.1 of
        # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
        # Except that we get a different result than they do.
        params = {
            'oauth_version': "1.0",
            'oauth_token': tok.key,
            'oauth_nonce': 10288510250934,
            'oauth_timestamp': 1236874155,
            'oauth_consumer_key': con.key
        }
        url = u"http://www.example.com/resource"

        req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
        self.failUnlessReallyEqual(req['oauth_signature'], 't+MX8l/0S8hdbVQL99nD0X1fPnM=')
        # oauth-bodyhash.html A.1 has
        # '08bUFF%2Fjmp59mWB7cSgCYBUpJ0U%3D', but I don't see how that
        # is possible.

        # Example 1b
        params = {
            'oauth_version': "1.0",
            'oauth_token': tok.key,
            'oauth_nonce': 10369470270925,
            'oauth_timestamp': 1236874236,
            'oauth_consumer_key': con.key
        }

        req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
        self.failUnlessReallyEqual(req['oauth_signature'], 'CTFmrqJIGT7NsWJ42OrujahTtTc=')

        # Appendix A.2
        params = {
            'oauth_version': "1.0",
            'oauth_token': tok.key,
            'oauth_nonce': 8628868109991,
            'oauth_timestamp': 1238395022,
            'oauth_consumer_key': con.key
        }

        req = oauth.Request(method="GET", url=url, parameters=params, is_form_encoded=False)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
        # Empty body: SHA-1 of "".
        self.failUnlessReallyEqual(req['oauth_body_hash'], '2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
        self.failUnlessReallyEqual(req['oauth_signature'], 'Zhl++aWSP0O3/hYQ0CuBc7jv38I=')

    def test_sign_request(self):
        """Both HMAC-SHA1 and PLAINTEXT produce the expected signatures, and
        unicode vs UTF-8-byte URLs sign identically."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200"
        }

        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        con = oauth.Consumer(key="con-test-key", secret="con-test-secret")

        params['oauth_token'] = tok.key
        params['oauth_consumer_key'] = con.key
        req = oauth.Request(method="GET", url=url, parameters=params)

        methods = {
            'DX01TdHws7OninCLK9VztNTH1M4=': oauth.SignatureMethod_HMAC_SHA1(),
            'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
            }

        for exp, method in methods.items():
            req.sign_request(method, con, tok)
            self.assertEquals(req['oauth_signature_method'], method.name)
            self.assertEquals(req['oauth_signature'], exp)

        # Also if there are non-ascii chars in the URL.
        url = "http://sp.example.com/\xe2\x80\x99" # utf-8 bytes
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
        self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')

        url = u'http://sp.example.com/\u2019' # Python unicode object
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
        self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')

        # Also if there are non-ascii chars in the query args.
        url = "http://sp.example.com/?q=\xe2\x80\x99" # utf-8 bytes
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
        self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')

        url = u'http://sp.example.com/?q=\u2019' # Python unicode object
        req = oauth.Request(method="GET", url=url, parameters=params)
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
        self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')

    def test_from_request(self):
        """from_request() parses an Authorization header or a query string,
        rejects malformed headers, and returns None with no parameters."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }

        req = oauth.Request("GET", url, params)
        headers = req.to_header()

        # Test from the headers
        req = oauth.Request.from_request("GET", url, headers)
        self.assertEquals(req.method, "GET")
        self.assertEquals(req.url, url)
        self.assertEquals(params, req.copy())

        # Test with bad OAuth headers
        bad_headers = {
            'Authorization' : 'OAuth this is a bad header'
        }
        self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
            url, bad_headers)

        # Test getting from query string
        qs = urllib.urlencode(params)
        req = oauth.Request.from_request("GET", url, query_string=qs)

        exp = parse_qs(qs, keep_blank_values=False)
        for k, v in exp.iteritems():
            exp[k] = urllib.unquote(v[0])

        self.assertEquals(exp, req.copy())

        # Test that a boned from_request() call returns None
        req = oauth.Request.from_request("GET", url)
        self.assertEquals(None, req)

    def test_from_token_and_callback(self):
        """from_token_and_callback() includes oauth_callback only when given."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': "137131200",
            'oauth_consumer_key': "0685bd9184jfhq22",
            'oauth_signature_method': "HMAC-SHA1",
            'oauth_token': "ad180jjd733klru7",
            'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
        }

        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        req = oauth.Request.from_token_and_callback(tok)
        self.assertFalse('oauth_callback' in req)
        self.assertEquals(req['oauth_token'], tok.key)

        req = oauth.Request.from_token_and_callback(tok, callback=url)
        self.assertTrue('oauth_callback' in req)
        self.assertEquals(req['oauth_callback'], url)

    def test_from_consumer_and_token(self):
        """from_consumer_and_token() copies consumer key, token key and the
        token's verifier into the request."""
        url = "http://sp.example.com/"

        tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
        tok.set_verifier('this_is_a_test_verifier')
        con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
        req = oauth.Request.from_consumer_and_token(con, token=tok,
            http_method="GET", http_url=url)

        self.assertEquals(req['oauth_token'], tok.key)
        self.assertEquals(req['oauth_consumer_key'], con.key)
        self.assertEquals(tok.verifier, req['oauth_verifier'])
class SignatureMethod_Bad(oauth.SignatureMethod):
    """A deliberately broken signature method used to exercise the server's
    rejection of unknown/invalid signatures."""
    name = "BAD"

    def signing_base(self, request, consumer, token):
        return ""

    def sign(self, request, consumer, token):
        # Always produces a signature that can never verify.
        return "invalid-signature"
class TestServer(unittest.TestCase):
    """Tests for oauth.Server: signature-method registry and request
    verification, including the rejection paths."""

    def setUp(self):
        """Build a correctly signed GET request shared by the happy-path tests."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        self.consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        self.token = oauth.Token(key="token-key", secret="token-secret")

        params['oauth_token'] = self.token.key
        params['oauth_consumer_key'] = self.consumer.key
        self.request = oauth.Request(method="GET", url=url, parameters=params)

        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.request.sign_request(signature_method, self.consumer, self.token)

    def test_init(self):
        """The signature-method registry can be seeded via the constructor
        and defaults to empty."""
        server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
        self.assertTrue('HMAC-SHA1' in server.signature_methods)
        self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
            oauth.SignatureMethod_HMAC_SHA1))

        server = oauth.Server()
        self.assertEquals(server.signature_methods, {})

    def test_add_signature_method(self):
        """add_signature_method() registers methods keyed by their name and
        returns the registry."""
        server = oauth.Server()
        res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertTrue(len(res) == 1)
        self.assertTrue('HMAC-SHA1' in res)
        self.assertTrue(isinstance(res['HMAC-SHA1'],
            oauth.SignatureMethod_HMAC_SHA1))

        res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
        self.assertTrue(len(res) == 2)
        self.assertTrue('PLAINTEXT' in res)
        self.assertTrue(isinstance(res['PLAINTEXT'],
            oauth.SignatureMethod_PLAINTEXT))

    def test_verify_request(self):
        """A correctly signed request verifies and yields the non-oauth
        parameters."""
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        parameters = server.verify_request(self.request, self.consumer,
            self.token)

        self.assertTrue('bar' in parameters)
        self.assertTrue('foo' in parameters)
        self.assertTrue('multi' in parameters)
        self.assertEquals(parameters['bar'], 'blerg')
        self.assertEquals(parameters['foo'], 59)
        self.assertEquals(parameters['multi'], ['FOO','BAR'])

    def test_build_authenticate_header(self):
        """build_authenticate_header() yields a WWW-Authenticate challenge
        carrying the realm."""
        server = oauth.Server()
        headers = server.build_authenticate_header('example.com')
        self.assertTrue('WWW-Authenticate' in headers)
        self.assertEquals('OAuth realm="example.com"',
            headers['WWW-Authenticate'])

    def test_no_version(self):
        """Requests omitting oauth_version are still accepted (defaults to 1.0)."""
        url = "http://sp.example.com/"
        params = {
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        self.consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        self.token = oauth.Token(key="token-key", secret="token-secret")

        params['oauth_token'] = self.token.key
        params['oauth_consumer_key'] = self.consumer.key
        self.request = oauth.Request(method="GET", url=url, parameters=params)

        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.request.sign_request(signature_method, self.consumer, self.token)

        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        # Success criterion is simply "no exception raised" here; the
        # returned parameters are not inspected.
        parameters = server.verify_request(self.request, self.consumer,
            self.token)

    def test_invalid_version(self):
        """An unsupported oauth_version is rejected with oauth.Error."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '222.9922',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['foo','bar'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")

        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)

        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        request.sign_request(signature_method, consumer, token)

        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())

        self.assertRaises(oauth.Error, server.verify_request, request, consumer, token)

    def test_invalid_signature_method(self):
        """A request signed with an unregistered method is rejected."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '1.0',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")

        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)

        # Signed with the bogus "BAD" method defined above.
        signature_method = SignatureMethod_Bad()
        request.sign_request(signature_method, consumer, token)

        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())

        self.assertRaises(oauth.Error, server.verify_request, request,
            consumer, token)

    def test_missing_signature(self):
        """A request stripped of oauth_signature raises MissingSignature."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '1.0',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")

        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)

        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        request.sign_request(signature_method, consumer, token)
        del request['oauth_signature']

        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())

        self.assertRaises(oauth.MissingSignature, server.verify_request,
            request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
    """Integration tests against the public OAuth sandbox at
    oauth-sandbox.sevengoslings.net (these require network access)."""

    # oauth_uris = {
    #     'request_token': '/request_token.php',
    #     'access_token': '/access_token.php'
    # }

    # Endpoint paths on the sandbox host, keyed by logical name.
    oauth_uris = {
        'request_token': '/request_token',
        'authorize': '/authorize',
        'access_token': '/access_token',
        'two_legged': '/two_legged',
        'three_legged': '/three_legged'
    }

    # Publicly published sandbox credentials (see the comment block above
    # this class); these are not secrets.
    consumer_key = 'bd37aed57e15df53'
    consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'

    host = 'http://oauth-sandbox.sevengoslings.net'

    def setUp(self):
        """Create the sandbox consumer and a request-body fixture."""
        self.consumer = oauth.Consumer(key=self.consumer_key,
            secret=self.consumer_secret)

        self.body = {
            'foo': 'bar',
            'bar': 'foo',
            'multi': ['FOO','BAR'],
            'blah': 599999
        }
def _uri(self, type):
uri = self.oauth_uris.get(type)
if uri is None:
raise KeyError("%s is not a valid OAuth URI type." % type)
return "%s%s" % (self.host, uri)
    def create_simple_multipart_data(self, data):
        """Build a (content_type, body) pair encoding *data* as a minimal
        multipart/form-data payload with a random boundary."""
        boundary = '---Boundary-%d' % random.randint(1,1000)
        crlf = '\r\n'
        items = []
        for key, value in data.iteritems():
            items += [
                '--'+boundary,
                'Content-Disposition: form-data; name="%s"'%str(key),
                '',
                str(value),
            ]
        # Closing boundary plus trailing CRLF.
        items += ['', '--'+boundary+'--', '']
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return content_type, crlf.join(items)
    def test_init(self):
        """Client.__init__ rejects objects that are not Consumer/Token."""
        class Blah():
            pass

        try:
            client = oauth.Client(Blah())
            self.fail("Client.__init__() accepted invalid Consumer.")
        except ValueError:
            pass

        consumer = oauth.Consumer('token', 'secret')
        try:
            client = oauth.Client(consumer, Blah())
            self.fail("Client.__init__() accepted invalid Token.")
        except ValueError:
            pass
    def test_access_token_get(self):
        """Test getting an access token via GET."""
        # NOTE(review): despite the name/docstring, this hits the
        # request_token endpoint — verify which token flow is meant.
        client = oauth.Client(self.consumer, None)
        resp, content = client.request(self._uri('request_token'), "GET")

        self.assertEquals(int(resp['status']), 200)
def test_access_token_post(self):
"""Test getting an access token via POST."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "POST")
self.assertEquals(int(resp['status']), 200)
res = dict(parse_qsl(content))
self.assertTrue('oauth_token' in res)
self.assertTrue('oauth_token_secret' in res)
def _two_legged(self, method):
client = oauth.Client(self.consumer, None)
return client.request(self._uri('two_legged'), method,
body=urllib.urlencode(self.body))
def test_two_legged_post(self):
"""A test of a two-legged OAuth POST request."""
resp, content = self._two_legged("POST")
self.assertEquals(int(resp['status']), 200)
def test_two_legged_get(self):
"""A test of a two-legged OAuth GET request."""
resp, content = self._two_legged("GET")
self.assertEquals(int(resp['status']), 200)
@mock.patch('httplib2.Http.request')
def test_multipart_post_does_not_alter_body(self, mockHttpRequest):
random_result = random.randint(1,100)
data = {
'rand-%d'%random.randint(1,100):random.randint(1,100),
}
content_type, body = self.create_simple_multipart_data(data)
client = oauth.Client(self.consumer, None)
uri = self._uri('two_legged')
def mockrequest(cl, ur, **kw):
self.failUnless(cl is client)
self.failUnless(ur is uri)
self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
self.failUnlessEqual(kw['body'], body)
self.failUnlessEqual(kw['connection_type'], None)
self.failUnlessEqual(kw['method'], 'POST')
self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
self.failUnless(isinstance(kw['headers'], dict))
return random_result
mockHttpRequest.side_effect = mockrequest
result = client.request(uri, 'POST', headers={'Content-Type':content_type}, body=body)
self.assertEqual(result, random_result)
@mock.patch('httplib2.Http.request')
def test_url_with_query_string(self, mockHttpRequest):
uri = 'http://example.com/foo/bar/?show=thundercats&character=snarf'
client = oauth.Client(self.consumer, None)
random_result = random.randint(1,100)
def mockrequest(cl, ur, **kw):
self.failUnless(cl is client)
self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
self.failUnlessEqual(kw['body'], '')
self.failUnlessEqual(kw['connection_type'], None)
self.failUnlessEqual(kw['method'], 'GET')
self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
self.failUnless(isinstance(kw['headers'], dict))
req = oauth.Request.from_consumer_and_token(self.consumer, None,
http_method='GET', http_url=uri, parameters={})
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.consumer, None)
expected = parse_qsl(urlparse.urlparse(req.to_url()).query)
actual = parse_qsl(urlparse.urlparse(ur).query)
self.failUnlessEqual(len(expected), len(actual))
actual = dict(actual)
for key, value in expected:
if key not in ('oauth_signature', 'oauth_nonce', 'oauth_timestamp'):
self.failUnlessEqual(actual[key], value)
return random_result
mockHttpRequest.side_effect = mockrequest
client.request(uri, 'GET')
@mock.patch('httplib2.Http.request')
@mock.patch('oauth2.Request.from_consumer_and_token')
def test_multiple_values_for_a_key(self, mockReqConstructor, mockHttpRequest):
client = oauth.Client(self.consumer, None)
request = oauth.Request("GET", "http://example.com/fetch.php", parameters={'multi': ['1', '2']})
mockReqConstructor.return_value = request
client.request('http://whatever', 'POST', body='multi=1&multi=2')
self.failUnlessEqual(mockReqConstructor.call_count, 1)
self.failUnlessEqual(mockReqConstructor.call_args[1]['parameters'], {'multi': ['1', '2']})
self.failUnless('multi=1' in mockHttpRequest.call_args[1]['body'])
self.failUnless('multi=2' in mockHttpRequest.call_args[1]['body'])
if __name__ == "__main__":
unittest.main()
| mit | 3,637,857,528,572,395,500 | 9,130,517,387,716,690,000 | 39.68984 | 345 | 0.623848 | false |
2014c2g3/0623exam | static/Brython3.1.0-20150301-090019/Lib/weakref_1.py | 769 | 11495 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
    """Mapping class that references values weakly.

    Entries in the dictionary will be discarded when no strong
    reference to the value exists anymore.
    """
    # We inherit the constructor without worrying about the input
    # dictionary; since it uses our .update() method, we get the right
    # checks (if the other dictionary is a WeakValueDictionary,
    # objects are unwrapped on the way out, and we always wrap on the
    # way in).
    def __init__(self, *args, **kw):
        # Weakref callback, fired when a stored value is collected.
        # It keeps only a weak reference to `self` so the dictionary
        # itself remains collectable.
        def remove(wr, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Mutating self.data during iteration would break
                    # live iterators; defer the removal instead.
                    self._pending_removals.append(wr.key)
                else:
                    del self.data[wr.key]
        self._remove = remove
        # A list of keys to be removed
        self._pending_removals = []
        # Set of active _IterationGuard instances (non-empty while iterating).
        self._iterating = set()
        self.data = d = {}
        self.update(*args, **kw)
    def _commit_removals(self):
        # Flush removals deferred by the weakref callback above.
        l = self._pending_removals
        d = self.data
        # We shouldn't encounter any KeyError, because this method should
        # always be called *before* mutating the dict.
        while l:
            del d[l.pop()]
    def __getitem__(self, key):
        o = self.data[key]()
        if o is None:
            # Referent died but its callback hasn't run yet: behave as absent.
            raise KeyError(key)
        else:
            return o
    def __delitem__(self, key):
        if self._pending_removals:
            self._commit_removals()
        del self.data[key]
    def __len__(self):
        # Subtract entries already known dead but not yet removed.
        return len(self.data) - len(self._pending_removals)
    def __contains__(self, key):
        try:
            o = self.data[key]()
        except KeyError:
            return False
        # A dead (but not yet removed) entry counts as absent.
        return o is not None
    def __repr__(self):
        return "<WeakValueDictionary at %s>" % id(self)
    def __setitem__(self, key, value):
        if self._pending_removals:
            self._commit_removals()
        # KeyedRef carries the key so the shared `remove` callback can
        # find the entry to delete.
        self.data[key] = KeyedRef(value, self._remove, key)
    def copy(self):
        """Return a shallow copy holding only the still-live entries."""
        new = WeakValueDictionary()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                new[key] = o
        return new
    __copy__ = copy
    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = self.__class__()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                # Keys are deep-copied; values stay weakly referenced.
                new[deepcopy(key, memo)] = o
        return new
    def get(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            return default
        else:
            o = wr()
            if o is None:
                # This should only happen when the referent died but
                # its removal callback has not run yet.
                return default
            else:
                return o
    def items(self):
        # Yield only live (key, value) pairs; the guard defers removals
        # triggered during iteration.
        with _IterationGuard(self):
            for k, wr in self.data.items():
                v = wr()
                if v is not None:
                    yield k, v
    def keys(self):
        with _IterationGuard(self):
            for k, wr in self.data.items():
                if wr() is not None:
                    yield k
    __iter__ = keys
    def itervaluerefs(self):
        """Return an iterator that yields the weak references to the values.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.
        """
        with _IterationGuard(self):
            for wr in self.data.values():
                yield wr
    def values(self):
        with _IterationGuard(self):
            for wr in self.data.values():
                obj = wr()
                if obj is not None:
                    yield obj
    def popitem(self):
        if self._pending_removals:
            self._commit_removals()
        # Loop until an entry with a live referent is found.
        while True:
            key, wr = self.data.popitem()
            o = wr()
            if o is not None:
                return key, o
    def pop(self, key, *args):
        if self._pending_removals:
            self._commit_removals()
        try:
            o = self.data.pop(key)()
        except KeyError:
            if args:
                # Optional positional default, as with dict.pop.
                return args[0]
            raise
        if o is None:
            # Entry existed but its referent is gone: treat as missing.
            raise KeyError(key)
        else:
            return o
    def setdefault(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            if self._pending_removals:
                self._commit_removals()
            self.data[key] = KeyedRef(default, self._remove, key)
            return default
        else:
            return wr()
    def update(self, dict=None, **kwargs):
        if self._pending_removals:
            self._commit_removals()
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                # Accept any iterable of key/value pairs by coercing to dict.
                dict = type({})(dict)
            for key, o in dict.items():
                d[key] = KeyedRef(o, self._remove, key)
        if len(kwargs):
            self.update(kwargs)
    def valuerefs(self):
        """Return a list of weak references to the values.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.
        """
        return list(self.data.values())
class KeyedRef(ref):
    """Weak reference that also remembers the key of its referent.

    WeakValueDictionary stores these so that one shared removal
    callback can locate the dying entry through ``wr.key`` instead of
    allocating a closure per stored key.
    """
    __slots__ = ("key",)
    def __new__(cls, ob, callback, key):
        # The referent and callback must be supplied to ref.__new__;
        # only the extra `key` slot is assigned afterwards.
        inst = ref.__new__(cls, ob, callback)
        inst.key = key
        return inst
    def __init__(self, ob, callback, key):
        # `key` was already stored by __new__; just let the base class
        # finish its initialisation.
        super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
    """ Mapping class that references keys weakly.

    Entries in the dictionary will be discarded when there is no
    longer a strong reference to the key. This can be used to
    associate additional data with an object owned by other parts of
    an application without adding attributes to those objects. This
    can be especially useful with objects that override attribute
    accesses.
    """
    def __init__(self, dict=None):
        self.data = {}
        # Weakref callback fired when a key object is collected; holds
        # only a weak reference to `self` to keep the dict collectable.
        def remove(k, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Defer removal while iterators are live.
                    self._pending_removals.append(k)
                else:
                    del self.data[k]
        self._remove = remove
        # A list of dead weakrefs (keys to be removed)
        self._pending_removals = []
        # Set of active _IterationGuard instances (non-empty while iterating).
        self._iterating = set()
        if dict is not None:
            self.update(dict)
    def _commit_removals(self):
        # NOTE: We don't need to call this method before mutating the dict,
        # because a dead weakref never compares equal to a live weakref,
        # even if they happened to refer to equal objects.
        # However, it means keys may already have been removed.
        l = self._pending_removals
        d = self.data
        while l:
            try:
                del d[l.pop()]
            except KeyError:
                pass
    def __delitem__(self, key):
        del self.data[ref(key)]
    def __getitem__(self, key):
        # A plain (callback-less) ref compares equal to the stored one.
        return self.data[ref(key)]
    def __len__(self):
        # Subtract entries already known dead but not yet removed.
        return len(self.data) - len(self._pending_removals)
    def __repr__(self):
        return "<WeakKeyDictionary at %s>" % id(self)
    def __setitem__(self, key, value):
        # Store under a weak reference carrying the shared removal callback.
        self.data[ref(key, self._remove)] = value
    def copy(self):
        """Return a shallow copy holding only entries whose keys are alive."""
        new = WeakKeyDictionary()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                new[o] = value
        return new
    __copy__ = copy
    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = self.__class__()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                # Values are deep-copied; keys stay weakly referenced.
                new[o] = deepcopy(value, memo)
        return new
    def get(self, key, default=None):
        return self.data.get(ref(key),default)
    def __contains__(self, key):
        try:
            wr = ref(key)
        except TypeError:
            # Unreferenceable objects can never be keys here.
            return False
        return wr in self.data
    def items(self):
        # Yield only pairs whose key is still alive; the guard defers
        # removals triggered during iteration.
        with _IterationGuard(self):
            for wr, value in self.data.items():
                key = wr()
                if key is not None:
                    yield key, value
    def keys(self):
        with _IterationGuard(self):
            for wr in self.data:
                obj = wr()
                if obj is not None:
                    yield obj
    __iter__ = keys
    def values(self):
        with _IterationGuard(self):
            for wr, value in self.data.items():
                if wr() is not None:
                    yield value
    def keyrefs(self):
        """Return a list of weak references to the keys.

        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used.  This can be used to avoid
        creating references that will cause the garbage collector to
        keep the keys around longer than needed.
        """
        return list(self.data)
    def popitem(self):
        # Loop until an entry with a live key is found.
        while True:
            key, value = self.data.popitem()
            o = key()
            if o is not None:
                return o, value
    def pop(self, key, *args):
        return self.data.pop(ref(key), *args)
    def setdefault(self, key, default=None):
        return self.data.setdefault(ref(key, self._remove),default)
    def update(self, dict=None, **kwargs):
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                # Accept any iterable of key/value pairs by coercing to dict.
                dict = type({})(dict)
            for key, value in dict.items():
                d[ref(key, self._remove)] = value
        if len(kwargs):
            self.update(kwargs)
| gpl-3.0 | -6,148,667,097,852,037,000 | -6,159,481,067,299,747,000 | 28.857143 | 76 | 0.551196 | false |
zhukaixy/kbengine | kbe/src/lib/python/Lib/test/test_getopt.py | 173 | 6968 | # test_getopt.py
# David Goodger <dgoodger@bigfoot.com> 2000-08-19
from test.support import verbose, run_doctest, run_unittest, EnvironmentVarGuard
import unittest
import getopt
sentinel = object()
class GetoptTests(unittest.TestCase):
    """Unit tests for the getopt module's public and helper functions."""
    def setUp(self):
        # POSIXLY_CORRECT switches gnu_getopt into POSIX scanning mode,
        # so clear it for a deterministic baseline; tearDown restores it.
        self.env = EnvironmentVarGuard()
        if "POSIXLY_CORRECT" in self.env:
            del self.env["POSIXLY_CORRECT"]
    def tearDown(self):
        self.env.__exit__()
        del self.env
    def assertError(self, *args, **kwargs):
        # Shorthand: the given call must raise getopt.GetoptError.
        self.assertRaises(getopt.GetoptError, *args, **kwargs)
    def test_short_has_arg(self):
        self.assertTrue(getopt.short_has_arg('a', 'a:'))
        self.assertFalse(getopt.short_has_arg('a', 'a'))
        self.assertError(getopt.short_has_arg, 'a', 'b')
    def test_long_has_args(self):
        # Exact match with '=' suffix: takes an argument.
        has_arg, option = getopt.long_has_args('abc', ['abc='])
        self.assertTrue(has_arg)
        self.assertEqual(option, 'abc')
        has_arg, option = getopt.long_has_args('abc', ['abc'])
        self.assertFalse(has_arg)
        self.assertEqual(option, 'abc')
        # Unambiguous prefix expands to the full option name.
        has_arg, option = getopt.long_has_args('abc', ['abcd'])
        self.assertFalse(has_arg)
        self.assertEqual(option, 'abcd')
        self.assertError(getopt.long_has_args, 'abc', ['def'])
        self.assertError(getopt.long_has_args, 'abc', [])
        # Ambiguous prefix must raise.
        self.assertError(getopt.long_has_args, 'abc', ['abcd','abcde'])
    def test_do_shorts(self):
        opts, args = getopt.do_shorts([], 'a', 'a', [])
        self.assertEqual(opts, [('-a', '')])
        self.assertEqual(args, [])
        opts, args = getopt.do_shorts([], 'a1', 'a:', [])
        self.assertEqual(opts, [('-a', '1')])
        self.assertEqual(args, [])
        #opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
        #self.assertEqual(opts, [('-a', '1')])
        #self.assertEqual(args, [])
        # Option argument may be supplied as the next word.
        opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
        self.assertEqual(opts, [('-a', '1')])
        self.assertEqual(args, [])
        opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
        self.assertEqual(opts, [('-a', '1')])
        self.assertEqual(args, ['2'])
        self.assertError(getopt.do_shorts, [], 'a1', 'a', [])
        self.assertError(getopt.do_shorts, [], 'a', 'a:', [])
    def test_do_longs(self):
        opts, args = getopt.do_longs([], 'abc', ['abc'], [])
        self.assertEqual(opts, [('--abc', '')])
        self.assertEqual(args, [])
        opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
        self.assertEqual(opts, [('--abc', '1')])
        self.assertEqual(args, [])
        opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
        self.assertEqual(opts, [('--abcd', '1')])
        self.assertEqual(args, [])
        opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
        self.assertEqual(opts, [('--abc', '')])
        self.assertEqual(args, [])
        # Much like the preceding, except with a non-alpha character ("-") in
        # option name that precedes "="; failed in
        # http://python.org/sf/126863
        opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=',], [])
        self.assertEqual(opts, [('--foo', '42')])
        self.assertEqual(args, [])
        self.assertError(getopt.do_longs, [], 'abc=1', ['abc'], [])
        self.assertError(getopt.do_longs, [], 'abc', ['abc='], [])
    def test_getopt(self):
        # note: the empty string between '-a' and '--beta' is significant:
        # it simulates an empty string option argument ('-a ""') on the
        # command line.
        cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a',
                   '', '--beta', 'arg1', 'arg2']
        opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
        self.assertEqual(opts, [('-a', '1'), ('-b', ''),
                                ('--alpha', '2'), ('--beta', ''),
                                ('-a', '3'), ('-a', ''), ('--beta', '')])
        # Note ambiguity of ('-b', '') and ('-a', '') above. This must be
        # accounted for in the code that calls getopt().
        self.assertEqual(args, ['arg1', 'arg2'])
        self.assertError(getopt.getopt, cmdline, 'a:b', ['alpha', 'beta'])
    def test_gnu_getopt(self):
        # Test handling of GNU style scanning mode.
        cmdline = ['-a', 'arg1', '-b', '1', '--alpha', '--beta=2']
        # GNU style
        opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
        self.assertEqual(args, ['arg1'])
        self.assertEqual(opts, [('-a', ''), ('-b', '1'),
                                ('--alpha', ''), ('--beta', '2')])
        # recognize "-" as an argument
        opts, args = getopt.gnu_getopt(['-a', '-', '-b', '-'], 'ab:', [])
        self.assertEqual(args, ['-'])
        self.assertEqual(opts, [('-a', ''), ('-b', '-')])
        # Posix style via +
        opts, args = getopt.gnu_getopt(cmdline, '+ab:', ['alpha', 'beta='])
        self.assertEqual(opts, [('-a', '')])
        self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
        # Posix style via POSIXLY_CORRECT
        self.env["POSIXLY_CORRECT"] = "1"
        opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
        self.assertEqual(opts, [('-a', '')])
        self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
    def test_libref_examples(self):
        s = """
        Examples from the Library Reference: Doc/lib/libgetopt.tex
        An example using only Unix style options:
        >>> import getopt
        >>> args = '-a -b -cfoo -d bar a1 a2'.split()
        >>> args
        ['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
        >>> optlist, args = getopt.getopt(args, 'abc:d:')
        >>> optlist
        [('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
        >>> args
        ['a1', 'a2']
        Using long option names is equally easy:
        >>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
        >>> args = s.split()
        >>> args
        ['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
        >>> optlist, args = getopt.getopt(args, 'x', [
        ...     'condition=', 'output-file=', 'testing'])
        >>> optlist
        [('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
        >>> args
        ['a1', 'a2']
        """
        # Run the examples above as doctests inside a throwaway module.
        import types
        m = types.ModuleType("libreftest", s)
        run_doctest(m, verbose)
    def test_issue4629(self):
        # '--help=' (empty explicit argument) must parse as an empty value.
        longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
        self.assertEqual(longopts, [('--help', '')])
        longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
        self.assertEqual(longopts, [('--help', 'x')])
        self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help'])
def test_main():
    # Standard regrtest entry point: run the suite via test.support.
    run_unittest(GetoptTests)
if __name__ == "__main__":
    test_main()
| lgpl-3.0 | -394,271,540,236,997,570 | 9,121,728,686,896,867,000 | 36.262032 | 93 | 0.502583 | false |
SanchayanMaity/gem5 | tests/configs/realview-switcheroo-full.py | 18 | 2450 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
# Build a full-system ARM Linux configuration whose CPU model is
# switched at runtime among the four listed classes by the switcheroo
# tester (see the switcheroo module).
root = LinuxArmFSSwitcheroo(
    mem_class=DDR3_1600_x64,
    cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, MinorCPU, DerivO3CPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| bsd-3-clause | 2,597,605,264,254,806,500 | -9,158,632,028,593,200,000 | 49 | 72 | 0.792245 | false |
goldenbull/grpc | src/python/grpcio/tests/unit/_crust_over_core_over_links_face_interface_test.py | 5 | 6763 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Face compliance of the crust-over-core-over-gRPC-links stack."""
import collections
import unittest
import six
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc._links import service
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.core import implementations as core_implementations
from grpc.framework.crust import implementations as crust_implementations
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.links import utilities
from tests.unit import test_common as grpc_test_common
from tests.unit.framework.common import test_constants
from tests.unit.framework.interfaces.face import test_cases
from tests.unit.framework.interfaces.face import test_interfaces
class _SerializationBehaviors(
collections.namedtuple(
'_SerializationBehaviors',
('request_serializers', 'request_deserializers', 'response_serializers',
'response_deserializers',))):
pass
def _serialization_behaviors_from_test_methods(test_methods):
  """Collect the per-method serialization callables into one bundle.

  Args:
    test_methods: a dict mapping (group, method) pairs to test method
      objects that expose serialize/deserialize callables.

  Returns:
    A _SerializationBehaviors whose four dicts are each keyed by
    (group, method).
  """
  serializers_for_requests = {}
  deserializers_for_requests = {}
  serializers_for_responses = {}
  deserializers_for_responses = {}
  for method_key, test_method in test_methods.items():
    group, method = method_key
    serializers_for_requests[group, method] = test_method.serialize_request
    deserializers_for_requests[group, method] = test_method.deserialize_request
    serializers_for_responses[group, method] = test_method.serialize_response
    deserializers_for_responses[group, method] = test_method.deserialize_response
  return _SerializationBehaviors(
      serializers_for_requests, deserializers_for_requests,
      serializers_for_responses, deserializers_for_responses)
class _Implementation(test_interfaces.Implementation):
  """Face-test Implementation backed by crust-over-core-over-gRPC links."""

  def instantiate(
      self, methods, method_implementations, multi_method_implementation):
    # Build the full stack: servicer -> core end links -> gRPC links,
    # wired over a real local channel on an ephemeral port.
    pool = logging_pool.pool(test_constants.POOL_SIZE)
    servicer = crust_implementations.servicer(
        method_implementations, multi_method_implementation, pool)
    serialization_behaviors = _serialization_behaviors_from_test_methods(
        methods)
    invocation_end_link = core_implementations.invocation_end_link()
    service_end_link = core_implementations.service_end_link(
        servicer, test_constants.DEFAULT_TIMEOUT,
        test_constants.MAXIMUM_TIMEOUT)
    service_grpc_link = service.service_link(
        serialization_behaviors.request_deserializers,
        serialization_behaviors.response_serializers)
    # Port 0 lets the OS pick a free port; the client channel targets it.
    port = service_grpc_link.add_port('[::]:0', None)
    channel = _intermediary_low.Channel('localhost:%d' % port, None)
    invocation_grpc_link = invocation.invocation_link(
        channel, b'localhost', None,
        serialization_behaviors.request_serializers,
        serialization_behaviors.response_deserializers)
    # Join the links pairwise (both directions), then start service side
    # before invocation side so the server is listening first.
    invocation_end_link.join_link(invocation_grpc_link)
    invocation_grpc_link.join_link(invocation_end_link)
    service_grpc_link.join_link(service_end_link)
    service_end_link.join_link(service_grpc_link)
    service_end_link.start()
    invocation_end_link.start()
    invocation_grpc_link.start()
    service_grpc_link.start()
    generic_stub = crust_implementations.generic_stub(invocation_end_link, pool)
    # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest.
    group = next(iter(methods))[0]
    # TODO(nathaniel): Add a "cardinalities_by_group" attribute to
    # _digest.TestServiceDigest.
    cardinalities = {
        method: method_object.cardinality()
        for (group, method), method_object in six.iteritems(methods)}
    dynamic_stub = crust_implementations.dynamic_stub(
        invocation_end_link, group, cardinalities, pool)
    # The third element is the memo handed back to destantiate() below.
    return generic_stub, {group: dynamic_stub}, (
        invocation_end_link, invocation_grpc_link, service_grpc_link,
        service_end_link, pool)

  def destantiate(self, memo):
    # Tear down in the reverse order of construction; the stop/begin_stop/
    # end_stop sequencing here is required by the link implementations.
    (invocation_end_link, invocation_grpc_link, service_grpc_link,
     service_end_link, pool) = memo
    invocation_end_link.stop(0).wait()
    invocation_grpc_link.stop()
    service_grpc_link.begin_stop()
    service_end_link.stop(0).wait()
    service_grpc_link.end_stop()
    # Detach everything so no link keeps another alive.
    invocation_end_link.join_link(utilities.NULL_LINK)
    invocation_grpc_link.join_link(utilities.NULL_LINK)
    service_grpc_link.join_link(utilities.NULL_LINK)
    service_end_link.join_link(utilities.NULL_LINK)
    pool.shutdown(wait=True)

  def invocation_metadata(self):
    return grpc_test_common.INVOCATION_INITIAL_METADATA

  def initial_metadata(self):
    return grpc_test_common.SERVICE_INITIAL_METADATA

  def terminal_metadata(self):
    return grpc_test_common.SERVICE_TERMINAL_METADATA

  def code(self):
    return beta_interfaces.StatusCode.OK

  def details(self):
    return grpc_test_common.DETAILS

  def metadata_transmitted(self, original_metadata, transmitted_metadata):
    # None means "nothing was required to be transmitted" and is a pass.
    return original_metadata is None or grpc_test_common.metadata_transmitted(
        original_metadata, transmitted_metadata)
def load_tests(loader, tests, pattern):
  """unittest load_tests protocol hook: assemble the Face test suite."""
  face_case_classes = test_cases.test_cases(_Implementation())
  loaded = [
      loader.loadTestsFromTestCase(case_class)
      for case_class in face_case_classes]
  return unittest.TestSuite(tests=tuple(loaded))


if __name__ == '__main__':
  unittest.main(verbosity=2)
| bsd-3-clause | -4,207,729,772,506,178,600 | 4,064,961,377,136,021,500 | 40.490798 | 80 | 0.750259 | false |
mujiansu/pip | pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
    """Multi-byte charset prober specialised for GB2312.

    Wires the generic MultiByteCharSetProber machinery to the GB2312
    coding state machine and character-distribution analyser.
    """

    def get_charset_name(self):
        """Name reported when this prober wins detection."""
        return "GB2312"

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # Plug in the GB2312-specific state machine and distribution
        # analyser, then reset to a clean probing state.
        self._mCodingSM = CodingStateMachine(GB2312SMModel)
        self._mDistributionAnalyzer = GB2312DistributionAnalysis()
        self.reset()
| mit | 2,783,508,661,708,100,600 | 6,206,374,650,867,562,000 | 40 | 69 | 0.718025 | false |
matthdsm/bioconda-recipes | recipes/peptide-shaker/1.16.16/peptide-shaker.py | 45 | 3272 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the bundled jar, resolved relative to this script's real
# directory (or to --exec_dir; see jvm_opts/main below).
jar_file = 'PeptideShaker-1.16.16.jar'
# JVM heap defaults, applied only when the caller supplies no -Xm*
# option and _JAVA_OPTIONS is unset (see jvm_opts).
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when that file is executable;
    otherwise falls back to plain 'java' resolved via $PATH.
    """
    java_home = getenv('JAVA_HOME')
    if not java_home:
        return 'java'
    candidate = os.path.join(java_home, 'bin', 'java')
    if access(candidate, X_OK):
        return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.

    Side effect: when ``--exec_dir=DIR`` names a directory that does not
    yet exist, the PeptideShaker distribution (jar, lib, resources) is
    copied there so the tool can write relative to it.

    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where exec_dir is None unless --exec_dir was given.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None
    for arg in argv:
        if arg.startswith('-D'):
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # Heap options (-Xms/-Xmx) are collected separately so the
            # defaults below can be applied only when none were given.
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            # Split on the first '=' only, so directory names that
            # themselves contain '=' are preserved intact.
            exec_dir = arg.split('=', 1)[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            pass_args.append(arg)
    # In the original shell script the test coded below read:
    # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explictly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Launch PeptideShaker with sanitized JVM options.

    PeptideShaker updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir"
    can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.

    Exits the process with the return code of the spawned Java process.
    """
    java = java_executable()
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
    # A first passthrough argument starting with 'eu' names a Java class to
    # run (PeptideShaker classes live under the 'eu.' namespace), so use the
    # classpath form; otherwise execute the jar's default entry point.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
    sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
    main()
| mit | -5,360,491,323,843,777,000 | 853,957,738,048,421,900 | 31.078431 | 175 | 0.640587 | false |
nguy/brawl4d | LMA/controller.py | 1 | 10240 | """ Support for LMA data display in brawl4d.
These are meant to be lightweight wrappers to coordinate data formats
understood by the lmatools package.
"""
import numpy as np
from lmatools.flashsort.autosort.LMAarrayFile import LMAdataFile
from stormdrain.bounds import Bounds, BoundsFilter
from stormdrain.data import NamedArrayDataset, indexed
from stormdrain.pipeline import Branchpoint, coroutine, ItemModifier
from stormdrain.support.matplotlib.artistupdaters import PanelsScatterController
from stormdrain.support.matplotlib.poly_lasso import LassoPayloadController
class LMAAnimator(object):
    """Skeleton animation driver for LMA data.

    Records a wall-clock start time and a total animation duration; the
    draw_frame/init_draw hooks are no-op placeholders to be overridden.

    Note: ``variable`` is accepted for interface compatibility but is not
    currently used.
    """
    def __init__(self, duration, variable='time'):
        # Local import: this module's header never imports ``time``, so the
        # original code raised NameError on construction.
        import time
        self.tstart = time.time()
        self.duration = duration
    def draw_frame(self, animator, time_fraction):
        """Hook called for each animation frame; override in subclasses."""
        pass
    def init_draw(self, animator):
        """Hook called once before animation starts; override in subclasses."""
        pass
class LMAController(object):
    """ Manages bounds object with LMA-specific criteria. Convenience functions for loading LMA data.

    Maintains:
      - ``bounds``: quality-control Bounds (chi2 in [0.0, 1.0], station count
        6-99) applied to event data before display.
      - ``default_color_bounds``: child Bounds giving the charge color scale.
      - ``datasets`` / ``flash_datasets``: sets of datasets this controller
        has loaded.
    """
    # Map the panels' 'z' coordinate to the LMA 'alt' field, converting km to
    # m and padding 1 km on either side (projection distorts z slightly; the
    # data are filtered again after projection).
    z_alt_mapping = {'z':('alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) ) }
    def __init__(self, *args, **kwargs):
        super(LMAController, self).__init__(*args, **kwargs)
        self.bounds = Bounds(chi2=(0.0, 1.0), stations=(6, 99))
        self.default_color_bounds = Bounds(parent=self.bounds, charge=(-1,1))
        self.datasets = set()
        self.flash_datasets = set()
    # NOTE(review): scatter_kwargs uses a mutable default dict, shared across
    # calls -- harmless as long as callers never mutate it; confirm.
    def pipeline_for_dataset(self, d, panels,
                  names4d=('lon', 'lat', 'alt', 'time'),
                  transform_mapping=None,
                  scatter_kwargs = {}
                  ):
        """ Set 4d_names to the spatial coordinate names in d that provide
            longitude, latitude, altitude, and time. Default of
            lon, lat, alt, and time which are assumed to be in deg, deg, meters, seconds

        entries in the scatter_kwargs dictionary are passed as kwargs to the matplotlib
        scatter call.
        """
        # Set up dataset -> time-height bound filter -> brancher
        branch = Branchpoint([])
        brancher = branch.broadcast()
        # strictly speaking, z in the map projection and MSL alt aren't the same - z is somewhat distorted by the projection.
        # therefore, add some padding. filtered again later after projection.
        quality_filter = BoundsFilter(target=brancher, bounds=self.bounds).filter()
        if transform_mapping is None:
            transform_mapping = self.z_alt_mapping
        # Use 'time', which is the name in panels.bounds, and not names4d[3], which should
        # is linked to 'time' by transform_mapping if necessary
        bound_filter = BoundsFilter(target=quality_filter, bounds=panels.bounds,
                                    restrict_to=('time'), transform_mapping=transform_mapping)
        filterer = bound_filter.filter()
        d.target = filterer
        # Set up brancher -> coordinate transform -> final_filter -> mutli-axis scatter updater
        scatter_ctrl = PanelsScatterController(
                            panels=panels,
                            color_field=names4d[3],
                            default_color_bounds=self.default_color_bounds,
                            **scatter_kwargs)
        scatter_outlet_broadcaster = scatter_ctrl.branchpoint
        scatter_updater = scatter_outlet_broadcaster.broadcast()
        final_bound_filter = BoundsFilter(target=scatter_updater, bounds=panels.bounds)
        final_filterer = final_bound_filter.filter()
        cs_transformer = panels.cs.project_points(
                            target=final_filterer,
                            x_coord='x', y_coord='y', z_coord='z',
                            lat_coord=names4d[1], lon_coord=names4d[0], alt_coord=names4d[2],
                            distance_scale_factor=1.0e-3)
        branch.targets.add(cs_transformer)
        # return each broadcaster so that other things can tap into results of transformation of this dataset
        return branch, scatter_ctrl
    @coroutine
    def flash_stat_printer(self, min_points=10):
        # Coroutine: receives (events, flashes) pairs and prints a summary of
        # how many flashes exceed min_points and their mean area.
        while True:
            ev, fl = (yield)
            template = "{0} of {1} flashes have > {3} points. Their average area = {2:5.1f} km^2"
            N = len(fl)
            good = (fl['n_points'] >= min_points)
            N_good = len(fl[good])
            area = np.mean(fl['area'][good])
            print template.format(N_good, N, area, min_points)
    def flash_stats_for_dataset(self, d, selection_broadcaster):
        # Wire a selection broadcaster to a per-selection flash lookup, then
        # to the stats printer. Returns the branchpoint so callers can add
        # their own consumers of (events, flashes) pairs.
        flash_stat_branchpoint = Branchpoint([self.flash_stat_printer()])
        flash_stat_brancher = flash_stat_branchpoint.broadcast()
        @coroutine
        def flash_data_for_selection(target, flash_id_key = 'flash_id'):
            """ Accepts an array of event data from the pipeline, and sends
                event and flash data.
            """
            while True:
                ev = (yield) # array of event data
                fl_dat = d.flash_data
                flash_ids = set(ev[flash_id_key])
                flashes = np.fromiter(
                            (fl for fl in fl_dat if fl[flash_id_key] in flash_ids),
                            dtype=fl_dat.dtype)
                target.send((ev, flashes))
        selection_broadcaster.targets.add(flash_data_for_selection(flash_stat_brancher))
        return flash_stat_branchpoint
    @indexed()
    def read_dat(self, *args, **kwargs):
        """ All args and kwargs are passed to the LMAdataFile object from lmatools"""
        lma = LMAdataFile(*args, **kwargs)
        stn = lma.stations # adds stations to lma.data as a side-effect
        d = NamedArrayDataset(lma.data)
        self.datasets.add(d)
        return d
    def load_dat_to_panels(self, panels, *args, **kwargs):
        """ All args and kwargs are passed to the LMAdataFile object from lmatools"""
        d = self.read_dat(*args, **kwargs)
        post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels)
        branch_to_scatter_artists = scatter_ctrl.branchpoint
        # ask for a copy of the array from each selection operation, so that
        # it's saved and ready for any lasso operations
        charge_lasso = LassoChargeController(
                            target=ItemModifier(
                                target=d.update(field_names=['charge']),
                                item_name='charge').modify())
        branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
        return d, post_filter_brancher, scatter_ctrl, charge_lasso
    @indexed(index_name='hdf_row_idx')
    def read_hdf5(self, LMAfileHDF):
        # Returns an HDF5Dataset for the first table under /events, or None
        # if pytables is unavailable. Opens the file read-only just to find
        # the table name; HDF5Dataset reopens it in append mode.
        try:
            import tables
        except ImportError:
            print "couldn't import pytables"
            return None
        from hdf5_lma import HDF5Dataset
        # get the HDF5 table name
        LMAh5 = tables.openFile(LMAfileHDF, 'r')
        table_names = LMAh5.root.events._v_children.keys()
        table_path = '/events/' + table_names[0]
        LMAh5.close()
        d = HDF5Dataset(LMAfileHDF, table_path=table_path, mode='a')
        self.datasets.add(d)
        if d.flash_table is not None:
            print "found flash data"
        return d
    # NOTE(review): scatter_kwargs mutable default again, see above.
    def load_hdf5_to_panels(self, panels, LMAfileHDF, scatter_kwargs={}):
        # Same wiring as load_dat_to_panels, but rows are addressed through
        # the 'hdf_row_idx' index when writing charge values back.
        d = self.read_hdf5(LMAfileHDF)
        post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels, scatter_kwargs=scatter_kwargs)
        branch_to_scatter_artists = scatter_ctrl.branchpoint
        charge_lasso = LassoChargeController(
                            target=ItemModifier(
                                target=d.update(index_name='hdf_row_idx',
                                                field_names=['charge']),
                                item_name='charge').modify())
        branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
        return d, post_filter_brancher, scatter_ctrl, charge_lasso
    def load_hdf5_flashes_to_panels(self, panels, hdf5dataset, min_points=10):
        """ Set up a flash dataset display. The sole argument is usually the HDF5
            LMA dataset returned by a call to self.load_hdf5_to_panels """
        from hdf5_lma import HDF5FlashDataset
        if hdf5dataset.flash_table is not None:
            point_count_dtype = hdf5dataset.flash_data['n_points'].dtype
            self.bounds.n_points = (min_points, np.iinfo(point_count_dtype))
            flash_d = HDF5FlashDataset(hdf5dataset)
            # Flashes use initiation-point coordinate names; map them onto the
            # panels' canonical time/lat/lon/z names.
            transform_mapping = {}
            transform_mapping['time'] = ('start', (lambda v: (v[0], v[1])) )
            transform_mapping['lat'] = ('init_lat', (lambda v: (v[0], v[1])) )
            transform_mapping['lon'] = ('init_lon', (lambda v: (v[0], v[1])) )
            transform_mapping['z'] = ('init_alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) )
            flash_post_filter_brancher, flash_scatter_ctrl = self.pipeline_for_dataset(flash_d, panels,
                                            transform_mapping=transform_mapping,
                                            names4d=('init_lon', 'init_lat', 'init_alt', 'start') )
            for art in flash_scatter_ctrl.artist_outlet_controllers:
                # there is no time variable, but the artist updater is set to expect
                # time. Patch that up.
                if art.coords == ('time', 'z'):
                    art.coords = ('start', 'z')
                # Draw flash markers in a different style
                art.artist.set_edgecolor('k')
        self.flash_datasets.add(flash_d)
        return flash_d, flash_post_filter_brancher, flash_scatter_ctrl
class LassoChargeController(LassoPayloadController):
""" The "charge" attribute is one of {-1, 0, 1} to set
negative, unclassified, or positive charge, or None
to do nothing.
"""
charge = LassoPayloadController.Payload() | bsd-2-clause | -6,210,142,639,579,417,000 | -4,000,469,828,362,510,300 | 45.130631 | 125 | 0.578906 | false |
DirtyUnicorns/android_external_chromium-org | tools/telemetry_tools/telemetry_bootstrap.py | 24 | 5468 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstrap Chrome Telemetry by downloading all its files from SVN servers.
Requires a DEPS file to specify which directories on which SVN servers
are required to run Telemetry. Format of that DEPS file is a subset of the
normal DEPS file format[1]; currently only only the "deps" dictionary is
supported and nothing else.
Fetches all files in the specified directories using WebDAV (SVN is WebDAV under
the hood).
[1] http://dev.chromium.org/developers/how-tos/depottools#TOC-DEPS-file
"""
import imp
import logging
import os
import urllib
import urlparse
# Dummy module for DAVclient.
davclient = None
# Link to file containing the 'davclient' WebDAV client library.
_DAVCLIENT_URL = ('https://src.chromium.org/chrome/trunk/src/tools/'
'telemetry/third_party/davclient/davclient.py')
def _DownloadAndImportDAVClientModule():
  """Dynamically import davclient helper library.

  Fetches the davclient source over HTTPS and exec's it into a fresh module
  object bound to the module-global ``davclient``.

  NOTE(review): this executes code downloaded from the network; safe only
  because the URL is a trusted, pinned Chromium source location.
  """
  global davclient
  davclient_src = urllib.urlopen(_DAVCLIENT_URL).read()
  davclient = imp.new_module('davclient')
  exec davclient_src in davclient.__dict__
class DAVClientWrapper():
  """Knows how to retrieve subdirectories and files from WebDAV/SVN servers."""
  def __init__(self, root_url):
    """Initialize SVN server root_url, save files to local dest_dir.

    Args:
      root_url: string url of SVN/WebDAV server
    """
    self.root_url = root_url
    self.client = davclient.DAVClient(root_url)
  @staticmethod
  def __norm_path_keys(dict_with_path_keys):
    """Returns a dictionary with os.path.normpath called on every key."""
    return dict((os.path.normpath(k), v) for (k, v) in
                dict_with_path_keys.items())
  def GetDirList(self, path):
    """Returns string names of all files and subdirs of path on the server."""
    # depth=1 PROPFIND returns the path itself plus its immediate children.
    props = self.__norm_path_keys(self.client.propfind(path, depth=1))
    # remove this path
    del props[os.path.normpath(path)]
    return [os.path.basename(p) for p in props.keys()]
  def IsFile(self, path):
    """Returns True if the path is a file on the server, False if directory."""
    props = self.__norm_path_keys(self.client.propfind(path, depth=1))
    # WebDAV collections carry a resourcetype; plain files have None.
    return props[os.path.normpath(path)]['resourcetype'] is None
  def Traverse(self, src_path, dst_path):
    """Walks the directory hierarchy pointed to by src_path download all files.

    Recursively walks src_path and saves all files and subfolders into
    dst_path.  Files already present locally are skipped, not re-fetched.

    Args:
      src_path: string path on SVN server to save (absolute path on server).
      dest_path: string local path (relative or absolute) to save to.
    """
    if self.IsFile(src_path):
      if not os.path.exists(os.path.dirname(dst_path)):
        logging.info('Creating %s', os.path.dirname(dst_path))
        os.makedirs(os.path.dirname(dst_path))
      if os.path.isfile(dst_path):
        logging.info('Skipping %s', dst_path)
      else:
        logging.info('Saving %s to %s', self.root_url + src_path, dst_path)
        urllib.urlretrieve(self.root_url + src_path, dst_path)
      return
    else:
      for subdir in self.GetDirList(src_path):
        self.Traverse(os.path.join(src_path, subdir),
                      os.path.join(dst_path, subdir))
def ListAllDepsPaths(deps_file):
  """Recursively returns a list of all paths indicated in this deps file.

  Note that this discards information about where path dependencies come from,
  so this is only useful in the context of a Chromium source checkout that has
  already fetched all dependencies.

  Args:
    deps_file: File containing deps information to be evaluated, in the
               format given in the header of this file.
  Returns:
    A list of string paths starting under src that are required by the
    given deps file, and all of its sub-dependencies. This amounts to
    the keys of the 'deps' dictionary.
  """
  deps = {}
  deps_includes = {}
  # Walk upward until we find the 'src' checkout root; include paths in the
  # deps file are relative to its parent.
  chrome_root = os.path.dirname(__file__)
  while os.path.basename(chrome_root) != 'src':
    chrome_root = os.path.abspath(os.path.join(chrome_root, os.pardir))
  # Evaluating the deps file populates the 'deps' / 'deps_includes' dicts
  # defined above (trusted, checked-in file).
  exec open(deps_file).read()
  deps_paths = deps.keys()
  for path in deps_includes.keys():
    # Need to localize the paths.
    path = os.path.join(chrome_root, os.pardir, path)
    deps_paths += ListAllDepsPaths(path)
  return deps_paths
def DownloadDeps(destination_dir, url):
  """Saves all the dependencies in deps_path.

  Opens and reads url, assuming the contents are in the simple DEPS-like file
  format specified in the header of this file, then download all
  files/directories listed to the destination_dir.  Recurses into any URLs
  listed in 'deps_includes'.

  Args:
    destination_dir: String path to directory to download files into.
    url: URL containing deps information to be evaluated.
  """
  logging.warning('Downloading deps from %s...', url)
  # TODO(wiltzius): Add a parameter for which revision to pull.
  _DownloadAndImportDAVClientModule()
  deps = {}
  deps_includes = {}
  # Evaluating the fetched deps file populates 'deps' / 'deps_includes'.
  # NOTE(review): this exec's remote content; acceptable only for trusted
  # Chromium SVN URLs.
  exec urllib.urlopen(url).read()
  for dst_path, src_path in deps.iteritems():
    full_dst_path = os.path.join(destination_dir, dst_path)
    parsed_url = urlparse.urlparse(src_path)
    root_url = parsed_url.scheme + '://' + parsed_url.netloc
    dav_client = DAVClientWrapper(root_url)
    dav_client.Traverse(parsed_url.path, full_dst_path)
  for url in deps_includes.values():
    DownloadDeps(destination_dir, url)
| bsd-3-clause | -9,060,715,623,648,556,000 | -4,240,138,147,008,771,000 | 33.175 | 80 | 0.699525 | false |
troya2/pjsip | pjsip-apps/src/swig/python/test.py | 44 | 3447 | import pjsua2 as pj
import sys
import time
#
# Basic data structure test, to make sure basic struct
# and array operations work
#
def ua_data_test():
    """Exercise pjsua2 value types: AuthCredInfo, UaConfig and StringVector."""
    #
    # AuthCredInfo
    #
    print "UA data types test.."
    the_realm = "pjsip.org"
    ci = pj.AuthCredInfo()
    ci.realm = the_realm
    ci.dataType = 20
    # NOTE(review): plain assignment aliases the same wrapped object; the
    # asserts below therefore check attribute round-tripping, not copying.
    ci2 = ci
    assert ci.dataType == 20
    assert ci2.realm == the_realm
    #
    # UaConfig
    # See here how we manipulate std::vector
    #
    uc = pj.UaConfig()
    uc.maxCalls = 10
    uc.userAgent = "Python"
    uc.nameserver = pj.StringVector(["10.0.0.1", "10.0.0.2"])
    uc.nameserver.append("NS1")
    uc2 = uc
    assert uc2.maxCalls == 10
    assert uc2.userAgent == "Python"
    assert len(uc2.nameserver) == 3
    assert uc2.nameserver[0] == "10.0.0.1"
    assert uc2.nameserver[1] == "10.0.0.2"
    assert uc2.nameserver[2] == "NS1"
    print " Dumping nameservers: ",
    for s in uc2.nameserver:
        print s,
    print ""
#
# Exception test
#
def ua_run_test_exception():
    """Verify that a failing pjsua2 call surfaces as a pj.Error exception."""
    print "Exception test.."
    ep = pj.Endpoint()
    ep.libCreate()
    got_exception = False
    try:
        # No STUN server is configured, so NAT detection must fail.
        ep.natDetectType()
    except pj.Error, e:
        got_exception = True
        print " Got exception: status=%u, reason=%s,\n title=%s,\n srcFile=%s, srcLine=%d" % \
            (e.status, e.reason, e.title, e.srcFile, e.srcLine)
        # 370050 == PJNATH_ESTUNINSERVER (no STUN server specified).
        assert e.status == 370050
        assert e.reason.find("PJNATH_ESTUNINSERVER") >= 0
        assert e.title == "pjsua_detect_nat_type()"
    assert got_exception
#
# Custom log writer
#
class MyLogWriter(pj.LogWriter):
    """Log writer callback that routes pjsua2 log entries through Python."""
    def write(self, entry):
        # Called from the library for every log entry.
        print "This is Python:", entry.msg
#
# Testing log writer callback
#
def ua_run_log_test():
    """Verify the Python log-writer callback is invoked during lib init."""
    print "Logging test.."
    ep_cfg = pj.EpConfig()
    lw = MyLogWriter()
    ep_cfg.logConfig.writer = lw
    # Strip trailing CR/newline decoration since print adds its own newline.
    ep_cfg.logConfig.decor = ep_cfg.logConfig.decor & ~(pj.PJ_LOG_HAS_CR | pj.PJ_LOG_HAS_NEWLINE)
    ep = pj.Endpoint()
    ep.libCreate()
    ep.libInit(ep_cfg)
    ep.libDestroy()
#
# Simple create, init, start, and destroy sequence
#
def ua_run_ua_test():
    """Run the full endpoint lifecycle: create, init, start, destroy."""
    print "UA test run.."
    ep_cfg = pj.EpConfig()
    ep = pj.Endpoint()
    ep.libCreate()
    ep.libInit(ep_cfg)
    ep.libStart()
    print "************* Endpoint started ok, now shutting down... *************"
    ep.libDestroy()
#
# Tone generator
#
def ua_tonegen_test():
    """Play a dual-frequency tone and DTMF digits on the playback device.

    Requires a working audio device; each playback段 sleeps 5 seconds, so
    this test takes ~15 seconds of wall time.
    """
    print "UA tonegen test.."
    ep_cfg = pj.EpConfig()
    ep = pj.Endpoint()
    ep.libCreate()
    ep.libInit(ep_cfg)
    ep.libStart()
    tonegen = pj.ToneGenerator()
    tonegen.createToneGenerator()
    # One 400+600 Hz tone, 1 s on / 1 s off.
    tone = pj.ToneDesc()
    tone.freq1 = 400
    tone.freq2 = 600
    tone.on_msec = 1000
    tone.off_msec = 1000
    tones = pj.ToneDescVector()
    tones.append(tone)
    # One DTMF digit '0', 1 s on / 1 s off.
    digit = pj.ToneDigit()
    digit.digit = '0'
    digit.on_msec = 1000
    digit.off_msec = 1000
    digits = pj.ToneDigitVector()
    digits.append(digit)
    adm = ep.audDevManager()
    spk = adm.getPlaybackDevMedia()
    tonegen.play(tones, True)
    tonegen.startTransmit(spk)
    time.sleep(5)
    tonegen.stop()
    tonegen.playDigits(digits, True)
    time.sleep(5)
    # Exercise digit-map get/set round trip.
    dm = tonegen.getDigitMap()
    print dm[0].digit
    dm[0].freq1 = 400
    dm[0].freq2 = 600
    tonegen.setDigitMap(dm)
    tonegen.stop()
    tonegen.playDigits(digits, True)
    time.sleep(5)
    # Drop our reference before the library is destroyed.
    tonegen = None
    ep.libDestroy()
#
# main()
#
# Run every test in sequence; any assertion failure aborts the run.
if __name__ == "__main__":
    ua_data_test()
    ua_run_test_exception()
    ua_run_log_test()
    ua_run_ua_test()
    ua_tonegen_test()
    sys.exit(0)
| gpl-2.0 | 8,007,554,818,144,679,000 | -2,793,494,711,541,843,000 | 19.39645 | 95 | 0.619379 | false |
ashokpant/clandmark | python_interface/bin/flandmark_demo.py | 6 | 2152 | import numpy as np
import os
from fnmatch import fnmatch
from py_flandmark import PyFlandmark
from PIL import Image
import ImageDraw
import matplotlib.pyplot as plt
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale.

    Uses the ITU-R 601 luma weights 0.299 R + 0.587 G + 0.114 B -- the
    formula the original comment cited from Wikipedia.  The blue weight was
    previously mistyped as 0.144, which made the weights sum to 1.030 and
    inflated every gray value by ~3%.

    Args:
        rgb: array of shape (..., 3) (extra trailing channels such as alpha
            are ignored) with R, G, B along the last axis.

    Returns:
        Array of shape (...) holding the weighted grayscale intensities.
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def read_bbox_from_txt(file_name):
    """Read a face bounding box from a text file.

    The file holds four integers (comma- and/or whitespace-separated):
    x, y of the upper-left corner followed by x, y of the lower-right corner.

    Args:
        file_name: path to the .det text file.

    Returns:
        np.int32 array of shape (2, 2); column 0 is the upper-left corner and
        column 1 the lower-right corner (Fortran column order, matching the
        original layout).
    """
    # 'with' guarantees the handle is closed even on error; also avoid
    # shadowing the builtin 'str' as the original code did.
    with open(file_name) as f:
        text = f.read().replace(',', ' ')
    # list(map(...)) keeps this working on Python 3, where map() returns a
    # lazy iterator that np.array would not expand into elements.
    values = np.array(list(map(int, text.split())), dtype=np.int32)
    return values.reshape((2, 2), order='F')
# Demo driver: for the first .jpg in the data directory, run detection and
# display landmarks. Paths are relative to the python_interface/bin cwd.
DIR = '../../../data/Images/'
JPGS = [f for f in os.listdir(DIR) if fnmatch(f, '*.jpg')]
flmrk = PyFlandmark("../../../data/flandmark_model.xml", False)
for jpg_name in JPGS:
	file_name = jpg_name[:-4]
	img = Image.open(DIR + jpg_name)
	arr = rgb2gray(np.asarray(img))
	# Each image has a matching .det file with the face bounding box.
	bbox = read_bbox_from_txt(DIR + jpg_name[:-4] + '.det')
	d_landmarks = flmrk.detect(arr, bbox)
	n = d_landmarks.shape[1]
	print "test detect method"
	# Draw the bounding box and detected landmarks on the gray image.
	im = Image.fromarray(arr)
	img_dr = ImageDraw.Draw(im)
	img_dr.rectangle([tuple(bbox[:,0]), tuple(bbox[:,1])], outline="#FF00FF")
	r = 2.
	for i in xrange(n):
		x = d_landmarks[0,i]
		y = d_landmarks[1,i]
		img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
	plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
	plt.show()
	# NOTE(review): this label duplicates the one above; it presumably was
	# meant to read "test get_normalized_frame method" -- confirm.
	print "test detect method"
	frame = flmrk.get_normalized_frame(arr, bbox)[0]
	frame = frame.astype(np.double)
	im = Image.fromarray(frame)
	plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
	plt.show()
	print "test detect_base method"
	landmarks = flmrk.detect_base(frame)
	im = Image.fromarray(frame)
	img_dr = ImageDraw.Draw(im)
	r = 2.
	for i in xrange(n):
		x = landmarks[0,i]
		y = landmarks[1,i]
		img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
	plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
	plt.show()
	print "test psi method"
	psi = flmrk.get_psi(frame, landmarks.astype(np.int32), bbox)
	#flmrk.get_psi(d_landmarks, arr, bbox)
	# Only the first image is processed in this demo.
	break
mzdanieltest/pex | pex/interpreter.py | 52 | 12996 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import os
import re
import subprocess
import sys
from collections import defaultdict
from pkg_resources import Distribution, Requirement, find_distributions
from .base import maybe_requirement
from .compatibility import string
from .tracer import TRACER
try:
from numbers import Integral
except ImportError:
Integral = (int, long)
# Determine in the most platform-compatible way possible the identity of the interpreter
# and its known packages.
ID_PY = b"""
import sys
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
print("%s %s %s %s" % (
subversion,
sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
setuptools_path = None
try:
import pkg_resources
except ImportError:
sys.exit(0)
requirements = {}
for item in sys.path:
for dist in pkg_resources.find_distributions(item):
requirements[str(dist.as_requirement())] = dist.location
for requirement_str, location in requirements.items():
rs = requirement_str.split('==', 2)
if len(rs) == 2:
print('%s %s %s' % (rs[0], rs[1], location))
"""
class PythonIdentity(object):
  """Identity of a Python interpreter: implementation name plus version triple.

  Behaves enough like a distribution that pkg_resources Requirements (e.g.
  'CPython>=2.7,<3') can be matched against it.
  """
  class Error(Exception): pass
  class InvalidError(Error): pass
  class UnknownRequirement(Error): pass
  # TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1
  HASHBANGS = {
    'CPython': 'python%(major)d.%(minor)d',
    'Jython': 'jython',
    'PyPy': 'pypy',
  }
  @classmethod
  def get_subversion(cls):
    """Return the implementation name of the running interpreter."""
    if hasattr(sys, 'pypy_version_info'):
      subversion = 'PyPy'
    elif sys.platform.startswith('java'):
      subversion = 'Jython'
    else:
      subversion = 'CPython'
    return subversion
  @classmethod
  def get(cls):
    """Return the identity of the currently running interpreter."""
    return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2])
  @classmethod
  def from_id_string(cls, id_string):
    """Parse an identity from 'IMPL MAJOR MINOR PATCH' (as emitted by ID_PY)."""
    values = id_string.split()
    if len(values) != 4:
      raise cls.InvalidError("Invalid id string: %s" % id_string)
    return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3]))
  @classmethod
  def from_path(cls, dirname):
    """Parse an identity from a directory name like 'CPython-2.7.8'."""
    interp, version = dirname.split('-')
    major, minor, patch = version.split('.')
    return cls(str(interp), int(major), int(minor), int(patch))
  def __init__(self, interpreter, major, minor, patch):
    # major/minor/patch must be integers so comparisons are numeric.
    for var in (major, minor, patch):
      assert isinstance(var, Integral)
    self._interpreter = interpreter
    self._version = (major, minor, patch)
  @property
  def interpreter(self):
    # Implementation name: 'CPython', 'Jython' or 'PyPy'.
    return self._interpreter
  @property
  def version(self):
    # (major, minor, patch) tuple of ints.
    return self._version
  @property
  def requirement(self):
    return self.distribution.as_requirement()
  @property
  def distribution(self):
    # Present this identity as a pkg_resources Distribution so it can be
    # tested for membership in a Requirement.
    return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version)))
  @classmethod
  def parse_requirement(cls, requirement, default_interpreter='CPython'):
    """Coerce a Requirement or string into a Requirement.

    A bare version spec such as '>=2.7' is interpreted against
    default_interpreter.
    """
    if isinstance(requirement, Requirement):
      return requirement
    elif isinstance(requirement, string):
      try:
        requirement = Requirement.parse(requirement)
      except ValueError:
        try:
          requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
        except ValueError:
          raise ValueError('Unknown requirement string: %s' % requirement)
      return requirement
    else:
      raise ValueError('Unknown requirement type: %r' % (requirement,))
  def matches(self, requirement):
    """Given a Requirement, check if this interpreter matches."""
    try:
      requirement = self.parse_requirement(requirement, self._interpreter)
    except ValueError as e:
      raise self.UnknownRequirement(str(e))
    return self.distribution in requirement
  def hashbang(self):
    """Return a '#!/usr/bin/env ...' line appropriate for this identity."""
    # NOTE(review): for an interpreter missing from HASHBANGS this yields
    # '#!/usr/bin/env CPython' (the literal default, not the CPython
    # template); all currently-known interpreters are in the dict.
    hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % {
      'major': self._version[0],
      'minor': self._version[1],
      'patch': self._version[2],
    }
    return '#!/usr/bin/env %s' % hashbang_string
  @property
  def python(self):
    # return the python version in the format of the 'python' key for distributions
    # specifically, '2.6', '2.7', '3.2', etc.
    return '%d.%d' % (self.version[0:2])
  def __str__(self):
    return '%s-%s.%s.%s' % (self._interpreter,
                            self._version[0], self._version[1], self._version[2])
  def __repr__(self):
    return 'PythonIdentity(%r, %s, %s, %s)' % (
      self._interpreter, self._version[0], self._version[1], self._version[2])
  def __eq__(self, other):
    return all([isinstance(other, PythonIdentity),
                self.interpreter == other.interpreter,
                self.version == other.version])
  def __hash__(self):
    return hash((self._interpreter, self._version))
class PythonInterpreter(object):
  """A concrete Python binary on disk plus its identity and installed extras.

  Instances are discovered on the filesystem (see find/all), identified
  either in-process or by running the binary with the ID_PY probe script,
  and memoized per-binary in CACHE.
  """
  # Filename patterns that look like Python interpreter binaries.
  REGEXEN = (
    re.compile(r'jython$'),
    # NB: OSX ships python binaries named Python so we allow for capital-P.
    re.compile(r'[Pp]ython$'),
    re.compile(r'python[23].[0-9]$'),
    re.compile(r'pypy$'),
    re.compile(r'pypy-1.[0-9]$'),
  )
  CACHE = {}  # memoize executable => PythonInterpreter
  try:
    # Versions of distribute prior to the setuptools merge would automatically replace
    # 'setuptools' requirements with 'distribute'.  It provided the 'replacement' kwarg
    # to toggle this, but it was removed post-merge.
    COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False)
  except TypeError:
    COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')
  class Error(Exception): pass
  class IdentificationError(Error): pass
  class InterpreterNotFound(Error): pass
  @classmethod
  def get(cls):
    """Return the PythonInterpreter for the currently running binary."""
    return cls.from_binary(sys.executable)
  @classmethod
  def all(cls, paths=None):
    """Find and filter interpreters on *paths* (default: $PATH)."""
    if paths is None:
      paths = os.getenv('PATH', '').split(':')
    return cls.filter(cls.find(paths))
  @classmethod
  def _parse_extras(cls, output_lines):
    # Parse 'name version location' lines emitted by the ID_PY probe into
    # a {(name, version): location} dict.
    def iter_lines():
      for line in output_lines:
        try:
          dist_name, dist_version, location = line.split()
        except ValueError:
          raise cls.IdentificationError('Could not identify requirement: %s' % line)
        yield ((dist_name, dist_version), location)
    return dict(iter_lines())
  @classmethod
  def _from_binary_internal(cls, path_extras):
    # Identify the *current* interpreter without spawning a subprocess.
    def iter_extras():
      for item in sys.path + list(path_extras):
        for dist in find_distributions(item):
          if dist.version:
            yield ((dist.key, dist.version), dist.location)
    return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))
  @classmethod
  def _from_binary_external(cls, binary, path_extras):
    # Identify a foreign interpreter by piping the ID_PY probe script into it.
    environ = cls.sanitized_environment()
    environ['PYTHONPATH'] = ':'.join(path_extras)
    po = subprocess.Popen(
        [binary],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        env=environ)
    so, _ = po.communicate(ID_PY)
    output = so.decode('utf8').splitlines()
    if len(output) == 0:
      raise cls.IdentificationError('Could not establish identity of %s' % binary)
    # First line is the identity string; the rest are installed extras.
    identity, extras = output[0], output[1:]
    return cls(
        binary,
        PythonIdentity.from_id_string(identity),
        extras=cls._parse_extras(extras))
  @classmethod
  def expand_path(cls, path):
    """Return [path] for a file, the directory listing for a dir, else []."""
    if os.path.isfile(path):
      return [path]
    elif os.path.isdir(path):
      return [os.path.join(path, fn) for fn in os.listdir(path)]
    return []
  @classmethod
  def from_env(cls, hashbang):
    """Resolve a PythonInterpreter as /usr/bin/env would.

    :param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
    """
    paths = os.getenv('PATH', '').split(':')
    for path in paths:
      for fn in cls.expand_path(path):
        basefile = os.path.basename(fn)
        if hashbang == basefile:
          try:
            return cls.from_binary(fn)
          except Exception as e:
            TRACER.log('Could not identify %s: %s' % (fn, e))
  @classmethod
  def from_binary(cls, binary, path_extras=None):
    """Return the (cached) PythonInterpreter for the given binary path."""
    path_extras = path_extras or ()
    if binary not in cls.CACHE:
      if binary == sys.executable:
        cls.CACHE[binary] = cls._from_binary_internal(path_extras)
      else:
        cls.CACHE[binary] = cls._from_binary_external(binary, path_extras)
    return cls.CACHE[binary]
  @classmethod
  def find(cls, paths):
    """
    Given a list of files or directories, try to detect python interpreters amongst them.
    Returns a list of PythonInterpreter objects.
    """
    pythons = []
    for path in paths:
      for fn in cls.expand_path(path):
        basefile = os.path.basename(fn)
        if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):
          try:
            pythons.append(cls.from_binary(fn))
          except Exception as e:
            # Not a working interpreter (or not runnable); skip it.
            TRACER.log('Could not identify %s: %s' % (fn, e))
            continue
    return pythons
  @classmethod
  def filter(cls, pythons):
    """
    Given a map of python interpreters in the format provided by PythonInterpreter.find(),
    filter out duplicate versions and versions we would prefer not to use.

    Returns a map in the same format as find.
    """
    good = []
    MAJOR, MINOR, SUBMINOR = range(3)
    # Supported interpreter versions: 2.6+ or 3.2+.
    def version_filter(version):
      return (version[MAJOR] == 2 and version[MINOR] >= 6 or
              version[MAJOR] == 3 and version[MINOR] >= 2)
    all_versions = set(interpreter.identity.version for interpreter in pythons)
    good_versions = filter(version_filter, all_versions)
    for version in good_versions:
      # For each candidate, use the latest version we find on the filesystem.
      candidates = defaultdict(list)
      for interp in pythons:
        if interp.identity.version == version:
          candidates[interp.identity.interpreter].append(interp)
      for interp_class in candidates:
        candidates[interp_class].sort(
            key=lambda interp: os.path.getmtime(interp.binary), reverse=True)
        good.append(candidates[interp_class].pop(0))
    return good
  @classmethod
  def sanitized_environment(cls):
    # N.B. This is merely a hack because sysconfig.py on the default OS X
    # installation of 2.6/2.7 breaks.
    env_copy = os.environ.copy()
    env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
    return env_copy
  @classmethod
  def replace(cls, requirement):
    """Re-exec under an interpreter matching *requirement* if needed.

    Returns False if the current interpreter already matches; otherwise
    replaces the current process via os.execve (does not return) or raises
    InterpreterNotFound.
    """
    self = cls.get()
    if self.identity.matches(requirement):
      return False
    for pi in cls.all():
      if pi.identity.matches(requirement):
        break
    else:
      raise cls.InterpreterNotFound('Could not find interpreter matching filter!')
    os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment())
  def __init__(self, binary, identity, extras=None):
    """Construct a PythonInterpreter.

    You should probably PythonInterpreter.from_binary instead.

    :param binary: The full path of the python binary.
    :param identity: The :class:`PythonIdentity` of the PythonInterpreter.
    :param extras: A mapping from (dist.key, dist.version) to dist.location
                   of the extras associated with this interpreter.
    """
    self._binary = os.path.realpath(binary)
    self._extras = extras or {}
    self._identity = identity
  def with_extra(self, key, version, location):
    """Return a copy of this interpreter with one additional extra recorded."""
    extras = self._extras.copy()
    extras[(key, version)] = location
    return self.__class__(self._binary, self._identity, extras)
  @property
  def extras(self):
    # Defensive copy: callers must not mutate our internal mapping.
    return self._extras.copy()
  @property
  def binary(self):
    return self._binary
  @property
  def identity(self):
    return self._identity
  @property
  def python(self):
    return self._identity.python
  @property
  def version(self):
    return self._identity.version
  @property
  def version_string(self):
    return str(self._identity)
  def satisfies(self, capability):
    """Return True if every Requirement in *capability* has a known location."""
    if not isinstance(capability, list):
      raise TypeError('Capability must be a list, got %s' % type(capability))
    return not any(self.get_location(req) is None for req in capability)
  def get_location(self, req):
    """Return the install location satisfying *req*, or None if absent."""
    req = maybe_requirement(req)
    for dist, location in self.extras.items():
      dist_name, dist_version = dist
      if req.key == dist_name and dist_version in req:
        return location
  def __hash__(self):
    return hash((self._binary, self._identity))
  def __eq__(self, other):
    if not isinstance(other, PythonInterpreter):
      return False
    return (self._binary, self._identity) == (other._binary, other._identity)
  def __lt__(self, other):
    # Ordering is by interpreter version only.
    if not isinstance(other, PythonInterpreter):
      return False
    return self.version < other.version
  def __repr__(self):
    return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
| apache-2.0 | 7,974,895,620,309,284,000 | -5,527,795,388,632,762,000 | 29.578824 | 99 | 0.657433 | false |
apache/bloodhound | trac/trac/tests/core.py | 2 | 13792 | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from trac.core import *
import unittest
class ITest(Interface):
    """Extension-point interface used as a fixture by the component tests."""
    def test():
        """Dummy function."""
class IOtherTest(Interface):
    """Second extension-point interface fixture, distinct from ITest."""
    def other_test():
        """Other dummy function."""
class ComponentTestCase(unittest.TestCase):
def setUp(self):
from trac.core import ComponentManager, ComponentMeta
self.compmgr = ComponentManager()
# Make sure we have no external components hanging around in the
# component registry
self.old_registry = ComponentMeta._registry
ComponentMeta._registry = {}
def tearDown(self):
# Restore the original component registry
from trac.core import ComponentMeta
ComponentMeta._registry = self.old_registry
def test_base_class_not_registered(self):
"""
Make sure that the Component base class does not appear in the component
registry.
"""
from trac.core import ComponentMeta
assert Component not in ComponentMeta._components
self.assertRaises(TracError, self.compmgr.__getitem__, Component)
def test_abstract_component_not_registered(self):
"""
Make sure that a Component class marked as abstract does not appear in
the component registry.
"""
from trac.core import ComponentMeta
class AbstractComponent(Component):
abstract = True
assert AbstractComponent not in ComponentMeta._components
self.assertRaises(TracError, self.compmgr.__getitem__,
AbstractComponent)
def test_unregistered_component(self):
"""
Make sure the component manager refuses to manage classes not derived
from `Component`.
"""
class NoComponent(object):
pass
self.assertRaises(TracError, self.compmgr.__getitem__, NoComponent)
def test_component_registration(self):
"""
Verify that classes derived from `Component` are managed by the
component manager.
"""
class ComponentA(Component):
pass
assert self.compmgr[ComponentA]
assert ComponentA(self.compmgr)
def test_component_identity(self):
"""
Make sure instantiating a component multiple times just returns the
same instance again.
"""
class ComponentA(Component):
pass
c1 = ComponentA(self.compmgr)
c2 = ComponentA(self.compmgr)
assert c1 is c2, 'Expected same component instance'
c2 = self.compmgr[ComponentA]
assert c1 is c2, 'Expected same component instance'
def test_component_initializer(self):
"""
Makes sure that a components' `__init__` method gets called.
"""
class ComponentA(Component):
def __init__(self):
self.data = 'test'
self.assertEqual('test', ComponentA(self.compmgr).data)
ComponentA(self.compmgr).data = 'newtest'
self.assertEqual('newtest', ComponentA(self.compmgr).data)
def test_inherited_component_initializer(self):
"""
Makes sure that a the `__init__` method of a components' super-class
gets called if the component doesn't override it.
"""
class ComponentA(Component):
def __init__(self):
self.data = 'foo'
class ComponentB(ComponentA):
def __init__(self):
self.data = 'bar'
class ComponentC(ComponentB):
pass
self.assertEqual('bar', ComponentC(self.compmgr).data)
ComponentC(self.compmgr).data = 'baz'
self.assertEqual('baz', ComponentC(self.compmgr).data)
def test_implements_called_outside_classdef(self):
"""
Verify that calling implements() outside a class definition raises an
`AssertionError`.
"""
try:
implements()
except AssertionError:
pass
else:
self.fail('Expected AssertionError')
def test_implements_multiple(self):
"""
Verify that a component "implementing" an interface more than once
(e.g. through inheritance) is not called more than once from an
extension point.
"""
log = []
class Parent(Component):
abstract = True
implements(ITest)
class Child(Parent):
implements(ITest)
def test(self):
log.append("call")
class Other(Component):
tests = ExtensionPoint(ITest)
for test in Other(self.compmgr).tests:
test.test()
self.assertEqual(["call"], log)
def test_attribute_access(self):
"""
Verify that accessing undefined attributes on components raises an
`AttributeError`.
"""
class ComponentA(Component):
pass
comp = ComponentA(self.compmgr)
try:
comp.foo
self.fail('Expected AttributeError')
except AttributeError:
pass
def test_nonconforming_extender(self):
"""
Verify that accessing a method of a declared extension point interface
raises a normal `AttributeError` if the component does not implement
the method.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class ComponentB(Component):
implements(ITest)
tests = iter(ComponentA(self.compmgr).tests)
try:
tests.next().test()
self.fail('Expected AttributeError')
except AttributeError:
pass
def test_extension_point_with_no_extension(self):
"""
Verify that accessing an extension point with no extenders returns an
empty list.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
tests = iter(ComponentA(self.compmgr).tests)
self.assertRaises(StopIteration, tests.next)
def test_extension_point_with_one_extension(self):
"""
Verify that a single component extending an extension point can be
accessed through the extension point attribute of the declaring
component.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class ComponentB(Component):
implements(ITest)
def test(self):
return 'x'
tests = iter(ComponentA(self.compmgr).tests)
self.assertEquals('x', tests.next().test())
self.assertRaises(StopIteration, tests.next)
def test_extension_point_with_two_extensions(self):
"""
Verify that two components extending an extension point can be accessed
through the extension point attribute of the declaring component.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class ComponentB(Component):
implements(ITest)
def test(self):
return 'x'
class ComponentC(Component):
implements(ITest)
def test(self):
return 'y'
results = [test.test() for test in ComponentA(self.compmgr).tests]
self.assertEquals(['x', 'y'], sorted(results))
def test_inherited_extension_point(self):
"""
Verify that extension points are inherited to sub-classes.
"""
class BaseComponent(Component):
tests = ExtensionPoint(ITest)
class ConcreteComponent(BaseComponent):
pass
class ExtendingComponent(Component):
implements(ITest)
def test(self):
return 'x'
tests = iter(ConcreteComponent(self.compmgr).tests)
self.assertEquals('x', tests.next().test())
self.assertRaises(StopIteration, tests.next)
def test_inherited_implements(self):
"""
Verify that a component with a super-class implementing an extension
point interface is also registered as implementing that interface.
"""
class BaseComponent(Component):
implements(ITest)
abstract = True
class ConcreteComponent(BaseComponent):
pass
from trac.core import ComponentMeta
assert ConcreteComponent in ComponentMeta._registry.get(ITest, [])
def test_inherited_implements_multilevel(self):
"""
Verify that extension point interfaces are inherited for more than
one level of inheritance.
"""
class BaseComponent(Component):
implements(ITest)
abstract = True
class ChildComponent(BaseComponent):
implements(IOtherTest)
abstract = True
class ConcreteComponent(ChildComponent):
pass
from trac.core import ComponentMeta
assert ConcreteComponent in ComponentMeta._registry.get(ITest, [])
assert ConcreteComponent in ComponentMeta._registry.get(IOtherTest, [])
def test_component_manager_component(self):
"""
Verify that a component manager can itself be a component with its own
extension points.
"""
from trac.core import ComponentManager
class ManagerComponent(ComponentManager, Component):
tests = ExtensionPoint(ITest)
def __init__(self, foo, bar):
ComponentManager.__init__(self)
self.foo, self.bar = foo, bar
class Extender(Component):
implements(ITest)
def test(self):
return 'x'
mgr = ManagerComponent('Test', 42)
assert id(mgr) == id(mgr[ManagerComponent])
tests = iter(mgr.tests)
self.assertEquals('x', tests.next().test())
self.assertRaises(StopIteration, tests.next)
def test_component_manager_component_isolation(self):
"""
Verify that a component manager that is also a component will only
be listed in extension points for components instantiated in
its scope.
See bh:comment:5:ticket:438 and #11121
"""
from trac.core import ComponentManager
class ManagerComponent(ComponentManager, Component):
tests = ExtensionPoint(ITest)
def __init__(self, foo, bar):
ComponentManager.__init__(self)
self.foo, self.bar = foo, bar
class YetAnotherManagerComponent(ComponentManager, Component):
implements(ITest)
def __init__(self, foo, bar):
ComponentManager.__init__(self)
self.foo, self.bar = foo, bar
# ITest methods
def test(self):
return self.foo + self.bar
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class Extender(Component):
implements(ITest)
def test(self):
return 'x'
mgr = ManagerComponent('Test', 42)
yamc = YetAnotherManagerComponent('y', 'z')
assert yamc[ManagerComponent] is None
assert mgr[YetAnotherManagerComponent] is None
assert yamc[ComponentManager] is None
assert self.compmgr[YetAnotherManagerComponent] is None
assert mgr[ComponentManager] is None
assert self.compmgr[ManagerComponent] is None
self.assertTrue(any(c.__class__ is YetAnotherManagerComponent
for c in ComponentA(yamc).tests))
self.assertFalse(any(c.__class__ is YetAnotherManagerComponent
for c in ComponentA(self.compmgr).tests))
self.assertFalse(any(c.__class__ is YetAnotherManagerComponent
for c in ComponentA(mgr).tests))
self.assertFalse(any(c.__class__ is ManagerComponent
for c in ComponentA(yamc).tests))
self.assertFalse(any(c.__class__ is YetAnotherManagerComponent
for c in mgr.tests))
results = [test.test() for test in ComponentA(yamc).tests]
self.assertEquals(['x', 'yz'], sorted(results))
results = [test.test() for test in ComponentA(self.compmgr).tests]
self.assertEquals(['x'], sorted(results))
results = [test.test() for test in ComponentA(mgr).tests]
self.assertEquals(['x'], sorted(results))
results = [test.test() for test in mgr.tests]
self.assertEquals(['x'], sorted(results))
def test_instantiation_doesnt_enable(self):
"""
Make sure that a component disabled by the ComponentManager is not
implicitly enabled by instantiating it directly.
"""
from trac.core import ComponentManager
class DisablingComponentManager(ComponentManager):
def is_component_enabled(self, cls):
return False
class ComponentA(Component):
pass
mgr = DisablingComponentManager()
instance = ComponentA(mgr)
self.assertEqual(None, mgr[ComponentA])
def suite():
return unittest.makeSuite(ComponentTestCase, 'test')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,012,898,114,093,702,000 | 1,906,011,627,909,842,000 | 34.638243 | 80 | 0.609411 | false |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/applyhost/workflows/update_instance.py | 11 | 5708 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.utils import filters
INDEX_URL = "horizon:projects:instances:index"
ADD_USER_URL = "horizon:projects:instances:create_user"
INSTANCE_SEC_GROUP_SLUG = "update_security_groups"
class UpdateInstanceSecurityGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateInstanceSecurityGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve security group list. '
'Please try again later.')
context = args[0]
instance_id = context.get('instance_id', '')
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = 'member'
# Get list of available security groups
all_groups = []
try:
all_groups = api.network.security_group_list(request)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
instance_groups = []
try:
instance_groups = api.network.server_security_groups(request,
instance_id)
except Exception:
exceptions.handle(request, err_msg)
field_name = self.get_member_field_name('member')
self.fields[field_name] = forms.MultipleChoiceField(required=False)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = [group.id
for group in instance_groups]
def handle(self, request, data):
instance_id = data['instance_id']
wanted_groups = map(filters.get_int_or_uuid, data['wanted_groups'])
try:
api.network.server_update_security_groups(request, instance_id,
wanted_groups)
except Exception as e:
exceptions.handle(request, str(e))
return False
return True
class Meta:
name = _("Security Groups")
slug = INSTANCE_SEC_GROUP_SLUG
class UpdateInstanceSecurityGroups(workflows.UpdateMembersStep):
action_class = UpdateInstanceSecurityGroupsAction
help_text = _("Add and remove security groups to this project "
"from the list of available security groups.")
available_list_title = _("All Security Groups")
members_list_title = _("Instance Security Groups")
no_available_text = _("No security groups found.")
no_members_text = _("No security groups enabled.")
show_roles = False
depends_on = ("instance_id",)
contributes = ("wanted_groups",)
def contribute(self, data, context):
request = self.workflow.request
if data:
field_name = self.get_member_field_name('member')
context["wanted_groups"] = request.POST.getlist(field_name)
return context
class UpdateInstanceInfoAction(workflows.Action):
name = forms.CharField(label=_("Name"),
max_length=255)
def handle(self, request, data):
try:
api.nova.server_update(request,
data['instance_id'],
data['name'])
except Exception:
exceptions.handle(request, ignore=True)
return False
return True
class Meta:
name = _("Info")
slug = 'instance_info'
help_text = _("Edit the instance details.")
class UpdateInstanceInfo(workflows.Step):
action_class = UpdateInstanceInfoAction
depends_on = ("instance_id",)
contributes = ("name",)
class UpdateInstance(workflows.Workflow):
slug = "update_instance"
name = _("Edit Instance")
finalize_button_name = _("Save")
success_message = _('Modified instance "%s".')
failure_message = _('Unable to modify instance "%s".')
success_url = "horizon:project:instances:index"
default_steps = (UpdateInstanceInfo,
UpdateInstanceSecurityGroups)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown instance')
# NOTE(kspear): nova doesn't support instance security group management
# by an admin. This isn't really the place for this code,
# but the other ways of special-casing this are even messier.
class AdminUpdateInstance(UpdateInstance):
success_url = "horizon:admin:instances:index"
default_steps = (UpdateInstanceInfo,)
| gpl-2.0 | 2,848,015,027,501,635,600 | -4,524,860,644,883,216,000 | 37.308725 | 78 | 0.623861 | false |
kans/birgo | deps/breakpad/src/tools/gyp/test/rules/gyptest-default.py | 137 | 1063 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
Hello from program.c
Hello from function1.in
Hello from function2.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
expect = """\
Hello from program.c
Hello from function3.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('program2', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file1.out', "Hello from file1.in\n")
test.must_match('relocate/src/subdir2/file2.out', "Hello from file2.in\n")
test.pass_test()
| apache-2.0 | 3,527,195,719,333,659,000 | -5,225,576,669,644,200,000 | 21.617021 | 74 | 0.714017 | false |
gurneyalex/hr | __unported__/hr_resume/__init__.py | 28 | 1047 | # -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import hr_resume
from . import report
| agpl-3.0 | 5,323,576,473,664,584,000 | -2,144,779,583,367,019,800 | 44.521739 | 79 | 0.612225 | false |
bsmithers/CLIgraphs | histogram.py | 1 | 8515 | #!/usr/bin/env python2
from __future__ import division
import itertools
import math
import sys
import numpy
import scipy.stats
import cligraph
import utils
"""
TODO:
- Auto-detect number of bins
- Fixed width or variable width bins
- Stacked bins, overlapped bins or bins next to each other
- Change which side of bin is open (default: bins are half-open, closed on left, except final bin
which is closed both sides)
"""
class Histogram(cligraph.CLIGraph):
def __init__(self, **kwargs):
super(Histogram, self).__init__(**kwargs)
self.data = []
self.data_params = []
def check_args(self, cli_args, inputs):
super(Histogram, self).check_args(cli_args, inputs)
self.fields = utils.get_columns_from_string(cli_args.field)
self.colours = itertools.cycle(cli_args.colours.split(','))
self.markers = itertools.cycle(cli_args.markers)
self.alphas = utils.map_csv_to_cycle(cli_args.alpha, float)
self.histtypes = itertools.cycle(cli_args.hist_type.split(','))
if cli_args.legends:
self.legends = itertools.cycle(cli_args.legends)
else:
self.legends = itertools.cycle([None])
# Should we store all data and render only after reading everything?
self.store = False
if cli_args.unify_bins:
self.store = True
# Set bin defaults if none given
if not cli_args.bins and not cli_args.bin_size:
cli_args.bins = 10
return bool(self.fields) and bool(self.alphas)
def get_parser(self):
parser = super(Histogram, self).get_parser()
# Inputs
parser.add_argument('-f', '--field', help='Column to read values from. (1-based indexing). \
Unix cut format for multiple columns. Default = 1', default='1')
# Histogram setup
parser.add_argument('--normed', help='Normalise frequency?', action="store_true",
default=False)
parser.add_argument("--cumulative", help="Cumulative Frequency? Default=0",
action="store_true", default=False)
parser.add_argument("--logscale", help="Use a logarithmic y-axs", action="store_true",
default=False)
parser.add_argument("--legends", nargs="+", help="Dataset legends", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--bins', help='Number of bins. If not given and bin-size not \
given, this will default to 10', type=int)
group.add_argument('-z', '--bin-size', help='Size of each bin', type=float)
parser.add_argument('-u', '--unify-bins', action="store_true", default=False,
help='Unify bin sizes across different input sources')
parser.add_argument('--disable-bin-offset', help="By default, bins are offset by half their\
width to help bins straddle integer values for example",
action="store_true", default=False)
# Visual
parser.add_argument('-c', '--colours', default='r,g,b,c,y,m,k')
parser.add_argument('-m', '--markers', default=' ')
parser.add_argument('-a', '--alpha', default='0.5')
parser.add_argument('-y', '--hist-type', default='bar')
return parser
def input_started_hook(self, axes, cli_args, inp, inp_index):
"""
Setup data structures
"""
if not self.store:
self.data = []
self.data_params = []
for _ in self.fields:
self.data.append([])
self.data_params.append({'min': float('inf'), 'max': float('-inf')})
def input_ended_hook(self, axes, cli_args, inp, inp_index):
"""
Draw histogram at end of input unless we have to store data (e.g. for bin calculation)
"""
if self.store:
return
self.__draw_histogram(axes, cli_args)
def process_input_by_fields(self, axes, cli_args, inp, inp_index, fields):
"""
Store value for each dataset
"""
for index, column in enumerate(self.fields):
value = float(fields[column])
if self.store:
index = inp_index * len(self.fields) + index
# Store min/max values for bin work
self.data_params[index]['min'] = min(value, self.data_params[index]['min'])
self.data_params[index]['max'] = max(value, self.data_params[index]['max'])
self.data[index].append(float(fields[column]))
def process_input(self, axes, cli_args, inputs):
"""
If we are doing bin-size auto detection and require consist bin size
across different inputs, we will have to read all data first before
we can process
"""
super(Histogram, self).process_input(axes, cli_args, inputs)
if self.store:
self.__draw_histogram(axes, cli_args)
def apply_lables_and_titles(self, fig, axes, cli_args):
"""
Add legend if we have them
TODO: This can probably by done more generally, just have to be careful about
plots with multiple axes.
"""
super(Histogram, self).apply_lables_and_titles(fig, axes, cli_args)
if cli_args.legends:
axes.legend()
def __draw_histogram(self, axes, cli_args):
"""
Plot histograms for all datasets in current data
"""
for index, dataset in enumerate(self.data):
bins = self.__get_bins(cli_args, index)
axes.hist(dataset, bins, facecolor=self.colours.next(), alpha=self.alphas.next(),
normed=cli_args.normed, cumulative=cli_args.cumulative,
log=cli_args.logscale, label=self.legends.next(), hatch=self.markers.next(),
histtype=self.histtypes.next())
def __get_bins(self, cli_args, index):
"""
Get the bin histogram parameter for the data at the given index. Use the supplied
number of bins if given. Otherwise, calculate based on the supplied bin width.
"""
# Short-circuit if we are given number of bins and not using equal bins
if cli_args.bins and not self.store:
return cli_args.bins
# Get the minimum and maximum values either for this dataset or for all datasets
# if we are post-processing
min_val = self.data_params[index]['min']
max_val = self.data_params[index]['max']
if self.store:
min_val = min([self.data_params[i]['min'] for i in range(0, len(self.data_params))])
max_val = max([self.data_params[i]['max'] for i in range(0, len(self.data_params))])
# For a fixed number of bins, do a linear fit. Otherwise, use a range with bin size
if cli_args.bins:
# Fit one extra value to include right edge (same as normal histogram behaviour)
return numpy.linspace(min_val, max_val, cli_args.bins + 1)
# Compute bins. Do not use range as values may be floats.
# Lowest bin should be the largest multiple of bin_size that is <= min_val
# Highest bin should be smallest multiple of bin_size that is >= max_val
bins = []
i = math.floor(min_val / cli_args.bin_size) * cli_args.bin_size
# By default, bits are offset by half their width from the lowest value rather
# than by their full width
if not cli_args.disable_bin_offset:
i -= cli_args.bin_size / 2
else:
i -= cli_args.bin_size
while i <= max_val:
bins.append(i)
i += cli_args.bin_size
bins.append(i) # Add final bin
# Combine offscreen bins for faster renders
if cli_args.min_x and cli_args.min_x > min_val:
first_onscreen = max([index for index, b in enumerate(bins) if b <= cli_args.min_x])
# Include the first bin so that this captures everything offscren
if first_onscreen >= 2:
bins = [bins[0]] + bins[first_onscreen:]
if cli_args.max_x and cli_args.max_x < max_val:
last_onscreen = min([index for index, b in enumerate(bins) if b > cli_args.max_x])
if last_onscreen < len(bins) - 1:
bins = bins[:last_onscreen] + [bins[-1]]
return bins
if __name__ == '__main__':
hist = Histogram(grid_default_on=True)
hist.graphify()
| agpl-3.0 | -9,145,534,039,556,424,000 | 5,021,140,353,503,267,000 | 37.881279 | 100 | 0.593188 | false |
vivekkodu/robotframework-selenium2library | src/Selenium2Library/keywords/_selectelement.py | 9 | 14906 | from selenium.webdriver.support.ui import Select
from keywordgroup import KeywordGroup
class _SelectElementKeywords(KeywordGroup):
# Public
def get_list_items(self, locator):
"""Returns the values in the select list identified by `locator`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options(locator)
return self._get_labels_for_options(options)
def get_selected_list_label(self, locator):
"""Returns the visible label of the selected element from the select list identified by `locator`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select = self._get_select_list(locator)
return select.first_selected_option.text
def get_selected_list_labels(self, locator):
"""Returns the visible labels of selected elements (as a list) from the select list identified by `locator`.
Fails if there is no selection.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options_selected(locator)
if len(options) == 0:
raise ValueError("Select list with locator '%s' does not have any selected values")
return self._get_labels_for_options(options)
def get_selected_list_value(self, locator):
"""Returns the value of the selected element from the select list identified by `locator`.
Return value is read from `value` attribute of the selected element.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select = self._get_select_list(locator)
return select.first_selected_option.get_attribute('value')
def get_selected_list_values(self, locator):
"""Returns the values of selected elements (as a list) from the select list identified by `locator`.
Fails if there is no selection.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options_selected(locator)
if len(options) == 0:
raise ValueError("Select list with locator '%s' does not have any selected values")
return self._get_values_for_options(options)
def list_selection_should_be(self, locator, *items):
"""Verifies the selection of select list identified by `locator` is exactly `*items`.
If you want to test that no option is selected, simply give no `items`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) [ %s ]" % " | ".join(items) or "no options"
self._info("Verifying list '%s' has %s selected." % (locator, items_str))
items = list(items)
self.page_should_contain_list(locator)
select, options = self._get_select_list_options_selected(locator)
if not items and len(options) == 0:
return
selected_values = self._get_values_for_options(options)
selected_labels = self._get_labels_for_options(options)
err = "List '%s' should have had selection [ %s ] but it was [ %s ]" \
% (locator, ' | '.join(items), ' | '.join(selected_labels))
for item in items:
if item not in selected_values + selected_labels:
raise AssertionError(err)
for selected_value, selected_label in zip(selected_values, selected_labels):
if selected_value not in items and selected_label not in items:
raise AssertionError(err)
def list_should_have_no_selections(self, locator):
"""Verifies select list identified by `locator` has no selections.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
self._info("Verifying list '%s' has no selection." % locator)
select, options = self._get_select_list_options_selected(locator)
if options:
selected_labels = self._get_labels_for_options(options)
items_str = " | ".join(selected_labels)
raise AssertionError("List '%s' should have had no selection "
"(selection was [ %s ])" % (locator, items_str))
    def page_should_contain_list(self, locator, message='', loglevel='INFO'):
        """Verifies select list identified by `locator` is found from current page.

        See `Page Should Contain Element` for explanation about `message` and
        `loglevel` arguments.

        Key attributes for lists are `id` and `name`. See `introduction` for
        details about locating elements.
        """
        # Delegates to the shared element-presence check with tag name 'list'.
        self._page_should_contain_element(locator, 'list', message, loglevel)
    def page_should_not_contain_list(self, locator, message='', loglevel='INFO'):
        """Verifies select list identified by `locator` is not found from current page.

        See `Page Should Contain Element` for explanation about `message` and
        `loglevel` arguments.

        Key attributes for lists are `id` and `name`. See `introduction` for
        details about locating elements.
        """
        # Delegates to the shared element-absence check with tag name 'list'.
        self._page_should_not_contain_element(locator, 'list', message, loglevel)
def select_all_from_list(self, locator):
"""Selects all values from multi-select list identified by `id`.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._info("Selecting all options from list '%s'." % locator)
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Select all from list' works only for multiselect lists.")
for i in range(len(select.options)):
select.select_by_index(i)
def select_from_list(self, locator, *items):
"""Selects `*items` from list identified by `locator`
If more than one value is given for a single-selection list, the last
value will be selected. If the target list is a multi-selection list,
and `*items` is an empty list, all values of the list will be selected.
*items try to select by value then by label.
It's faster to use 'by index/value/label' functions.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not items:
for i in range(len(select.options)):
select.select_by_index(i)
return
for item in items:
try: select.select_by_value(item)
except:
try: select.select_by_visible_text(item)
except: continue
def select_from_list_by_index(self, locator, *indexes):
"""Selects `*indexes` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not indexes:
raise ValueError("No index given.")
items_str = "index(es) '%s'" % ", ".join(indexes)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for index in indexes:
select.select_by_index(int(index))
def select_from_list_by_value(self, locator, *values):
    """Selects `*values` from list identified by `locator`.

    Select list keywords work on both lists and combo boxes. Key attributes
    for select lists are `id` and `name`. See `introduction` for details
    about locating elements.
    """
    if not values:
        raise ValueError("No value given.")
    joined = ", ".join(values)
    self._info("Selecting %s from list '%s'."
               % ("value(s) '%s'" % joined, locator))
    selection = self._get_select_list(locator)
    for value in values:
        selection.select_by_value(value)
def select_from_list_by_label(self, locator, *labels):
    """Selects `*labels` from list identified by `locator`.

    Select list keywords work on both lists and combo boxes. Key attributes
    for select lists are `id` and `name`. See `introduction` for details
    about locating elements.
    """
    if not labels:
        # Bug fix: the message previously said "No value given." — a
        # copy-paste from the by-value keyword; this keyword takes labels.
        raise ValueError("No label given.")
    items_str = "label(s) '%s'" % ", ".join(labels)
    self._info("Selecting %s from list '%s'." % (items_str, locator))
    select = self._get_select_list(locator)
    for label in labels:
        select.select_by_visible_text(label)
def unselect_from_list(self, locator, *items):
    """Unselects given values from select list identified by locator.

    As a special case, giving empty list as `*items` will remove all
    selections.

    *items try to unselect by value AND by label.

    It's faster to use 'by index/value/label' functions.

    Select list keywords work on both lists and combo boxes. Key attributes for
    select lists are `id` and `name`. See `introduction` for details about
    locating elements.
    """
    items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
    self._info("Unselecting %s from list '%s'." % (items_str, locator))
    select = self._get_select_list(locator)
    # Deselecting is only meaningful on a <select multiple> element.
    if not select.is_multiple:
        raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
    if not items:
        select.deselect_all()
        return
    select, options = self._get_select_list_options(select)
    for item in items:
        # NOTE(review): unlike `Select From List`, both deselect calls run
        # unconditionally with no try/except, so an item matching by value
        # but not by visible label (or vice versa) raises. Presumably
        # intentional per the docstring ("by value AND by label"), but
        # worth confirming.
        select.deselect_by_value(item)
        select.deselect_by_visible_text(item)
def unselect_from_list_by_index(self, locator, *indexes):
    """Unselects `*indexes` from list identified by `locator`.

    Select list keywords work on both lists and combo boxes. Key attributes
    for select lists are `id` and `name`. See `introduction` for details
    about locating elements.
    """
    if not indexes:
        raise ValueError("No index given.")
    self._info("Unselecting %s from list '%s'."
               % ("index(es) '%s'" % ", ".join(indexes), locator))
    selection = self._get_select_list(locator)
    # Deselecting is only meaningful on a <select multiple> element.
    if not selection.is_multiple:
        raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
    for idx in indexes:
        selection.deselect_by_index(int(idx))
def unselect_from_list_by_value(self, locator, *values):
    """Unselects `*values` from list identified by `locator`.

    Select list keywords work on both lists and combo boxes. Key attributes
    for select lists are `id` and `name`. See `introduction` for details
    about locating elements.
    """
    if not values:
        raise ValueError("No value given.")
    joined = ", ".join(values)
    self._info("Unselecting %s from list '%s'."
               % ("value(s) '%s'" % joined, locator))
    selection = self._get_select_list(locator)
    # Deselecting is only meaningful on a <select multiple> element.
    if not selection.is_multiple:
        raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
    for value in values:
        selection.deselect_by_value(value)
def unselect_from_list_by_label(self, locator, *labels):
    """Unselects `*labels` from list identified by `locator`.

    Select list keywords work on both lists and combo boxes. Key attributes
    for select lists are `id` and `name`. See `introduction` for details
    about locating elements.
    """
    if not labels:
        # Bug fix: the message previously said "No value given." — a
        # copy-paste from the by-value keyword; this keyword takes labels.
        raise ValueError("No label given.")
    items_str = "label(s) '%s'" % ", ".join(labels)
    self._info("Unselecting %s from list '%s'." % (items_str, locator))
    select = self._get_select_list(locator)
    # Deselecting is only meaningful on a <select multiple> element.
    if not select.is_multiple:
        raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
    for label in labels:
        select.deselect_by_visible_text(label)
# Private
def _get_labels_for_options(self, options):
labels = []
for option in options:
labels.append(option.text)
return labels
def _get_select_list(self, locator):
    """Locate a <select> element by `locator` and wrap it in a Select helper."""
    return Select(self._element_find(locator, True, True, 'select'))
def _get_select_list_options(self, select_list_or_locator):
    """Return (Select, options) for an already-wrapped Select or a locator."""
    if isinstance(select_list_or_locator, Select):
        wrapped = select_list_or_locator
    else:
        wrapped = self._get_select_list(select_list_or_locator)
    return wrapped, wrapped.options
def _get_select_list_options_selected(self, locator):
    """Return (Select, currently-selected options) for the list at `locator`."""
    select = self._get_select_list(locator)
    # TODO: Handle possible exception thrown by all_selected_options
    return select, select.all_selected_options
def _get_values_for_options(self, options):
values = []
for option in options:
values.append(option.get_attribute('value'))
return values
def _is_multiselect_list(self, select):
multiple_value = select.get_attribute('multiple')
if multiple_value is not None and (multiple_value == 'true' or multiple_value == 'multiple'):
return True
return False
def _unselect_all_options_from_multi_select_list(self, select):
    """Clear every selection of a multi-select element in one call.

    Setting selectedIndex to -1 via JavaScript removes all selections at
    once, without clicking individual options.
    """
    self._current_browser().execute_script("arguments[0].selectedIndex = -1;", select)
def _unselect_option_from_multi_select_list(self, select, options, index):
if options[index].is_selected():
options[index].click()
| apache-2.0 | 5,964,303,006,888,955,000 | 3,041,594,759,930,759,700 | 40.988732 | 116 | 0.629344 | false |
WUJISHANXIA/wujishanxia | bootcamp/articles/tests/test_views.py | 1 | 6318 | from django.contrib.auth import get_user_model
from django.http import HttpResponseBadRequest
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from bootcamp.articles.models import Article
class TestViews(TestCase):
"""
Includes tests for all the functionality
associated with Views
"""
def setUp(self):
    # Two authenticated clients: `client` acts as the article author,
    # `other_client` as an unrelated logged-in user.
    self.client = Client()
    self.other_client = Client()
    self.user = get_user_model().objects.create_user(
        username='test_user',
        email='test@gmail.com',
        password='top_secret'
    )
    self.other_user = get_user_model().objects.create_user(
        username='other_test_user',
        email='other_test@gmail.com',
        password='top_secret'
    )
    self.client.login(username='test_user', password='top_secret')
    self.other_client.login(
        username='other_test_user', password='top_secret')
    # `title` slugifies to 'a-really-nice-to-be-title', which several
    # tests below rely on.
    self.title = 'A really nice to-be title'
    self.content = '''This is a really good content, just if somebody published
    it, that would be awesome, but no, nobody wants to publish it, because
    they know this is just a test, and you know than nobody wants to
    publish a test, just a test; everybody always wants the real deal.'''
    # A published ('P') article owned by `self.user`, used by read-only tests.
    self.article = Article.objects.create(
        create_user=self.user, title='A really nice title',
        content=self.content, tags='list, lists', status='P')
def test_index_articles(self):
    # The article index renders (200) for a logged-in user...
    response = self.client.get(reverse('articles'))
    self.assertEqual(response.status_code, 200)
    # ...and requesting a non-existent article slug yields a 404.
    response_no_art = self.client.get(reverse(
        'article', kwargs={'slug': 'no-slug'}))
    self.assertEqual(response_no_art.status_code, 404)
def test_individual_article(self):
    # Publishing via the 'write' view redirects (302) and the article
    # becomes reachable under its slugified title.
    response = self.client.post(reverse('write'), {'title': self.title,
                                                   'content': self.content,
                                                   'tags': 'list, lists',
                                                   'status': 'P'})
    response_art = self.client.get(
        reverse('article', kwargs={'slug': 'a-really-nice-to-be-title'}))
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response_art.status_code, 200)
    self.assertEqual(response_art.context['article'].slug,
                     'a-really-nice-to-be-title')
def test_drafts_workflow(self):
    """Saving with status 'D' must list the article on the drafts page."""
    response = self.client.post(reverse('write'), {'title': self.title,
                                                   'content': self.content,
                                                   'tags': 'list, lists',
                                                   'status': 'D'
                                                   })
    resp = self.client.get(reverse('drafts'))
    self.assertEqual(response.status_code, 302)
    self.assertEqual(resp.status_code, 200)
    # Bug fix: this previously used assertTrue(value, msg) — the second
    # argument of assertTrue is a failure *message*, so the assertion
    # passed for any truthy slug. assertEqual actually compares them.
    self.assertEqual(resp.context['drafts'][0].slug,
                     'a-really-nice-to-be-title')
def test_filter_by_tag(self):
    # Publish an article tagged 'list'; the tag page must then include
    # that tag among its popular tags.
    response = self.client.post(reverse('write'), {'title': self.title,
                                                   'content': self.content,
                                                   'tags': 'list',
                                                   'status': 'P'})
    response_tag = self.client.get(
        reverse('tag', kwargs={'tag_name': 'list'}))
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response_tag.status_code, 200)
    self.assertTrue(
        'list' in list(response_tag.context['popular_tags'])[0])
def test_edits_article(self):
    """Editing a published article must change its stored content."""
    response = self.client.post(reverse('write'), {'title': self.title,
                                                   'content': self.content,
                                                   'tags': 'list, lists',
                                                   'status': 'P'
                                                   })
    # Grab the just-created article and remember its original content.
    art = Article.objects.latest('create_date')
    art_content = art.content
    response_two = self.client.post(
        reverse('edit_article', kwargs={'pk': art.id}),
        {'content': 'some_different_content_here',
         'title': self.title,
         'tags': 'list, lists',
         'status': 'P'})
    # Re-read from the database so the edit is visible on this instance.
    art.refresh_from_db()
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response_two.status_code, 302)
    self.assertNotEqual(art_content, art.content)
def test_empty_preview(self):
    # AJAX preview of empty content returns a placeholder message.
    request = self.client.post(reverse('preview'), {'content': ''},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    self.assertEqual(request.status_code, 200)
    self.assertEqual(request.content, b'Nothing to display :(')
def test_preview_with_text(self):
    # The preview endpoint echoes the content wrapped in an extra <p> tag.
    content = '<p>This is a really good content.</p>'
    request = self.client.post(reverse('preview'), {'content': content},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    self.assertEqual(request.status_code, 200)
    self.assertEqual(
        request.content,
        b'<p><p>This is a really good content.</p></p>')
def test_bad_request_preview(self):
    # A plain GET on the preview endpoint is rejected with 400.
    request = self.client.get(reverse('preview'))
    self.assertEqual(request.status_code, 400)
    self.assertTrue(isinstance(request, HttpResponseBadRequest))
def test_comment_view(self):
    # Posting a comment via AJAX succeeds and the rendered response
    # contains the comment text.
    request = self.client.post(reverse('comment'),
                               {'article': self.article.id,
                                'comment': 'This is a good comment'},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    self.assertEqual(request.status_code, 200)
    self.assertTrue(b'This is a good comment' in request.content)
def test_bad_request_comment(self):
request = self.client.get(reverse('comment'))
self.assertEqual(request.status_code, 400)
self.assertTrue(isinstance(request, HttpResponseBadRequest))
| mit | -5,763,196,201,873,789,000 | -5,308,305,283,069,982,000 | 46.149254 | 83 | 0.534188 | false |
Vizerai/grpc | tools/buildgen/plugins/generate_vsprojects.py | 19 | 3036 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Buildgen vsprojects plugin.
This parses the list of libraries, and generates globals "vsprojects"
and "vsproject_dict", to be used by the visual studio generators.
"""
import hashlib
import re
def mako_plugin(dictionary):
    """The exported plugin code for generate_vsprojects.

    We want to help the work of the visual studio generators.
    """

    libs = dictionary.get('libs', [])
    targets = dictionary.get('targets', [])

    # Tag each entry so templates can tell libraries and targets apart.
    for lib in libs:
        lib['is_library'] = True
    for target in targets:
        target['is_library'] = False

    projects = []
    projects.extend(libs)
    projects.extend(targets)
    for target in projects:
        if 'build' in target and target['build'] == 'test':
            default_test_dir = 'test'
        else:
            default_test_dir = '.'
        # Fill in Visual Studio defaults only where the build.yaml entry
        # did not specify them explicitly.
        if 'vs_config_type' not in target:
            if 'build' in target and target['build'] == 'test':
                target['vs_config_type'] = 'Application'
            else:
                target['vs_config_type'] = 'StaticLibrary'
        if 'vs_packages' not in target:
            target['vs_packages'] = []
        if 'vs_props' not in target:
            target['vs_props'] = []
        target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
        if target.get('vs_project_guid',
                      None) is None and 'windows' in target.get(
                          'platforms', ['windows']):
            name = target['name']
            # Derive a deterministic GUID from the md5 of the project name,
            # formatted as {8-4-4-4-12} hex groups.
            guid = re.sub('(........)(....)(....)(....)(.*)',
                          r'{\1-\2-\3-\4-\5}',
                          hashlib.md5(name).hexdigest())
            target['vs_project_guid'] = guid.upper()
    # Exclude projects without a visual project guid, such as the tests.
    projects = [
        project for project in projects if project.get('vs_project_guid', None)
    ]

    projects = [
        project for project in projects
        if project['language'] != 'c++' or project['build'] == 'all' or
        project['build'] == 'protoc' or (project['language'] == 'c++' and (
            project['build'] == 'test' or project['build'] == 'private'))
    ]

    project_dict = dict([(p['name'], p) for p in projects])

    packages = dictionary.get('vspackages', [])
    packages_dict = dict([(p['name'], p) for p in packages])

    # Expose the computed collections as template globals.
    dictionary['vsprojects'] = projects
    dictionary['vsproject_dict'] = project_dict
| apache-2.0 | 9,084,435,950,611,500,000 | -3,811,037,354,795,191,300 | 34.717647 | 79 | 0.596838 | false |
overtherain/scriptfile | software/googleAppEngine/lib/PyAMF/pyamf/tests/test_xml.py | 26 | 2009 | # -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for XML library integration
@since: 0.4
"""
import unittest
import pyamf.xml
from pyamf import util
class ElementTreeTestCase(unittest.TestCase):
    """Tests the type mappings."""

    xml = '<foo bar="baz" />'

    def check_amf0(self, bytes, xml):
        # AMF0 encoding: type marker 15, then a 4-byte length prefix that
        # must match the remaining payload, then the XML text itself.
        stream = util.BufferedByteStream(bytes)

        self.assertEqual(stream.read_char(), 15)

        declared_length = stream.read_ulong()
        self.assertEqual(declared_length, stream.remaining())

        self.assertEqual(stream.read(), xml)

    def check_amf3(self, bytes, xml):
        # AMF3 encoding: type marker 11, then a one-byte length whose low
        # bit is a flag (hence the >> 1), then the XML text itself.
        stream = util.BufferedByteStream(bytes)

        self.assertEqual(stream.read_char(), 11)

        declared_length = stream.read_uchar()
        self.assertEqual(declared_length >> 1, stream.remaining())

        self.assertEqual(stream.read(), xml)
# Generate one test method per supported etree implementation.
for mod in pyamf.xml.ETREE_MODULES:
    name = 'test_' + mod.replace('.', '_')

    def check_etree(self):
        # holy hack batman: re-derive the target module name from the test
        # method's own name at runtime, because referencing the loop
        # variable `mod` in this closure would be late-bound (all tests
        # would see the last value).
        import inspect

        mod = inspect.stack()[1][0].f_locals['testMethod'].__name__[5:]
        mod = mod.replace('_', '.')

        try:
            etree = util.get_module(mod)
        except ImportError:
            self.skipTest('%r is not available' % (mod,))

        element = etree.fromstring(self.xml)
        xml = etree.tostring(element)

        # Make this etree implementation pyamf's default, restoring the
        # previous one when the test finishes.
        old = pyamf.set_default_etree(etree)

        if old:
            self.addCleanup(lambda x: pyamf.set_default_etree(x), old)

        # Round-trip through AMF0: the element type must survive.
        bytes = pyamf.encode(element, encoding=pyamf.AMF0).getvalue()
        self.check_amf0(bytes, xml)

        new_element = pyamf.decode(bytes, encoding=pyamf.AMF0).next()
        self.assertIdentical(type(element), type(new_element))

        # Round-trip through AMF3 likewise.
        bytes = pyamf.encode(element, encoding=pyamf.AMF3).getvalue()
        self.check_amf3(bytes, xml)

        new_element = pyamf.decode(bytes, encoding=pyamf.AMF3).next()
        self.assertIdentical(type(element), type(new_element))

    check_etree.__name__ = name
| mit | -7,459,719,517,968,046,000 | -5,202,430,512,655,001,000 | 22.916667 | 71 | 0.602787 | false |
albertrdixon/CouchPotatoServer | libs/bs4/builder/__init__.py | 447 | 11151 | from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
    """Keeps track of registered TreeBuilder classes, indexed by feature."""

    def __init__(self):
        self.builders_for_feature = defaultdict(list)
        self.builders = []

    def register(self, treebuilder_class):
        """Register a treebuilder based on its advertised features."""
        # Newest registrations go to the front so they win lookups.
        for feature in treebuilder_class.features:
            self.builders_for_feature[feature].insert(0, treebuilder_class)
        self.builders.insert(0, treebuilder_class)

    def lookup(self, *features):
        """Return the most recently registered builder supporting all
        of `features`, or None if nothing matches."""
        if not self.builders:
            # There are no builders at all.
            return None

        if not features:
            # No constraints: the most recently registered builder wins.
            return self.builders[0]

        # Intersect the candidate lists for every requested feature.
        # A feature nobody advertises does NOT narrow the set (original
        # behavior preserved).
        pending = list(features)
        pending.reverse()
        candidates = None
        candidate_set = None
        while pending:
            feature = pending.pop()
            matching = self.builders_for_feature.get(feature, [])
            if matching:
                if candidates is None:
                    candidates = matching
                    candidate_set = set(candidates)
                else:
                    candidate_set = candidate_set.intersection(set(matching))

        if candidate_set is None:
            return None
        # Preserve registration order: first surviving candidate wins.
        for builder in candidates:
            if builder in candidate_set:
                return builder
        return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
    """Turn a document into a Beautiful Soup object tree."""

    # Feature strings (e.g. FAST, HTML) this builder advertises to the
    # registry; subclasses override.
    features = []

    is_xml = False
    # Tags whose whitespace must not be collapsed (e.g. <pre>).
    preserve_whitespace_tags = set()
    empty_element_tags = None # A tag will be considered an empty-element
                              # tag when and only when it has no contents.

    # A value for these tag/attribute combinations is a space- or
    # comma-separated list of CDATA, rather than a single CDATA.
    cdata_list_attributes = {}


    def __init__(self):
        self.soup = None

    def reset(self):
        # Subclasses override this to clear parser state between feeds.
        pass

    def can_be_empty_element(self, tag_name):
        """Might a tag with this name be an empty-element tag?

        The final markup may or may not actually present this tag as
        self-closing.

        For instance: an HTMLBuilder does not consider a <p> tag to be
        an empty-element tag (it's not in
        HTMLBuilder.empty_element_tags). This means an empty <p> tag
        will be presented as "<p></p>", not "<p />".

        The default implementation has no opinion about which tags are
        empty-element tags, so a tag will be presented as an
        empty-element tag if and only if it has no contents.
        "<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
        be left alone.
        """
        if self.empty_element_tags is None:
            return True
        return tag_name in self.empty_element_tags

    def feed(self, markup):
        # Subclasses must implement the actual parsing.
        raise NotImplementedError()

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        # Default: pass the markup through untouched. The 4-tuple is
        # presumably (markup, tried encoding, declared encoding,
        # conversion attempted) — confirm against concrete builders.
        return markup, None, None, False

    def test_fragment_to_document(self, fragment):
        """Wrap an HTML fragment to make it look like a document.

        Different parsers do this differently. For instance, lxml
        introduces an empty <head> tag, and html5lib
        doesn't. Abstracting this away lets us write simple tests
        which run HTML fragments through the parser and compare the
        results against other HTML fragments.

        This method should not be used outside of tests.
        """
        return fragment

    def set_up_substitutions(self, tag):
        # Default: no substitutions are needed; HTML builders override.
        return False

    def _replace_cdata_list_attribute_values(self, tag_name, attrs):
        """Replaces class="foo bar" with class=["foo", "bar"]

        Modifies its input in place.
        """
        if not attrs:
            return attrs
        if self.cdata_list_attributes:
            # Attributes listed under '*' apply to every tag; others only
            # to their specific tag name.
            universal = self.cdata_list_attributes.get('*', [])
            tag_specific = self.cdata_list_attributes.get(
                tag_name.lower(), None)
            for attr in attrs.keys():
                if attr in universal or (tag_specific and attr in tag_specific):
                    # We have a "class"-type attribute whose string
                    # value is a whitespace-separated list of
                    # values. Split it into a list.
                    value = attrs[attr]
                    if isinstance(value, basestring):
                        values = whitespace_re.split(value)
                    else:
                        # html5lib sometimes calls setAttributes twice
                        # for the same tag when rearranging the parse
                        # tree. On the second call the attribute value
                        # here is already a list.  If this happens,
                        # leave the value alone rather than trying to
                        # split it again.
                        values = value
                    attrs[attr] = values
        return attrs
class SAXTreeBuilder(TreeBuilder):
    """A Beautiful Soup treebuilder that listens for SAX events."""

    def feed(self, markup):
        raise NotImplementedError()

    def close(self):
        pass

    def startElement(self, name, attrs):
        # SAX attribute keys are (namespace, localname) pairs; keep only
        # the local name.
        plain_attrs = dict((key[1], value) for key, value in list(attrs.items()))
        self.soup.handle_starttag(name, plain_attrs)

    def endElement(self, name):
        self.soup.handle_endtag(name)

    def startElementNS(self, nsTuple, nodeName, attrs):
        # Throw away (ns, nodeName) for now.
        self.startElement(nodeName, attrs)

    def endElementNS(self, nsTuple, nodeName):
        # Throw away (ns, nodeName) for now.
        self.endElement(nodeName)

    def startPrefixMapping(self, prefix, nodeValue):
        # Ignore the prefix for now.
        pass

    def endPrefixMapping(self, prefix):
        # Ignore the prefix for now.
        pass

    def characters(self, content):
        self.soup.handle_data(content)

    def startDocument(self):
        pass

    def endDocument(self):
        pass
class HTMLTreeBuilder(TreeBuilder):
    """This TreeBuilder knows facts about HTML.

    Such as which tags are empty-element tags.
    """

    preserve_whitespace_tags = set(['pre', 'textarea'])
    empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
                              'spacer', 'link', 'frame', 'base'])

    # The HTML standard defines these attributes as containing a
    # space-separated list of values, not a single value. That is,
    # class="foo bar" means that the 'class' attribute has two values,
    # 'foo' and 'bar', not the single value 'foo bar'. When we
    # encounter one of these attributes, we will parse its value into
    # a list of values if possible. Upon output, the list will be
    # converted back into a string.
    cdata_list_attributes = {
        "*" : ['class', 'accesskey', 'dropzone'],
        "a" : ['rel', 'rev'],
        "link" : ['rel', 'rev'],
        # Bug fix: "td" was previously listed twice with the same value;
        # duplicate dict keys silently overwrite each other.
        "td" : ["headers"],
        "th" : ["headers"],
        "form" : ["accept-charset"],
        "object" : ["archive"],

        # These are HTML5 specific, as are *.accesskey and *.dropzone above.
        "area" : ["rel"],
        "icon" : ["sizes"],
        "iframe" : ["sandbox"],
        "output" : ["for"],
        }

    def set_up_substitutions(self, tag):
        """Replace a <meta> tag's encoding declaration with a stand-in
        value that can later be serialized in any encoding.

        Returns True if a substitution was made.
        """
        # We are only interested in <meta> tags
        if tag.name != 'meta':
            return False

        http_equiv = tag.get('http-equiv')
        content = tag.get('content')
        charset = tag.get('charset')

        # We are interested in <meta> tags that say what encoding the
        # document was originally in. This means HTML 5-style <meta>
        # tags that provide the "charset" attribute. It also means
        # HTML 4-style <meta> tags that provide the "content"
        # attribute and have "http-equiv" set to "content-type".
        #
        # In both cases we will replace the value of the appropriate
        # attribute with a standin object that can take on any
        # encoding.
        meta_encoding = None
        if charset is not None:
            # HTML 5 style:
            # <meta charset="utf8">
            meta_encoding = charset
            tag['charset'] = CharsetMetaAttributeValue(charset)

        elif (content is not None and http_equiv is not None
              and http_equiv.lower() == 'content-type'):
            # HTML 4 style:
            # <meta http-equiv="content-type" content="text/html; charset=utf8">
            tag['content'] = ContentMetaAttributeValue(content)

        return (meta_encoding is not None)
def register_treebuilders_from(module):
    """Copy TreeBuilders from the given module into this module."""
    # I'm fairly sure this is not the best way to do this.
    this_module = sys.modules['bs4.builder']
    for name in module.__all__:
        obj = getattr(module, name)

        if issubclass(obj, TreeBuilder):
            # Re-export the class from bs4.builder and add it to __all__.
            setattr(this_module, name, obj)
            this_module.__all__.append(name)
            # Register the builder while we're at it.
            this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
    """Signals that the underlying parser rejected the markup it was given."""
    pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
| gpl-3.0 | -596,672,860,422,581,800 | -3,462,446,341,128,346,600 | 33.738318 | 80 | 0.602368 | false |
CReSIS/OPS | conf/tools/createUserExample.py | 1 | 1232 | # After editing the script (e.g. with gedit), run this script by following these steps:
# (1) Switch to root user: sudo -i
# (2) Activate VirtualEnv: source /usr/bin/venv/bin/activate
# (3) Open a Python shell with Django environment: python /var/django/ops/manage.py shell
# (4) Run this script: execfile('createUserExample.py')
# (5) Press ctrl-d or type quit() or exit()
from django.contrib.auth.models import User
# set new user properties
userName='anonymous'
userEmail='anonymous@ku.edu'
userPassword='anonymous'
# create the new user
newUser = User.objects.create_user(userName, userEmail, userPassword)
# set the user profile options (example for cresis superuser)
newUser.profile.rds_layer_groups = [1,2]
newUser.profile.accum_layer_groups = [1,2]
newUser.profile.kuband_layer_groups = [1,2]
newUser.profile.snow_layer_groups = [1,2]
newUser.profile.rds_season_groups = [1,2]
newUser.profile.accum_season_groups = [1,2]
newUser.profile.kuband_season_groups = [1,2]
newUser.profile.snow_season_groups = [1,2]
newUser.profile.layerGroupRelease = True
newUser.profile.bulkDeleteData = False
newUser.profile.createData = True
newUser.profile.seasonRelease = True
# save the user profile
newUser.profile.save()
| gpl-3.0 | -698,008,746,618,450,200 | 5,637,712,175,438,252,000 | 36.333333 | 89 | 0.75487 | false |
newemailjdm/pybrain | pybrain/rl/agents/agent.py | 31 | 1153 | __author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.utilities import abstractMethod, Named
class Agent(Named):
    """ An agent is an entity capable of producing actions, based on previous observations.
    Generally it will also learn from experience. It can interact directly with a Task.
    """

    def integrateObservation(self, obs):
        """ Integrate the current observation of the environment.
        :arg obs: The last observation returned from the environment
        :type obs: by default, this is assumed to be a numpy array of doubles
        """
        # Base implementation ignores the observation; subclasses store it.
        pass

    def getAction(self):
        """ Return a chosen action.
        :rtype: by default, this is assumed to be a numpy array of doubles.
        :note: This method is abstract and needs to be implemented.
        """
        abstractMethod()

    def giveReward(self, r):
        """ Reward or punish the agent.
        :key r: reward, if C{r} is positive, punishment if C{r} is negative
        :type r: double
        """
        # Base implementation discards the reward; learners override this.
        pass

    def newEpisode(self):
        """ Inform the agent that a new episode has started. """
        pass
| bsd-3-clause | 1,802,494,364,240,516,400 | -5,530,399,846,390,908,000 | 33.939394 | 91 | 0.620121 | false |
motion2015/a3 | common/djangoapps/embargo/migrations/0005_add_courseaccessrulehistory.py | 102 | 7906 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create the 'embargo_courseaccessrulehistory' table.

    Auto-generated South migration; the column definitions mirror the
    CourseAccessRuleHistory model.
    """
    # Adding model 'CourseAccessRuleHistory'
    db.create_table('embargo_courseaccessrulehistory', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('course_key', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
        ('snapshot', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal('embargo', ['CourseAccessRuleHistory'])
def backwards(self, orm):
    """Reverse of forwards(): drop the history table."""
    # Deleting model 'CourseAccessRuleHistory'
    db.delete_table('embargo_courseaccessrulehistory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'ordering': "['country']", 'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
},
'embargo.courseaccessrulehistory': {
'Meta': {'object_name': 'CourseAccessRuleHistory'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snapshot': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['embargo'] | agpl-3.0 | 3,090,032,969,767,051,000 | 9,163,422,943,648,449,000 | 68.359649 | 182 | 0.560207 | false |
sanyaade-teachings/oppia | core/domain/exp_domain.py | 6 | 68060 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for an exploration, its states, and their constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used."""
__author__ = 'Sean Lip'
import copy
import logging
import re
import string
from core.domain import fs_domain
from core.domain import html_cleaner
from core.domain import gadget_registry
from core.domain import interaction_registry
from core.domain import param_domain
from core.domain import rule_domain
from core.domain import skins_services
import feconf
import jinja_utils
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts stored in exploration snapshots:
# the 'widget_*' values predate the 'widget' --> 'interaction' rename (see
# State._get_current_state_dict) and must stay as-is so old snapshots can
# still be interpreted.
STATE_PROPERTY_PARAM_CHANGES = 'param_changes'
STATE_PROPERTY_CONTENT = 'content'
STATE_PROPERTY_INTERACTION_ID = 'widget_id'
STATE_PROPERTY_INTERACTION_CUST_ARGS = 'widget_customization_args'
STATE_PROPERTY_INTERACTION_HANDLERS = 'widget_handlers'
# Kept for legacy purposes; not used anymore.
STATE_PROPERTY_INTERACTION_STICKY = 'widget_sticky'
def _is_interaction_terminal(interaction_id):
    """Checks whether the interaction with the given id ends an exploration.

    Returns:
        bool. The `is_terminal` flag of the registered interaction.
    """
    interaction = interaction_registry.Registry.get_interaction_by_id(
        interaction_id)
    return interaction.is_terminal
class ExplorationChange(object):
    """Domain object class for an exploration change.

    IMPORTANT: Ensure that all changes to this class (and how these cmds are
    interpreted in general) preserve backward-compatibility with the
    exploration snapshots in the datastore. Do not modify the definitions of
    cmd keys that already exist.
    """

    STATE_PROPERTIES = (
        STATE_PROPERTY_PARAM_CHANGES,
        STATE_PROPERTY_CONTENT,
        STATE_PROPERTY_INTERACTION_ID,
        STATE_PROPERTY_INTERACTION_CUST_ARGS,
        STATE_PROPERTY_INTERACTION_STICKY,
        STATE_PROPERTY_INTERACTION_HANDLERS)

    EXPLORATION_PROPERTIES = (
        'title', 'category', 'objective', 'language_code', 'tags',
        'blurb', 'author_notes', 'param_specs', 'param_changes',
        'default_skin_id', 'init_state_name')

    def __init__(self, change_dict):
        """Initializes an ExplorationChange object from a dict.

        change_dict represents a command. It must contain a 'cmd' key, whose
        value determines which other keys are expected:
        - 'add_state' (with state_name)
        - 'rename_state' (with old_state_name and new_state_name)
        - 'delete_state' (with state_name)
        - 'edit_state_property' (with state_name, property_name, new_value
          and, optionally, old_value); property_name must be one of
          STATE_PROPERTIES
        - 'edit_exploration_property' (with property_name, new_value and,
          optionally, old_value); property_name must be one of
          EXPLORATION_PROPERTIES

        Raises:
            Exception: if the dict has no 'cmd' key, an unknown cmd, or an
                unsupported property_name.
        """
        if 'cmd' not in change_dict:
            raise Exception('Invalid change_dict: %s' % change_dict)
        cmd = change_dict['cmd']
        self.cmd = cmd

        if cmd == 'add_state' or cmd == 'delete_state':
            # Both commands carry only the name of the affected state.
            self.state_name = change_dict['state_name']
        elif cmd == 'rename_state':
            self.old_state_name = change_dict['old_state_name']
            self.new_state_name = change_dict['new_state_name']
        elif cmd == 'edit_state_property':
            property_name = change_dict['property_name']
            if property_name not in self.STATE_PROPERTIES:
                raise Exception('Invalid change_dict: %s' % change_dict)
            self.state_name = change_dict['state_name']
            self.property_name = property_name
            self.new_value = change_dict['new_value']
            self.old_value = change_dict.get('old_value')
        elif cmd == 'edit_exploration_property':
            property_name = change_dict['property_name']
            if property_name not in self.EXPLORATION_PROPERTIES:
                raise Exception('Invalid change_dict: %s' % change_dict)
            self.property_name = property_name
            self.new_value = change_dict['new_value']
            self.old_value = change_dict.get('old_value')
        else:
            raise Exception('Invalid change_dict: %s' % change_dict)
class ExplorationCommitLogEntry(object):
    """Value object representing a single commit made to an exploration."""

    def __init__(
            self, created_on, last_updated, user_id, username, exploration_id,
            commit_type, commit_message, commit_cmds, version,
            post_commit_status, post_commit_community_owned,
            post_commit_is_private):
        # Timestamps and version of the commit.
        self.created_on = created_on
        self.last_updated = last_updated
        self.version = version
        # Who made the commit.
        self.user_id = user_id
        self.username = username
        # What was committed, and to which exploration.
        self.exploration_id = exploration_id
        self.commit_type = commit_type
        self.commit_message = commit_message
        self.commit_cmds = commit_cmds
        # Status of the exploration immediately after this commit.
        self.post_commit_status = post_commit_status
        self.post_commit_community_owned = post_commit_community_owned
        self.post_commit_is_private = post_commit_is_private

    def to_dict(self):
        """Returns a dict for this commit entry.

        This omits created_on, user_id and (for now) commit_cmds.
        """
        serialized_entry = {
            'last_updated': utils.get_time_in_millisecs(self.last_updated),
            'username': self.username,
            'exploration_id': self.exploration_id,
            'commit_type': self.commit_type,
            'commit_message': self.commit_message,
            'version': self.version,
            'post_commit_status': self.post_commit_status,
            'post_commit_community_owned': self.post_commit_community_owned,
            'post_commit_is_private': self.post_commit_is_private,
        }
        return serialized_entry
class Content(object):
    """Value object representing non-interactive content shown to a reader."""

    def __init__(self, content_type, value=''):
        self.type = content_type
        # Sanitize the value on the way in.
        self.value = html_cleaner.clean(value)
        self.validate()

    @classmethod
    def from_dict(cls, content_dict):
        """Builds a Content object from its dict representation."""
        return cls(content_dict['type'], content_dict['value'])

    def to_dict(self):
        """Returns the dict representation of this Content object."""
        return {'type': self.type, 'value': self.value}

    def validate(self):
        """Checks that the content type and value are well-formed.

        Raises:
            utils.ValidationError: if the type is not 'text', or the value is
                not a string.
        """
        # TODO(sll): Add HTML sanitization checking.
        # TODO(sll): Validate customization args for rich-text components.
        if not self.type == 'text':
            raise utils.ValidationError('Invalid content type: %s' % self.type)
        if not isinstance(self.value, basestring):
            raise utils.ValidationError(
                'Invalid content value: %s' % self.value)

    def to_html(self, params):
        """Exports this content object to an HTML string, parameterized using
        the parameters in `params`.
        """
        if not isinstance(params, dict):
            raise Exception(
                'Expected context params for parsing content to be a dict, '
                'received %s' % params)
        parsed_value = jinja_utils.parse_string(self.value, params)
        return html_cleaner.clean(parsed_value)
class RuleSpec(object):
    """Value object representing a rule specification.

    A rule spec pairs a rule definition (how to match a learner's answer)
    with the consequences of a match: the destination state, feedback to
    show, and exploration-level parameter changes to apply.
    """

    def to_dict(self):
        """Returns a dict representing this RuleSpec (without obj_type)."""
        return {
            'definition': self.definition,
            'dest': self.dest,
            'feedback': self.feedback,
            'param_changes': [param_change.to_dict()
                              for param_change in self.param_changes],
        }

    def to_dict_with_obj_type(self):
        """Returns a dict representing this RuleSpec, including obj_type."""
        dict_with_obj_type = self.to_dict()
        dict_with_obj_type['obj_type'] = self.obj_type
        return dict_with_obj_type

    @classmethod
    def from_dict_and_obj_type(cls, rulespec_dict, obj_type):
        """Creates a RuleSpec from a dict and an externally-supplied obj_type.

        obj_type is not stored in the dict form; it comes from the enclosing
        interaction's handler spec.
        """
        return cls(
            rulespec_dict['definition'],
            rulespec_dict['dest'],
            rulespec_dict['feedback'],
            [param_domain.ParamChange(
                param_change['name'], param_change['generator_id'],
                param_change['customization_args'])
                for param_change in rulespec_dict['param_changes']],
            obj_type,
        )

    def __init__(self, definition, dest, feedback, param_changes, obj_type):
        # A dict specifying the rule definition. E.g.
        #
        # {'rule_type': 'default'}
        #
        # or
        #
        # {
        #     'rule_type': 'atomic',
        #     'name': 'LessThan',
        #     'subject': 'answer',
        #     'inputs': {'x': 5}}
        # }
        #
        self.definition = definition
        # Id of the destination state.
        # TODO(sll): Check that this state is END_DEST or actually exists.
        self.dest = dest
        # Feedback to give the reader if this rule is triggered. Each item
        # is cleaned as HTML on the way in.
        self.feedback = feedback or []
        self.feedback = [
            html_cleaner.clean(feedback_item)
            for feedback_item in self.feedback]
        # Exploration-level parameter changes to make if this rule is
        # triggered.
        self.param_changes = param_changes or []
        # The obj_type used for this rule's inputs (taken from the
        # interaction's handler spec).
        self.obj_type = obj_type

    @property
    def is_default(self):
        """Returns True if this spec corresponds to the default rule."""
        return self.definition['rule_type'] == 'default'

    @property
    def is_generic(self):
        """Returns whether this rule is generic."""
        if self.is_default:
            return True
        return rule_domain.is_generic(self.obj_type, self.definition['name'])

    def get_feedback_string(self):
        """Returns a (possibly empty) string with feedback for this rule."""
        return utils.get_random_choice(self.feedback) if self.feedback else ''

    def __str__(self):
        """Returns a string representation of a rule (for the stats log)."""
        if self.definition['rule_type'] == rule_domain.DEFAULT_RULE_TYPE:
            return 'Default'
        else:
            # TODO(sll): Treat non-atomic rules too.
            param_list = [utils.to_ascii(val) for
                          (key, val) in self.definition['inputs'].iteritems()]
            return '%s(%s)' % (self.definition['name'], ','.join(param_list))

    @classmethod
    def get_default_rule_spec(cls, state_name, obj_type):
        """Returns a default rule spec pointing at the given state."""
        return RuleSpec({'rule_type': 'default'}, state_name, [], [], obj_type)

    def validate(self):
        """Validates the types and shape of this rule spec's fields.

        Note: this does not validate the rule definition itself; see
        validate_rule_definition for that.

        Raises:
            utils.ValidationError: if any field is malformed.
        """
        if not isinstance(self.definition, dict):
            raise utils.ValidationError(
                'Expected rulespec definition to be a dict, received %s'
                % self.definition)

        if not isinstance(self.dest, basestring):
            raise utils.ValidationError(
                'Expected rulespec dest to be a string, received %s'
                % self.dest)
        if not self.dest:
            raise utils.ValidationError(
                'Every rulespec should have a destination.')

        if not isinstance(self.feedback, list):
            raise utils.ValidationError(
                'Expected rulespec feedback to be a list, received %s'
                % self.feedback)
        for feedback_item in self.feedback:
            if not isinstance(feedback_item, basestring):
                raise utils.ValidationError(
                    'Expected rulespec feedback item to be a string, received '
                    '%s' % feedback_item)

        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected rulespec param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()

    @classmethod
    def validate_rule_definition(cls, rule_definition, exp_param_specs):
        """Validates a rule definition dict, recursing into composite rules.

        Args:
            rule_definition: dict with a 'rule_type' key; remaining keys
                depend on the rule type (see the schemas below).
            exp_param_specs: dict of the exploration's param specs, used to
                check that an atomic rule's 'subject' refers to a real
                parameter (or the special subject 'answer').

        Raises:
            utils.ValidationError: if the definition is malformed.
        """
        ATOMIC_RULE_DEFINITION_SCHEMA = [
            ('inputs', dict), ('name', basestring), ('rule_type', basestring),
            ('subject', basestring)]
        COMPOSITE_RULE_DEFINITION_SCHEMA = [
            ('children', list), ('rule_type', basestring)]
        DEFAULT_RULE_DEFINITION_SCHEMA = [('rule_type', basestring)]
        ALLOWED_COMPOSITE_RULE_TYPES = [
            rule_domain.AND_RULE_TYPE, rule_domain.OR_RULE_TYPE,
            rule_domain.NOT_RULE_TYPE]

        if 'rule_type' not in rule_definition:
            raise utils.ValidationError(
                'Rule definition %s contains no rule type.' % rule_definition)

        rule_type = rule_definition['rule_type']

        if rule_type == rule_domain.DEFAULT_RULE_TYPE:
            utils.verify_dict_keys_and_types(
                rule_definition, DEFAULT_RULE_DEFINITION_SCHEMA)
        elif rule_type == rule_domain.ATOMIC_RULE_TYPE:
            utils.verify_dict_keys_and_types(
                rule_definition, ATOMIC_RULE_DEFINITION_SCHEMA)

            if (rule_definition['subject'] not in exp_param_specs
                    and rule_definition['subject'] != 'answer'):
                raise utils.ValidationError(
                    'Unrecognized rule subject: %s' %
                    rule_definition['subject'])
        else:
            if rule_type not in ALLOWED_COMPOSITE_RULE_TYPES:
                raise utils.ValidationError(
                    'Unsupported rule type %s.' % rule_type)

            utils.verify_dict_keys_and_types(
                rule_definition, COMPOSITE_RULE_DEFINITION_SCHEMA)
            # Composite rules validate each child definition recursively.
            for child_rule in rule_definition['children']:
                cls.validate_rule_definition(child_rule, exp_param_specs)
# Human-readable name of the default rule, as produced by RuleSpec.__str__
# (used in the stats log).
DEFAULT_RULESPEC_STR = 'Default'
class AnswerHandlerInstance(object):
    """Value object for an answer event stream (submit, click, drag, etc.).

    An answer handler groups the rule specs that a learner's answer is
    matched against; the final rule spec in the list is always the default
    (catch-all) rule.
    """

    def to_dict(self):
        """Returns a dict representing this AnswerHandlerInstance."""
        return {
            'name': self.name,
            'rule_specs': [rule_spec.to_dict()
                           for rule_spec in self.rule_specs]
        }

    @classmethod
    def from_dict_and_obj_type(cls, handler_dict, obj_type):
        """Creates an AnswerHandlerInstance from a dict.

        Args:
            handler_dict: dict with 'name' and 'rule_specs' keys.
            obj_type: the obj_type applied to each reconstructed RuleSpec
                (it is not stored in the dict form).
        """
        return cls(
            handler_dict['name'],
            [RuleSpec.from_dict_and_obj_type(rs, obj_type)
             for rs in handler_dict['rule_specs']],
        )

    def __init__(self, name, rule_specs=None):
        if rule_specs is None:
            rule_specs = []

        self.name = name
        # Copy each rule spec so that later mutations of the caller's
        # objects do not affect this handler.
        self.rule_specs = [RuleSpec(
            rule_spec.definition, rule_spec.dest, rule_spec.feedback,
            rule_spec.param_changes, rule_spec.obj_type
        ) for rule_spec in rule_specs]

    @property
    def default_rule_spec(self):
        """The default rule spec (always the last spec in the list)."""
        assert self.rule_specs[-1].is_default
        return self.rule_specs[-1]

    @classmethod
    def get_default_handler(cls, state_name, obj_type):
        """Returns a 'submit' handler containing only the default rule."""
        return cls('submit', [
            RuleSpec.get_default_rule_spec(state_name, obj_type)])

    def validate(self):
        """Validates the handler name and its rule specs.

        Raises:
            utils.ValidationError: if the name is not 'submit', or the rule
                specs are missing or malformed.
        """
        if self.name != 'submit':
            raise utils.ValidationError(
                'Unexpected answer handler name: %s' % self.name)

        if not isinstance(self.rule_specs, list):
            raise utils.ValidationError(
                'Expected answer handler rule specs to be a list, received %s'
                % self.rule_specs)
        if len(self.rule_specs) < 1:
            # Bug fix: the original code applied '%' to a format string with
            # no placeholder, which raised a TypeError at this point instead
            # of the intended ValidationError.
            raise utils.ValidationError(
                'There must be at least one rule spec for each answer '
                'handler.')
        for rule_spec in self.rule_specs:
            rule_spec.validate()
class InteractionInstance(object):
    """Value object for an instance of an interaction.

    An interaction instance ties an interaction id (looked up in the
    interaction registry) to per-state customization args and the answer
    handlers that route the learner to the next state.
    """

    # The default interaction used for a new state.
    _DEFAULT_INTERACTION_ID = None

    def _get_full_customization_args(self):
        """Populates the customization_args dict of the interaction with
        default values, if any of the expected customization_args are missing.
        """
        full_customization_args_dict = copy.deepcopy(self.customization_args)

        interaction = interaction_registry.Registry.get_interaction_by_id(
            self.id)
        for ca_spec in interaction.customization_arg_specs:
            if ca_spec.name not in full_customization_args_dict:
                full_customization_args_dict[ca_spec.name] = {
                    'value': ca_spec.default_value
                }
        return full_customization_args_dict

    def to_dict(self):
        """Returns a dict representing this interaction instance.

        Customization args are filled out with registry defaults; if the
        interaction id is None, no defaults can be looked up, so an empty
        dict is emitted instead.
        """
        return {
            'id': self.id,
            'customization_args': (
                {} if self.id is None
                else self._get_full_customization_args()),
            'handlers': [handler.to_dict() for handler in self.handlers],
        }

    @classmethod
    def _get_obj_type(cls, interaction_id):
        """Returns the obj_type declared by the first handler spec of the
        registered interaction, or None if interaction_id is None.
        """
        if interaction_id is None:
            return None
        else:
            return interaction_registry.Registry.get_interaction_by_id(
                interaction_id)._handlers[0]['obj_type']

    @classmethod
    def from_dict(cls, interaction_dict):
        """Creates an InteractionInstance from a dict; the handlers' obj_type
        is derived from the interaction id via the registry.
        """
        obj_type = cls._get_obj_type(interaction_dict['id'])
        return cls(
            interaction_dict['id'],
            interaction_dict['customization_args'],
            [AnswerHandlerInstance.from_dict_and_obj_type(h, obj_type)
             for h in interaction_dict['handlers']])

    def __init__(
            self, interaction_id, customization_args, handlers):
        self.id = interaction_id
        # Customization args for the interaction's view. Parts of these
        # args may be Jinja templates that refer to state parameters.
        # This is a dict: the keys are names of customization_args and the
        # values are dicts with a single key, 'value', whose corresponding
        # value is the value of the customization arg.
        self.customization_args = customization_args
        # Answer handlers and rule specs (copied defensively).
        self.handlers = [AnswerHandlerInstance(h.name, h.rule_specs)
                         for h in handlers]

    @property
    def is_terminal(self):
        """Whether the registered interaction marks the end of an
        exploration.
        """
        return interaction_registry.Registry.get_interaction_by_id(
            self.id).is_terminal

    def validate(self):
        """Validates this instance against its registered interaction.

        Side effect: unsupported customization args are logged and deleted
        from self.customization_args in place.

        Raises:
            utils.ValidationError: if the id, customization args or handlers
                are malformed.
        """
        if not isinstance(self.id, basestring):
            raise utils.ValidationError(
                'Expected interaction id to be a string, received %s' %
                self.id)
        try:
            interaction = interaction_registry.Registry.get_interaction_by_id(
                self.id)
        except KeyError:
            raise utils.ValidationError('Invalid interaction id: %s' % self.id)

        customization_arg_names = [
            ca_spec.name for ca_spec in interaction.customization_arg_specs]

        if not isinstance(self.customization_args, dict):
            raise utils.ValidationError(
                'Expected customization args to be a dict, received %s'
                % self.customization_args)

        # Validate and clean up the customization args. Deletion happens
        # after the loop so the dict is not mutated while being iterated.
        extra_args = []
        for (arg_name, arg_value) in self.customization_args.iteritems():
            if not isinstance(arg_name, basestring):
                raise utils.ValidationError(
                    'Invalid customization arg name: %s' % arg_name)
            if arg_name not in customization_arg_names:
                extra_args.append(arg_name)
                logging.warning(
                    'Interaction %s does not support customization arg %s.'
                    % (self.id, arg_name))
        for extra_arg in extra_args:
            del self.customization_args[extra_arg]

        try:
            interaction.validate_customization_arg_values(
                self.customization_args)
        except Exception:
            # TODO(sll): Raise an exception here if parameters are not
            # involved. (If they are, can we get sample values for the state
            # context parameters?)
            pass

        if not isinstance(self.handlers, list):
            raise utils.ValidationError(
                'Expected answer handlers to be a list, received %s'
                % self.handlers)
        if len(self.handlers) < 1:
            raise utils.ValidationError(
                'At least one answer handler must be specified for each '
                'interaction instance.')
        for handler in self.handlers:
            handler.validate()

    @classmethod
    def create_default_interaction(cls, default_dest_state_name):
        """Returns an interaction instance with the default id, no
        customization args, and a single default handler routing to
        default_dest_state_name.
        """
        default_obj_type = InteractionInstance._get_obj_type(
            cls._DEFAULT_INTERACTION_ID)
        return cls(
            cls._DEFAULT_INTERACTION_ID,
            {},
            [AnswerHandlerInstance.get_default_handler(
                default_dest_state_name, default_obj_type)]
        )
class GadgetInstance(object):
    """Value object for an instance of a gadget.

    A gadget instance ties a gadget id (looked up in the gadget registry) to
    the states it is visible in and its customization args.
    """

    def __init__(self, gadget_id, visible_in_states, customization_args):
        self.id = gadget_id

        # List of State name strings where this Gadget is visible.
        self.visible_in_states = visible_in_states

        # Customization args for the gadget's view.
        self.customization_args = customization_args

    @property
    def gadget(self):
        """Gadget spec for validation and derived properties below."""
        return gadget_registry.Registry.get_gadget_by_id(self.id)

    @property
    def width(self):
        """Width in pixels."""
        return self.gadget.get_width(self.customization_args)

    @property
    def height(self):
        """Height in pixels."""
        return self.gadget.get_height(self.customization_args)

    def validate(self):
        """Validate attributes of this GadgetInstance.

        Side effect: unsupported customization args are logged and deleted
        from self.customization_args in place.

        Raises:
            utils.ValidationError: if the gadget id is unknown, its
                customization args fail the gadget's own validation, or it is
                not visible in any state.
        """
        try:
            self.gadget
        except KeyError:
            raise utils.ValidationError(
                'Unknown gadget with ID %s is not in the registry.' % self.id)

        # Use keys() rather than iterkeys() so this also works on Python 3;
        # behavior is identical since the keys are immediately copied into a
        # set.
        unknown_customization_arguments = set(
            self.customization_args.keys()) - set(
                [customization_arg.name for customization_arg
                 in self.gadget.customization_arg_specs])
        if unknown_customization_arguments:
            for arg_name in unknown_customization_arguments:
                logging.warning(
                    'Gadget %s does not support customization arg %s.'
                    % (self.id, arg_name))
                del self.customization_args[arg_name]

        self.gadget.validate(self.customization_args)

        # Idiomatic emptiness check (was `== []`, which also missed other
        # empty sequence types).
        if not self.visible_in_states:
            raise utils.ValidationError(
                '%s gadget not visible in any states.' % (
                    self.gadget.name))

    def to_dict(self):
        """Returns GadgetInstance data represented in dict form."""
        return {
            'gadget_id': self.id,
            'visible_in_states': self.visible_in_states,
            'customization_args': self._get_full_customization_args(),
        }

    @classmethod
    def from_dict(cls, gadget_dict):
        """Returns GadgetInstance constructed from dict data."""
        return GadgetInstance(
            gadget_dict['gadget_id'],
            gadget_dict['visible_in_states'],
            gadget_dict['customization_args'])

    def _get_full_customization_args(self):
        """Populates the customization_args dict of the gadget with
        default values, if any of the expected customization_args are missing.
        """
        full_customization_args_dict = copy.deepcopy(self.customization_args)

        for ca_spec in self.gadget.customization_arg_specs:
            if ca_spec.name not in full_customization_args_dict:
                full_customization_args_dict[ca_spec.name] = {
                    'value': ca_spec.default_value
                }
        return full_customization_args_dict
class SkinInstance(object):
    """Domain object for a skin instance.

    A skin instance maps each of the skin's panels to the list of gadget
    instances shown in that panel.
    """

    def __init__(self, skin_id, skin_customizations):
        self.skin_id = skin_id
        # panel_contents_dict has gadget_panel_name strings as keys and
        # lists of GadgetInstance instances as values.
        self.panel_contents_dict = {}
        # Use items() rather than iteritems() so this also works on
        # Python 3; iteration behavior is identical.
        for panel_name, gdict_list in skin_customizations[
                'panels_contents'].items():
            self.panel_contents_dict[panel_name] = [GadgetInstance(
                gdict['gadget_id'], gdict['visible_in_states'],
                gdict['customization_args']) for gdict in gdict_list]

    @property
    def skin(self):
        """Skin spec for validation and derived properties."""
        return skins_services.Registry.get_skin_by_id(self.skin_id)

    def validate(self):
        """Validates that gadgets fit the skin panel dimensions, and that the
        gadgets themselves are valid.

        Raises:
            utils.ValidationError: if a panel is not part of the skin, a
                panel's gadgets do not fit it, or a gadget is invalid.
        """
        for panel_name, gadget_instances_list in (
                self.panel_contents_dict.items()):

            # Validate existence of panels in the skin.
            # (Idiomatic membership test; was `not panel_name in ...`.)
            if panel_name not in self.skin.panels_properties:
                raise utils.ValidationError(
                    '%s panel not found in skin %s' % (
                        panel_name, self.skin_id)
                )

            # Validate gadgets fit each skin panel.
            self.skin.validate_panel(panel_name, gadget_instances_list)

            # Validate gadget internal attributes.
            for gadget_instance in gadget_instances_list:
                gadget_instance.validate()

    def to_dict(self):
        """Returns SkinInstance data represented in dict form.
        """
        return {
            'skin_id': self.skin_id,
            'skin_customizations': {
                'panels_contents': {
                    panel_name: [
                        gadget_instance.to_dict() for gadget_instance
                        in instances_list]
                    for panel_name, instances_list in
                    self.panel_contents_dict.items()
                },
            }
        }

    @classmethod
    def from_dict(cls, skin_dict):
        """Returns SkinInstance instance given dict form."""
        return SkinInstance(
            skin_dict['skin_id'],
            skin_dict['skin_customizations'])

    def get_state_names_required_by_gadgets(self):
        """Returns a sorted list of strings representing State names required
        by GadgetInstances in this skin."""
        state_names = set()
        for gadget_instances_list in self.panel_contents_dict.values():
            for gadget_instance in gadget_instances_list:
                for state_name in gadget_instance.visible_in_states:
                    state_names.add(state_name)

        # We convert to a sorted list for clean deterministic testing.
        return sorted(state_names)
class State(object):
    """Domain object for a state.

    A state bundles the content shown to the reader, the parameter changes
    applied on entry, and the interaction that collects the reader's answer.
    """

    # Dict form of an interaction that has not yet been specified (id None),
    # with a single default-rule 'submit' handler.
    NULL_INTERACTION_DICT = {
        'id': None,
        'customization_args': {},
        'handlers': [{
            'name': 'submit',
            'rule_specs': [{
                'dest': feconf.DEFAULT_INIT_STATE_NAME,
                'definition': {
                    'rule_type': 'default',
                },
                'feedback': [],
                'param_changes': [],
            }],
        }],
    }

    def __init__(self, content, param_changes, interaction):
        # The content displayed to the reader in this state.
        # (Copied defensively, as are the other constructor args below.)
        self.content = [Content(item.type, item.value) for item in content]
        # Parameter changes associated with this state.
        self.param_changes = [param_domain.ParamChange(
            param_change.name, param_change.generator.id,
            param_change.customization_args)
            for param_change in param_changes]
        # The interaction instance associated with this state.
        self.interaction = InteractionInstance(
            interaction.id, interaction.customization_args,
            interaction.handlers)

    def validate(self, allow_null_interaction):
        """Validates the state's content, param changes and interaction.

        Args:
            allow_null_interaction: if False, an unspecified interaction
                (id None) is treated as an error.

        Raises:
            utils.ValidationError: if any component is malformed.
        """
        if not isinstance(self.content, list):
            raise utils.ValidationError(
                'Expected state content to be a list, received %s'
                % self.content)
        if len(self.content) != 1:
            raise utils.ValidationError(
                'The state content list must have exactly one element. '
                'Received %s' % self.content)
        self.content[0].validate()

        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected state param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()

        if not allow_null_interaction:
            if self.interaction.id is None:
                raise utils.ValidationError(
                    'This state does not have any interaction specified.')
            else:
                self.interaction.validate()

    def update_content(self, content_list):
        """Replaces the state content with the first item of content_list."""
        # TODO(sll): Must sanitize all content in RTE component attrs.
        self.content = [Content.from_dict(content_list[0])]

    def update_param_changes(self, param_change_dicts):
        """Replaces the state's param changes from a list of dicts."""
        self.param_changes = [
            param_domain.ParamChange.from_dict(param_change_dict)
            for param_change_dict in param_change_dicts]

    def update_interaction_id(self, interaction_id):
        """Replaces the interaction id, keeping the existing handlers."""
        self.interaction.id = interaction_id
        # TODO(sll): This should also clear interaction.handlers (except for
        # the default rule). This is somewhat mitigated because the client
        # updates interaction_handlers directly after this, but we should fix
        # it.

    def update_interaction_customization_args(self, customization_args):
        """Replaces the interaction's customization args wholesale."""
        self.interaction.customization_args = customization_args

    def update_interaction_handlers(self, handlers_dict):
        """Rebuilds the interaction's 'submit' handler from a dict of rules.

        handlers_dict maps handler names to rulesets; only the submit
        handler is read. Each rule dict's feedback is HTML-cleaned and its
        inputs normalized in place. The last rule must be the default rule,
        and no other rule may be a default rule.

        Raises:
            Exception / ValueError: if the dict, ruleset ordering or rule
                inputs are malformed.
        """
        if not isinstance(handlers_dict, dict):
            raise Exception(
                'Expected interaction_handlers to be a dictionary, received %s'
                % handlers_dict)
        ruleset = handlers_dict[feconf.SUBMIT_HANDLER_NAME]
        if not isinstance(ruleset, list):
            raise Exception(
                'Expected interaction_handlers.submit to be a list, '
                'received %s' % ruleset)

        interaction_handlers = [AnswerHandlerInstance('submit', [])]

        # TODO(yanamal): Do additional calculations here to get the
        # parameter changes, if necessary.
        for rule_ind in range(len(ruleset)):
            rule_dict = ruleset[rule_ind]
            rule_dict['feedback'] = [html_cleaner.clean(feedback)
                                     for feedback in rule_dict['feedback']]
            if 'param_changes' not in rule_dict:
                rule_dict['param_changes'] = []
            obj_type = InteractionInstance._get_obj_type(self.interaction.id)
            rule_spec = RuleSpec.from_dict_and_obj_type(rule_dict, obj_type)
            rule_type = rule_spec.definition['rule_type']

            if rule_ind == len(ruleset) - 1:
                if rule_type != rule_domain.DEFAULT_RULE_TYPE:
                    raise ValueError(
                        'Invalid ruleset %s: the last rule should be a '
                        'default rule' % rule_dict)
            else:
                if rule_type == rule_domain.DEFAULT_RULE_TYPE:
                    raise ValueError(
                        'Invalid ruleset %s: rules other than the '
                        'last one should not be default rules.' % rule_dict)

                # TODO(sll): Generalize this to Boolean combinations of rules.
                matched_rule = (
                    interaction_registry.Registry.get_interaction_by_id(
                        self.interaction.id
                    ).get_rule_by_name('submit', rule_spec.definition['name']))

                # Normalize and store the rule params.
                # TODO(sll): Generalize this to Boolean combinations of rules.
                rule_inputs = rule_spec.definition['inputs']
                if not isinstance(rule_inputs, dict):
                    raise Exception(
                        'Expected rule_inputs to be a dict, received %s'
                        % rule_inputs)
                for param_name, value in rule_inputs.iteritems():
                    param_type = rule_domain.get_obj_type_for_param_name(
                        matched_rule, param_name)

                    if (isinstance(value, basestring) and
                            '{{' in value and '}}' in value):
                        # TODO(jacobdavis11): Create checks that all parameters
                        # referred to exist and have the correct types
                        normalized_param = value
                    else:
                        try:
                            normalized_param = param_type.normalize(value)
                        except TypeError:
                            raise Exception(
                                '%s has the wrong type. It should be a %s.' %
                                (value, param_type.__name__))
                    rule_inputs[param_name] = normalized_param

            interaction_handlers[0].rule_specs.append(rule_spec)
        self.interaction.handlers = interaction_handlers

    def to_dict(self):
        """Returns a dict representing this state."""
        return {
            'content': [item.to_dict() for item in self.content],
            'param_changes': [param_change.to_dict()
                              for param_change in self.param_changes],
            'interaction': self.interaction.to_dict()
        }

    @classmethod
    def _get_current_state_dict(cls, state_dict):
        """If the state dict still uses 'widget', change it to 'interaction'.

        This corresponds to the v3 --> v4 migration in the YAML representation
        of an exploration. Returns a deep copy of the (migrated) dict.
        """
        if 'widget' in state_dict:
            # This is an old version of the state dict which still uses
            # 'widget'.
            state_dict['interaction'] = copy.deepcopy(state_dict['widget'])
            state_dict['interaction']['id'] = copy.deepcopy(
                state_dict['interaction']['widget_id'])
            del state_dict['interaction']['widget_id']
            del state_dict['widget']

        return copy.deepcopy(state_dict)

    @classmethod
    def from_dict(cls, state_dict):
        """Creates a State from a (possibly pre-v4) dict representation."""
        current_state_dict = cls._get_current_state_dict(state_dict)

        return cls(
            [Content.from_dict(item)
             for item in current_state_dict['content']],
            [param_domain.ParamChange.from_dict(param)
             for param in current_state_dict['param_changes']],
            InteractionInstance.from_dict(current_state_dict['interaction']))

    @classmethod
    def create_default_state(
            cls, default_dest_state_name, is_initial_state=False):
        """Returns a state with default content and the default interaction
        routing to default_dest_state_name.
        """
        text_str = (
            feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '')
        return cls(
            [Content('text', text_str)], [],
            InteractionInstance.create_default_interaction(
                default_dest_state_name))
class Exploration(object):
"""Domain object for an Oppia exploration."""
    def __init__(self, exploration_id, title, category, objective,
                 language_code, tags, blurb, author_notes, default_skin,
                 skin_customizations, init_state_name, states_dict,
                 param_specs_dict, param_changes_list, version,
                 created_on=None, last_updated=None):
        """Initializes an Exploration from primitive values and dicts.

        states_dict, param_specs_dict and param_changes_list are converted
        into State, ParamSpec and ParamChange domain objects respectively;
        skin_customizations is wrapped in a SkinInstance.
        """
        self.id = exploration_id
        self.title = title
        self.category = category
        self.objective = objective
        self.language_code = language_code
        self.tags = tags
        self.blurb = blurb
        self.author_notes = author_notes
        self.default_skin = default_skin
        self.init_state_name = init_state_name

        self.skin_instance = SkinInstance(default_skin, skin_customizations)

        self.states = {}
        for (state_name, state_dict) in states_dict.iteritems():
            self.states[state_name] = State.from_dict(state_dict)

        self.param_specs = {
            ps_name: param_domain.ParamSpec.from_dict(ps_val)
            for (ps_name, ps_val) in param_specs_dict.iteritems()
        }
        self.param_changes = [
            param_domain.ParamChange.from_dict(param_change_dict)
            for param_change_dict in param_changes_list]

        self.version = version
        self.created_on = created_on
        self.last_updated = last_updated
def is_equal_to(self, other):
simple_props = [
'id', 'title', 'category', 'objective', 'language_code',
'tags', 'blurb', 'author_notes', 'default_skin',
'init_state_name', 'version']
for prop in simple_props:
if getattr(self, prop) != getattr(other, prop):
return False
for (state_name, state_obj) in self.states.iteritems():
if state_name not in other.states:
return False
if state_obj.to_dict() != other.states[state_name].to_dict():
return False
for (ps_name, ps_obj) in self.param_specs.iteritems():
if ps_name not in other.param_specs:
return False
if ps_obj.to_dict() != other.param_specs[ps_name].to_dict():
return False
for i in xrange(len(self.param_changes)):
if (self.param_changes[i].to_dict() !=
other.param_changes[i].to_dict()):
return False
return True
    @classmethod
    def create_default_exploration(
            cls, exploration_id, title, category, objective='',
            language_code=feconf.DEFAULT_LANGUAGE_CODE):
        """Returns an exploration with a single default initial state, the
        'conversation_v1' skin, and no tags, param specs or param changes.
        """
        init_state_dict = State.create_default_state(
            feconf.DEFAULT_INIT_STATE_NAME, is_initial_state=True).to_dict()

        states_dict = {
            feconf.DEFAULT_INIT_STATE_NAME: init_state_dict
        }

        return cls(
            exploration_id, title, category, objective, language_code, [], '',
            '', 'conversation_v1', feconf.DEFAULT_SKIN_CUSTOMIZATIONS,
            feconf.DEFAULT_INIT_STATE_NAME, states_dict, {}, [], 0)
@classmethod
def _require_valid_name(cls, name, name_type):
"""Generic name validation.
Args:
name: the name to validate.
name_type: a human-readable string, like 'the exploration title' or
'a state name'. This will be shown in error messages.
"""
# This check is needed because state names are used in URLs and as ids
# for statistics, so the name length should be bounded above.
if len(name) > 50 or len(name) < 1:
raise utils.ValidationError(
'The length of %s should be between 1 and 50 '
'characters; received %s' % (name_type, name))
if name[0] in string.whitespace or name[-1] in string.whitespace:
raise utils.ValidationError(
'Names should not start or end with whitespace.')
if re.search('\s\s+', name):
raise utils.ValidationError(
'Adjacent whitespace in %s should be collapsed.' % name_type)
for c in feconf.INVALID_NAME_CHARS:
if c in name:
raise utils.ValidationError(
'Invalid character %s in %s: %s' % (c, name_type, name))
@classmethod
def _require_valid_state_name(cls, name):
cls._require_valid_name(name, 'a state name')
if name.lower() == feconf.END_DEST.lower():
raise utils.ValidationError(
'Invalid state name: %s' % feconf.END_DEST)
def validate(self, strict=False, allow_null_interaction=False):
"""Validates the exploration before it is committed to storage.
If strict is True, performs advanced validation.
"""
if not isinstance(self.title, basestring):
raise utils.ValidationError(
'Expected title to be a string, received %s' % self.title)
self._require_valid_name(self.title, 'the exploration title')
if not isinstance(self.category, basestring):
raise utils.ValidationError(
'Expected category to be a string, received %s'
% self.category)
self._require_valid_name(self.category, 'the exploration category')
if not isinstance(self.objective, basestring):
raise utils.ValidationError(
'Expected objective to be a string, received %s' %
self.objective)
if not isinstance(self.language_code, basestring):
raise utils.ValidationError(
'Expected language_code to be a string, received %s' %
self.language_code)
if not any([self.language_code == lc['code']
for lc in feconf.ALL_LANGUAGE_CODES]):
raise utils.ValidationError(
'Invalid language_code: %s' % self.language_code)
if not isinstance(self.tags, list):
raise utils.ValidationError(
'Expected \'tags\' to be a list, received %s' % self.tags)
for tag in self.tags:
if not isinstance(tag, basestring):
raise utils.ValidationError(
'Expected each tag in \'tags\' to be a string, received '
'\'%s\'' % tag)
if not tag:
raise utils.ValidationError('Tags should be non-empty.')
if not re.match(feconf.TAG_REGEX, tag):
raise utils.ValidationError(
'Tags should only contain lowercase letters and spaces, '
'received \'%s\'' % tag)
if (tag[0] not in string.ascii_lowercase or
tag[-1] not in string.ascii_lowercase):
raise utils.ValidationError(
'Tags should not start or end with whitespace, received '
' \'%s\'' % tag)
if re.search('\s\s+', tag):
raise utils.ValidationError(
'Adjacent whitespace in tags should be collapsed, '
'received \'%s\'' % tag)
if len(set(self.tags)) != len(self.tags):
raise utils.ValidationError('Some tags duplicate each other')
if not isinstance(self.blurb, basestring):
raise utils.ValidationError(
'Expected blurb to be a string, received %s' % self.blurb)
if not isinstance(self.author_notes, basestring):
raise utils.ValidationError(
'Expected author_notes to be a string, received %s' %
self.author_notes)
if not self.default_skin:
raise utils.ValidationError(
'Expected a default_skin to be specified.')
if not isinstance(self.default_skin, basestring):
raise utils.ValidationError(
'Expected default_skin to be a string, received %s (%s).'
% self.default_skin, type(self.default_skin))
if not self.default_skin in skins_services.Registry.get_all_skin_ids():
raise utils.ValidationError(
'Unrecognized skin id: %s' % self.default_skin)
if not isinstance(self.states, dict):
raise utils.ValidationError(
'Expected states to be a dict, received %s' % self.states)
if not self.states:
raise utils.ValidationError('This exploration has no states.')
for state_name in self.states:
self._require_valid_state_name(state_name)
self.states[state_name].validate(
allow_null_interaction=allow_null_interaction)
if not self.init_state_name:
raise utils.ValidationError(
'This exploration has no initial state name specified.')
if self.init_state_name not in self.states:
raise utils.ValidationError(
'There is no state in %s corresponding to the exploration\'s '
'initial state name %s.' %
(self.states.keys(), self.init_state_name))
if not isinstance(self.param_specs, dict):
raise utils.ValidationError(
'Expected param_specs to be a dict, received %s'
% self.param_specs)
for param_name in self.param_specs:
if not isinstance(param_name, basestring):
raise utils.ValidationError(
'Expected parameter name to be a string, received %s (%s).'
% param_name, type(param_name))
if not re.match(feconf.ALPHANUMERIC_REGEX, param_name):
raise utils.ValidationError(
'Only parameter names with characters in [a-zA-Z0-9] are '
'accepted.')
self.param_specs[param_name].validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'No parameter named \'%s\' exists in this exploration'
% param_change.name)
if param_change.name in feconf.INVALID_PARAMETER_NAMES:
raise utils.ValidationError(
'The exploration-level parameter with name \'%s\' is '
'reserved. Please choose a different name.'
% param_change.name)
# TODO(sll): Find a way to verify the param change customization args
# when they depend on exploration/state parameters (e.g. the generated
# values must have the correct obj_type). Can we get sample values for
# the reader's answer and these parameters by looking at states that
# link to this one?
# Check that all state param changes are valid.
for state_name, state in self.states.iteritems():
for param_change in state.param_changes:
param_change.validate()
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter with name \'%s\' was set in state '
'\'%s\', but it does not exist in the list of '
'parameter specifications for this exploration.'
% (param_change.name, state_name))
if param_change.name in feconf.INVALID_PARAMETER_NAMES:
raise utils.ValidationError(
'The parameter name \'%s\' is reserved. Please choose '
'a different name for the parameter being set in '
'state \'%s\'.' % (param_change.name, state_name))
# Check that all rule definitions, destinations and param changes are
# valid.
all_state_names = self.states.keys() + [feconf.END_DEST]
for state in self.states.values():
for handler in state.interaction.handlers:
for rule_spec in handler.rule_specs:
RuleSpec.validate_rule_definition(
rule_spec.definition, self.param_specs)
if rule_spec.dest not in all_state_names:
raise utils.ValidationError(
'The destination %s is not a valid state.'
% rule_spec.dest)
for param_change in rule_spec.param_changes:
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter %s was used in a rule, but it '
'does not exist in this exploration'
% param_change.name)
# Check that state names required by gadgets exist.
state_names_required_by_gadgets = set(
self.skin_instance.get_state_names_required_by_gadgets())
missing_state_names = state_names_required_by_gadgets - set(
self.states.keys())
if missing_state_names:
raise utils.ValidationError(
'Exploration missing required state%s: %s' % (
's' if len(missing_state_names) > 1 else '',
', '.join(sorted(missing_state_names)))
)
# Check that GadgetInstances fit the skin and that gadgets are valid.
self.skin_instance.validate()
if strict:
warnings_list = []
try:
self._verify_all_states_reachable()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
try:
self._verify_no_dead_ends()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
if not self.objective:
warnings_list.append(
'An objective must be specified (in the \'Settings\' tab).'
)
if not self.language_code:
warnings_list.append(
'A language must be specified (in the \'Settings\' tab).')
if len(warnings_list) > 0:
warning_str = ''
for ind, warning in enumerate(warnings_list):
warning_str += '%s. %s ' % (ind + 1, warning)
raise utils.ValidationError(
'Please fix the following issues before saving this '
'exploration: %s' % warning_str)
def _verify_all_states_reachable(self):
"""Verifies that all states are reachable from the initial state."""
# This queue stores state names.
processed_queue = []
curr_queue = [self.init_state_name]
while curr_queue:
curr_state_name = curr_queue[0]
curr_queue = curr_queue[1:]
if curr_state_name in processed_queue:
continue
processed_queue.append(curr_state_name)
curr_state = self.states[curr_state_name]
if not _is_interaction_terminal(curr_state.interaction.id):
for handler in curr_state.interaction.handlers:
for rule in handler.rule_specs:
dest_state = rule.dest
if (dest_state not in curr_queue and
dest_state not in processed_queue and
dest_state != feconf.END_DEST):
curr_queue.append(dest_state)
if len(self.states) != len(processed_queue):
unseen_states = list(
set(self.states.keys()) - set(processed_queue))
raise utils.ValidationError(
'The following states are not reachable from the initial '
'state: %s' % ', '.join(unseen_states))
def _verify_no_dead_ends(self):
"""Verifies that all states can reach a terminal state."""
# This queue stores state names.
processed_queue = []
curr_queue = [feconf.END_DEST]
for (state_name, state) in self.states.iteritems():
if _is_interaction_terminal(state.interaction.id):
curr_queue.append(state_name)
while curr_queue:
curr_state_name = curr_queue[0]
curr_queue = curr_queue[1:]
if curr_state_name in processed_queue:
continue
if curr_state_name != feconf.END_DEST:
processed_queue.append(curr_state_name)
for (state_name, state) in self.states.iteritems():
if (state_name not in curr_queue
and state_name not in processed_queue):
for handler in state.interaction.handlers:
for rule_spec in handler.rule_specs:
if rule_spec.dest == curr_state_name:
curr_queue.append(state_name)
break
if len(self.states) != len(processed_queue):
dead_end_states = list(
set(self.states.keys()) - set(processed_queue))
raise utils.ValidationError(
'It is impossible to complete the exploration from the '
'following states: %s' % ', '.join(dead_end_states))
# Derived attributes of an exploration,
@property
def init_state(self):
"""The state which forms the start of this exploration."""
return self.states[self.init_state_name]
@property
def param_specs_dict(self):
"""A dict of param specs, each represented as Python dicts."""
return {ps_name: ps_val.to_dict()
for (ps_name, ps_val) in self.param_specs.iteritems()}
@property
def param_change_dicts(self):
"""A list of param changes, represented as JSONifiable Python dicts."""
return [param_change.to_dict() for param_change in self.param_changes]
@classmethod
def is_demo_exploration_id(cls, exploration_id):
"""Whether the exploration id is that of a demo exploration."""
return exploration_id.isdigit() and (
0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS))
@property
def is_demo(self):
"""Whether the exploration is one of the demo explorations."""
return self.is_demo_exploration_id(self.id)
def update_title(self, title):
self.title = title
def update_category(self, category):
self.category = category
def update_objective(self, objective):
self.objective = objective
def update_language_code(self, language_code):
self.language_code = language_code
def update_tags(self, tags):
self.tags = tags
def update_blurb(self, blurb):
self.blurb = blurb
def update_author_notes(self, author_notes):
self.author_notes = author_notes
def update_param_specs(self, param_specs_dict):
self.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val)
for (ps_name, ps_val) in param_specs_dict.iteritems()
}
def update_param_changes(self, param_changes_list):
self.param_changes = [
param_domain.ParamChange.from_dict(param_change)
for param_change in param_changes_list
]
def update_default_skin_id(self, default_skin_id):
self.default_skin = default_skin_id
def update_init_state_name(self, init_state_name):
if init_state_name not in self.states:
raise Exception(
'Invalid new initial state name: %s; '
'it is not in the list of states %s for this '
'exploration.' % (init_state_name, self.states.keys()))
self.init_state_name = init_state_name
# Methods relating to states.
def add_states(self, state_names):
"""Adds multiple states to the exploration."""
for state_name in state_names:
if state_name in self.states:
raise ValueError('Duplicate state name %s' % state_name)
for state_name in state_names:
self.states[state_name] = State.create_default_state(state_name)
def rename_state(self, old_state_name, new_state_name):
"""Renames the given state."""
if old_state_name not in self.states:
raise ValueError('State %s does not exist' % old_state_name)
if (old_state_name != new_state_name and
new_state_name in self.states):
raise ValueError('Duplicate state name: %s' % new_state_name)
if old_state_name == new_state_name:
return
self._require_valid_state_name(new_state_name)
self.states[new_state_name] = copy.deepcopy(
self.states[old_state_name])
del self.states[old_state_name]
if self.init_state_name == old_state_name:
self.update_init_state_name(new_state_name)
# Find all destinations in the exploration which equal the renamed
# state, and change the name appropriately.
for other_state_name in self.states:
other_state = self.states[other_state_name]
for handler in other_state.interaction.handlers:
for rule in handler.rule_specs:
if rule.dest == old_state_name:
rule.dest = new_state_name
def delete_state(self, state_name):
"""Deletes the given state."""
if state_name not in self.states:
raise ValueError('State %s does not exist' % state_name)
# Do not allow deletion of initial states.
if self.init_state_name == state_name:
raise ValueError('Cannot delete initial state of an exploration.')
# Find all destinations in the exploration which equal the deleted
# state, and change them to loop back to their containing state.
for other_state_name in self.states:
other_state = self.states[other_state_name]
for handler in other_state.interaction.handlers:
for rule in handler.rule_specs:
if rule.dest == state_name:
rule.dest = other_state_name
del self.states[state_name]
# The current version of the exploration schema. If any backward-
# incompatible changes are made to the exploration schema in the YAML
# definitions, this version number must be changed and a migration process
# put in place.
CURRENT_EXPLORATION_SCHEMA_VERSION = 5
@classmethod
def _convert_v1_dict_to_v2_dict(cls, exploration_dict):
"""Converts a v1 exploration dict into a v2 exploration dict."""
exploration_dict['schema_version'] = 2
exploration_dict['init_state_name'] = (
exploration_dict['states'][0]['name'])
states_dict = {}
for state in exploration_dict['states']:
states_dict[state['name']] = state
del states_dict[state['name']]['name']
exploration_dict['states'] = states_dict
return exploration_dict
@classmethod
def _convert_v2_dict_to_v3_dict(cls, exploration_dict):
"""Converts a v2 exploration dict into a v3 exploration dict."""
exploration_dict['schema_version'] = 3
exploration_dict['objective'] = ''
exploration_dict['language_code'] = feconf.DEFAULT_LANGUAGE_CODE
exploration_dict['skill_tags'] = []
exploration_dict['blurb'] = ''
exploration_dict['author_notes'] = ''
return exploration_dict
@classmethod
def _convert_v3_dict_to_v4_dict(cls, exploration_dict):
"""Converts a v3 exploration dict into a v4 exploration dict."""
exploration_dict['schema_version'] = 4
for _, state_defn in exploration_dict['states'].iteritems():
state_defn['interaction'] = copy.deepcopy(state_defn['widget'])
state_defn['interaction']['id'] = copy.deepcopy(
state_defn['interaction']['widget_id'])
del state_defn['interaction']['widget_id']
del state_defn['interaction']['sticky']
del state_defn['widget']
return exploration_dict
@classmethod
def _convert_v4_dict_to_v5_dict(cls, exploration_dict):
"""Converts a v4 exploration dict into a v5 exploration dict."""
exploration_dict['schema_version'] = 5
# Rename the 'skill_tags' field to 'tags'.
exploration_dict['tags'] = exploration_dict['skill_tags']
del exploration_dict['skill_tags']
exploration_dict['skin_customizations'] = (
feconf.DEFAULT_SKIN_CUSTOMIZATIONS)
return exploration_dict
@classmethod
def from_yaml(cls, exploration_id, title, category, yaml_content):
"""Creates and returns exploration from a YAML text string."""
try:
exploration_dict = utils.dict_from_yaml(yaml_content)
except Exception as e:
raise Exception(
'Please ensure that you are uploading a YAML text file, not '
'a zip file. The YAML parser returned the following error: %s'
% e)
exploration_schema_version = exploration_dict.get('schema_version')
if exploration_schema_version is None:
raise Exception('Invalid YAML file: no schema version specified.')
if not (1 <= exploration_schema_version
<= cls.CURRENT_EXPLORATION_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1, v2, v3 and v4 YAML files at '
'present.')
if exploration_schema_version == 1:
exploration_dict = cls._convert_v1_dict_to_v2_dict(
exploration_dict)
exploration_schema_version = 2
if exploration_schema_version == 2:
exploration_dict = cls._convert_v2_dict_to_v3_dict(
exploration_dict)
exploration_schema_version = 3
if exploration_schema_version == 3:
exploration_dict = cls._convert_v3_dict_to_v4_dict(
exploration_dict)
exploration_schema_version = 4
if exploration_schema_version == 4:
exploration_dict = cls._convert_v4_dict_to_v5_dict(
exploration_dict)
exploration_schema_version = 5
exploration = cls.create_default_exploration(
exploration_id, title, category,
objective=exploration_dict['objective'],
language_code=exploration_dict['language_code'])
exploration.tags = exploration_dict['tags']
exploration.blurb = exploration_dict['blurb']
exploration.author_notes = exploration_dict['author_notes']
exploration.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val) for
(ps_name, ps_val) in exploration_dict['param_specs'].iteritems()
}
init_state_name = exploration_dict['init_state_name']
exploration.rename_state(exploration.init_state_name, init_state_name)
exploration.add_states([
state_name for state_name in exploration_dict['states']
if state_name != init_state_name])
for (state_name, sdict) in exploration_dict['states'].iteritems():
state = exploration.states[state_name]
state.content = [
Content(item['type'], html_cleaner.clean(item['value']))
for item in sdict['content']
]
state.param_changes = [param_domain.ParamChange(
pc['name'], pc['generator_id'], pc['customization_args']
) for pc in sdict['param_changes']]
for pc in state.param_changes:
if pc.name not in exploration.param_specs:
raise Exception('Parameter %s was used in a state but not '
'declared in the exploration param_specs.'
% pc.name)
idict = sdict['interaction']
interaction_handlers = [
AnswerHandlerInstance.from_dict_and_obj_type({
'name': handler['name'],
'rule_specs': [{
'definition': rule_spec['definition'],
'dest': rule_spec['dest'],
'feedback': [html_cleaner.clean(feedback)
for feedback in rule_spec['feedback']],
'param_changes': rule_spec.get('param_changes', []),
} for rule_spec in handler['rule_specs']],
}, InteractionInstance._get_obj_type(idict['id']))
for handler in idict['handlers']]
state.interaction = InteractionInstance(
idict['id'], idict['customization_args'],
interaction_handlers)
exploration.states[state_name] = state
exploration.default_skin = exploration_dict['default_skin']
exploration.param_changes = [
param_domain.ParamChange.from_dict(pc)
for pc in exploration_dict['param_changes']]
exploration.skin_instance = SkinInstance(
exploration_dict['default_skin'],
exploration_dict['skin_customizations'])
return exploration
def to_yaml(self):
return utils.yaml_from_dict({
'author_notes': self.author_notes,
'blurb': self.blurb,
'default_skin': self.default_skin,
'init_state_name': self.init_state_name,
'language_code': self.language_code,
'objective': self.objective,
'param_changes': self.param_change_dicts,
'param_specs': self.param_specs_dict,
'tags': self.tags,
'skin_customizations': self.skin_instance.to_dict()[
'skin_customizations'],
'states': {state_name: state.to_dict()
for (state_name, state) in self.states.iteritems()},
'schema_version': self.CURRENT_EXPLORATION_SCHEMA_VERSION
})
def to_player_dict(self):
"""Returns a copy of the exploration suitable for inclusion in the
learner view."""
return {
'init_state_name': self.init_state_name,
'param_changes': self.param_change_dicts,
'param_specs': self.param_specs_dict,
'states': {
state_name: state.to_dict()
for (state_name, state) in self.states.iteritems()
},
'title': self.title,
}
def get_interaction_ids(self):
"""Get all interaction ids used in this exploration."""
return list(set([
state.interaction.id for state in self.states.values()]))
class ExplorationSummary(object):
"""Domain object for an Oppia exploration summary."""
def __init__(self, exploration_id, title, category, objective,
language_code, tags, ratings, status,
community_owned, owner_ids, editor_ids,
viewer_ids, version, exploration_model_created_on,
exploration_model_last_updated):
"""'ratings' is a dict whose keys are '1', '2', '3', '4', '5' and whose
values are nonnegative integers representing frequency counts. Note
that the keys need to be strings in order for this dict to be
JSON-serializable.
"""
self.id = exploration_id
self.title = title
self.category = category
self.objective = objective
self.language_code = language_code
self.tags = tags
self.ratings = ratings
self.status = status
self.community_owned = community_owned
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.viewer_ids = viewer_ids
self.version = version
self.exploration_model_created_on = exploration_model_created_on
self.exploration_model_last_updated = exploration_model_last_updated
| apache-2.0 | 819,301,409,281,114,400 | 7,382,950,298,301,082,000 | 39.511905 | 79 | 0.583691 | false |
rrampage/rethinkdb | test/rql_test/connections/http_support/jinja2/parser.py | 637 | 35186 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import next, imap
#: statement keywords that are handled by a dedicated parse_<keyword> method
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
                                 'macro', 'include', 'from', 'import',
                                 'set'])
#: token types of the comparison operators recognized inside expressions
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
    def __init__(self, environment, source, name=None, filename=None,
                 state=None):
        """Set up a parser for *source* using the lexer configuration and
        extensions provided by *environment*.
        """
        self.environment = environment
        # Token stream produced by lexing the source.
        self.stream = environment._tokenize(source, name, filename, state)
        self.name = name
        self.filename = filename
        self.closed = False
        # Maps custom tag names to the parse callable of the extension
        # that registered them.
        self.extensions = {}
        for extension in environment.iter_extensions():
            for tag in extension.tags:
                self.extensions[tag] = extension.parse
        # Counter used by free_identifier() to produce unique names.
        self._last_identifier = 0
        # Stack of tag names currently being parsed; used for error messages.
        self._tag_stack = []
        # Stack of end-token tuples for nested block statements; used by
        # fail_eof()/fail_unknown_tag() to hint at nesting mistakes.
        self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
    def _fail_ut_eof(self, name, end_token_stack, lineno):
        """Shared helper for :meth:`fail_unknown_tag` and :meth:`fail_eof`.

        Builds a human readable error message from *end_token_stack* (the
        stacked tuples of end-token expressions the parser is still waiting
        for) and raises it via :meth:`fail`.  *name* is the unknown tag name
        or ``None`` for a plain end-of-template failure.
        """
        # Flatten every expected end token into a readable form so we can
        # check below whether the unknown tag is one of them.
        expected = []
        for exprs in end_token_stack:
            expected.extend(imap(describe_token_expr, exprs))
        # The innermost set of end tokens is what the parser is looking for
        # right now.
        if end_token_stack:
            currently_looking = ' or '.join(
                "'%s'" % describe_token_expr(expr)
                for expr in end_token_stack[-1])
        else:
            currently_looking = None

        if name is None:
            message = ['Unexpected end of template.']
        else:
            message = ['Encountered unknown tag \'%s\'.' % name]

        if currently_looking:
            # If the unknown tag would have been valid at some outer level,
            # the user most likely made a nesting mistake.
            if name is not None and name in expected:
                message.append('You probably made a nesting mistake. Jinja '
                               'is expecting this tag, but currently looking '
                               'for %s.' % currently_looking)
            else:
                message.append('Jinja was looking for the following tags: '
                               '%s.' % currently_looking)

        if self._tag_stack:
            message.append('The innermost block that needs to be '
                           'closed is \'%s\'.' % self._tag_stack[-1])

        self.fail(' '.join(message), lineno)
    def fail_unknown_tag(self, name, lineno=None):
        """Called if the parser encounters an unknown tag.  Tries to fail
        with a human readable error message that could help to identify
        the problem.
        """
        # Pass the full stack of expected end tokens so the helper can hint
        # at nesting mistakes.
        return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
    def parse_statement(self):
        """Parse a single statement.

        Dispatches on the tag name at the current token: built-in keywords
        go to the matching ``parse_<keyword>`` method, ``call`` and
        ``filter`` have dedicated block parsers, and everything else is
        looked up in the extension tag table.  Unknown tags raise a
        syntax error.
        """
        token = self.stream.current
        if token.type != 'name':
            self.fail('tag name expected', token.lineno)
        # The tag name stays on the stack while its body is parsed so that
        # error messages can report the innermost open tag.
        self._tag_stack.append(token.value)
        pop_tag = True
        try:
            if token.value in _statement_keywords:
                return getattr(self, 'parse_' + self.stream.current.value)()
            if token.value == 'call':
                return self.parse_call_block()
            if token.value == 'filter':
                return self.parse_filter_block()
            ext = self.extensions.get(token.value)
            if ext is not None:
                return ext(self)

            # did not work out, remove the token we pushed by accident
            # from the stack so that the unknown tag fail function can
            # produce a proper error message.
            self._tag_stack.pop()
            pop_tag = False
            self.fail_unknown_tag(token.value, token.lineno)
        finally:
            if pop_tag:
                self._tag_stack.pop()
    def parse_statements(self, end_tokens, drop_needle=False):
        """Parse multiple statements into a list until one of the end tokens
        is reached.  This is used to parse the body of statements as it also
        parses template data if appropriate.  The parser checks first if the
        current token is a colon and skips it if there is one.  Then it checks
        for the block end and parses until if one of the `end_tokens` is
        reached.  Per default the active token in the stream at the end of
        the call is the matched end token.  If this is not wanted `drop_needle`
        can be set to `True` and the end token is removed.

        Returns the list of parsed statement nodes.
        """
        # the first token may be a colon for python compatibility
        self.stream.skip_if('colon')

        # in the future it would be possible to add whole code sections
        # by adding some sort of end of statement token and parsing those here.
        self.stream.expect('block_end')
        result = self.subparse(end_tokens)

        # we reached the end of the template too early, the subparser
        # does not check for this, so we do that now
        if self.stream.current.type == 'eof':
            self.fail_eof(end_tokens)

        if drop_needle:
            next(self.stream)
        return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
self.stream.expect('assign')
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
    def parse_for(self):
        """Parse a for loop.

        Handles the optional ``if <test>`` loop filter, the optional
        ``recursive`` modifier and an optional ``{% else %}`` branch.
        """
        lineno = self.stream.expect('name:for').lineno
        target = self.parse_assign_target(extra_end_rules=('name:in',))
        self.stream.expect('name:in')
        # NOTE: `iter` shadows the builtin inside this method.
        iter = self.parse_tuple(with_condexpr=False,
                                extra_end_rules=('name:recursive',))
        # Optional loop filter: `for x in seq if <expr>`.
        test = None
        if self.stream.skip_if('name:if'):
            test = self.parse_expression()
        recursive = self.stream.skip_if('name:recursive')
        body = self.parse_statements(('name:endfor', 'name:else'))
        # The body parse stops at either `endfor` or `else`.
        if next(self.stream).value == 'endfor':
            else_ = []
        else:
            else_ = self.parse_statements(('name:endfor',), drop_needle=True)
        return nodes.For(target, iter, body, else_, test,
                         recursive, lineno=lineno)
    def parse_if(self):
        """Parse an if construct.

        ``elif`` branches are represented by nesting a new If node into
        the ``else_`` list of the previous one; ``result`` keeps the
        outermost node that is finally returned.
        """
        node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
        while 1:
            node.test = self.parse_tuple(with_condexpr=False)
            node.body = self.parse_statements(('name:elif', 'name:else',
                                               'name:endif'))
            token = next(self.stream)
            if token.test('name:elif'):
                # Chain a fresh If node as the sole else_ entry and keep
                # parsing into it.
                new_node = nodes.If(lineno=self.stream.current.lineno)
                node.else_ = [new_node]
                node = new_node
                continue
            elif token.test('name:else'):
                node.else_ = self.parse_statements(('name:endif',),
                                                   drop_needle=True)
            else:
                node.else_ = []
            break
        return result
    def parse_block(self):
        """Parse a ``{% block %}`` statement into a Block node.

        A trailing block name after ``endblock`` (e.g.
        ``{% endblock name %}``) is accepted and skipped.
        """
        node = nodes.Block(lineno=next(self.stream).lineno)
        node.name = self.stream.expect('name').value
        node.scoped = self.stream.skip_if('name:scoped')

        # common problem people encounter when switching from django
        # to jinja. we do not support hyphens in block names, so let's
        # raise a nicer error message in that case.
        if self.stream.current.type == 'sub':
            self.fail('Block names in Jinja have to be valid Python '
                      'identifiers and may not contain hyphens, use an '
                      'underscore instead.')

        node.body = self.parse_statements(('name:endblock',), drop_needle=True)
        self.stream.skip_if('name:' + node.name)
        return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
    def parse_include(self):
        """Parse an ``{% include %}`` statement into an Include node.

        Supports the ``ignore missing`` modifier and the shared
        ``with/without context`` suffix (context defaults to ``with``).
        """
        node = nodes.Include(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        # Optional `ignore missing` requires both name tokens in sequence.
        if self.stream.current.test('name:ignore') and \
           self.stream.look().test('name:missing'):
            node.ignore_missing = True
            self.stream.skip(2)
        else:
            node.ignore_missing = False
        return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
    def parse_from(self):
        """Parse ``{% from <template> import a, b as c, ... %}``.

        Each imported name is stored in ``node.names`` either as a plain
        string or as a ``(name, alias)`` tuple.  A trailing
        ``with/without context`` modifier may terminate the name list.
        """
        node = nodes.FromImport(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect('name:import')
        node.names = []

        def parse_context():
            # Consume a `with context` / `without context` suffix if present
            # and record it on the enclosing node.  Returns True when the
            # suffix was consumed, ending the import list.
            if self.stream.current.value in ('with', 'without') and \
               self.stream.look().test('name:context'):
                node.with_context = next(self.stream).value == 'with'
                self.stream.skip()
                return True
            return False

        while 1:
            if node.names:
                self.stream.expect('comma')
            if self.stream.current.type == 'name':
                if parse_context():
                    break
                target = self.parse_assign_target(name_only=True)
                # Leading-underscore names are treated as private and may
                # not be imported.
                if target.name.startswith('_'):
                    self.fail('names starting with an underline can not '
                              'be imported', target.lineno,
                              exc=TemplateAssertionError)
                if self.stream.skip_if('name:as'):
                    alias = self.parse_assign_target(name_only=True)
                    node.names.append((target.name, alias.name))
                else:
                    node.names.append(target.name)
                if parse_context() or self.stream.current.type != 'comma':
                    break
            else:
                break
        # No context modifier seen: default to without context.
        if not hasattr(node, 'with_context'):
            node.with_context = False
            self.stream.skip_if('comma')
        return node
    def parse_signature(self, node):
        """Parse a parenthesized argument signature onto *node*.

        Fills ``node.args`` with Name nodes (ctx set to ``'param'``) and
        ``node.defaults`` with the default expressions for the trailing
        arguments that have one.
        """
        node.args = args = []
        node.defaults = defaults = []
        self.stream.expect('lparen')
        while self.stream.current.type != 'rparen':
            if args:
                self.stream.expect('comma')
            arg = self.parse_assign_target(name_only=True)
            arg.set_ctx('param')
            if self.stream.skip_if('assign'):
                defaults.append(self.parse_expression())
            args.append(arg)
        self.stream.expect('rparen')
    def parse_call_block(self):
        """Parse a ``{% call %}`` block into a CallBlock node.

        An optional parenthesized signature may precede the call
        expression, which itself must be an actual Call node.
        """
        node = nodes.CallBlock(lineno=next(self.stream).lineno)
        if self.stream.current.type == 'lparen':
            self.parse_signature(node)
        else:
            node.args = []
            node.defaults = []

        node.call = self.parse_expression()
        if not isinstance(node.call, nodes.Call):
            self.fail('expected call', node.lineno)
        node.body = self.parse_statements(('name:endcall',), drop_needle=True)
        return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif self.stream.current.test('name:not') and \
self.stream.look().test('name:in'):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not \
self.stream.current.test_any('name:else', 'name:or',
'name:and'):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
| agpl-3.0 | 1,444,097,924,840,187,400 | -7,379,437,060,677,992,000 | 38.313966 | 82 | 0.545757 | false |
boxlab/UltraEnc-X | 0.3.1/model/main_functions.py | 1 | 1133 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
impelement the main functions required by enc.py.
'''
from random import randint
from model.requirements import lmap, repeat
def generate_otp(sheets, strength, length):
for sheet in range(sheets):
filename = "uexor_%s.uep" % (str(sheet),)
with open(filename, 'w') as output:
for i in range(length):
print(randint(1, strength), file=output)
def load_sheet(filename):
with open(filename, 'r') as sheet:
contents = sheet.read().splitlines()
return lmap(int, contents)
def plaintext_to_str(plaintext):
return ''.join(lmap(chr, plaintext))
def get_plaintext():
plaintext = input('Please type your message: ')
return lmap(ord, plaintext)
def load_file(filename):
with open(filename, 'r') as file:
contents = file.read().splitlines()
return lmap(int, contents)
def save_file(filename, data):
with open(filename, 'w') as file:
file.write('\n'.join(lmap(str, data)))
| gpl-3.0 | -9,187,059,787,447,930,000 | -5,563,388,014,278,381,000 | 24.177778 | 72 | 0.578994 | false |
fluxw42/youtube-dl | youtube_dl/extractor/shahid.py | 38 | 5255 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
str_or_none,
urlencode_postdata,
clean_html,
)
class ShahidIE(InfoExtractor):
_NETRC_MACHINE = 'shahid'
_VALID_URL = r'https?://shahid\.mbc\.net/ar/(?P<type>episode|movie)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html',
'info_dict': {
'id': '90574',
'ext': 'mp4',
'title': 'الملك عبدالله الإنسان الموسم 1 كليب 3',
'description': 'الفيلم الوثائقي - الملك عبد الله الإنسان',
'duration': 2972,
'timestamp': 1422057420,
'upload_date': '20150123',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'https://shahid.mbc.net/ar/movie/151746/%D8%A7%D9%84%D9%82%D9%86%D8%A7%D8%B5%D8%A9.html',
'only_matching': True
}, {
# shahid plus subscriber only
'url': 'https://shahid.mbc.net/ar/episode/90511/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1.html',
'only_matching': True
}]
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
try:
user_data = self._download_json(
'https://shahid.mbc.net/wd/service/users/login',
None, 'Logging in', data=json.dumps({
'email': email,
'password': password,
'basic': 'false',
}).encode('utf-8'), headers={
'Content-Type': 'application/json; charset=UTF-8',
})['user']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
fail_data = self._parse_json(
e.cause.read().decode('utf-8'), None, fatal=False)
if fail_data:
faults = fail_data.get('faults', [])
faults_message = ', '.join([clean_html(fault['userMessage']) for fault in faults if fault.get('userMessage')])
if faults_message:
raise ExtractorError(faults_message, expected=True)
raise
self._download_webpage(
'https://shahid.mbc.net/populateContext',
None, 'Populate Context', data=urlencode_postdata({
'firstName': user_data['firstName'],
'lastName': user_data['lastName'],
'userName': user_data['email'],
'csg_user_name': user_data['email'],
'subscriberId': user_data['id'],
'sessionId': user_data['sessionId'],
}))
def _get_api_data(self, response):
data = response.get('data', {})
error = data.get('error')
if error:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())),
expected=True)
return data
def _real_extract(self, url):
page_type, video_id = re.match(self._VALID_URL, url).groups()
player = self._get_api_data(self._download_json(
'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-player.html' % video_id,
video_id, 'Downloading player JSON'))
if player.get('drm'):
raise ExtractorError('This video is DRM protected.', expected=True)
formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4')
self._sort_formats(formats)
video = self._get_api_data(self._download_json(
'http://api.shahid.net/api/v1_1/%s/%s' % (page_type, video_id),
video_id, 'Downloading video JSON', query={
'apiKey': 'sh@hid0nlin3',
'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
}))[page_type]
title = video['title']
categories = [
category['name']
for category in video.get('genres', []) if 'name' in category]
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': video.get('thumbnailUrl'),
'duration': int_or_none(video.get('duration')),
'timestamp': parse_iso8601(video.get('referenceDate')),
'categories': categories,
'series': video.get('showTitle') or video.get('showName'),
'season': video.get('seasonTitle'),
'season_number': int_or_none(video.get('seasonNumber')),
'season_id': str_or_none(video.get('seasonId')),
'episode_number': int_or_none(video.get('number')),
'episode_id': video_id,
'formats': formats,
}
| unlicense | -6,334,356,052,275,089,000 | -9,143,165,839,433,855,000 | 37.753731 | 245 | 0.534566 | false |
tima/ansible | lib/ansible/modules/network/asa/asa_command.py | 20 | 5713 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: asa_command
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Run arbitrary commands on Cisco ASA devices
description:
- Sends arbitrary commands to an ASA node and returns the results
read from the device. The C(asa_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: asa
options:
commands:
description:
- List of commands to send to the remote device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retires as expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
transport: cli
---
- asa_command:
commands:
- show version
provider: "{{ cli }}"
- asa_command:
commands:
- show asp drop
- show memory
provider: "{{ cli }}"
- asa_command:
commands:
- show version
provider: "{{ cli }}"
context: system
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.asa.asa import asa_argument_spec, check_args
from ansible.module_utils.network.asa.asa import run_commands
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def main():
spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
spec.update(asa_argument_spec)
module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
check_args(module)
result = {'changed': False}
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
commands = module.params['commands']
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not be satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,206,390,778,442,285,000 | -5,340,150,151,314,349,000 | 27.565 | 92 | 0.64397 | false |
fkorotkov/pants | contrib/android/tests/python/pants_test/contrib/android/test_android_manifest_parser.py | 14 | 3340 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.util.xml_test_base import XmlTestBase
from pants.contrib.android.android_manifest_parser import AndroidManifest, AndroidManifestParser
class TestAndroidManifestParser(XmlTestBase):
"""Test the AndroidManifestParser and AndroidManifest classes."""
# Test AndroidManifestParser.parse_manifest().
def test_parse_manifest(self):
with self.xml_file() as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.path, xml)
def test_bad_parse_manifest(self):
xml = '/no/file/here'
with self.assertRaises(AndroidManifestParser.BadManifestError):
AndroidManifestParser.parse_manifest(xml)
# Test AndroidManifest.package_name.
def test_package_name(self):
with self.xml_file() as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.package_name, 'org.pantsbuild.example.hello')
def test_missing_manifest_element(self):
with self.xml_file(manifest_element='some_other_element') as xml:
with self.assertRaises(AndroidManifestParser.BadManifestError):
AndroidManifestParser.parse_manifest(xml)
def test_missing_package_attribute(self):
with self.xml_file(package_attribute='bad_value') as xml:
with self.assertRaises(AndroidManifestParser.BadManifestError):
AndroidManifestParser.parse_manifest(xml)
def test_weird_package_name(self):
# Should accept unexpected package names, the info gets verified in classes that consume it.
with self.xml_file(package_value='cola') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.package_name, 'cola')
# Test AndroidManifest.target_sdk.
def test_target_sdk(self):
with self.xml_file() as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.target_sdk, '19')
# These next tests show AndroidManifest.target_sdk fails silently and returns None.
def test_no_uses_sdk_element(self):
with self.xml_file(uses_sdk_element='something-random') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertIsNone(manifest.target_sdk)
def test_no_target_sdk_value(self):
with self.xml_file(android_attribute='android:bad_value') as xml:
parsed = AndroidManifestParser.parse_manifest(xml)
self.assertIsNone(parsed.target_sdk)
def test_no_android_part(self):
with self.xml_file(android_attribute='unrelated:targetSdkVersion') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.package_name, 'org.pantsbuild.example.hello')
def test_missing_whole_targetsdk(self):
with self.xml_file(android_attribute='unrelated:cola') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertIsNone(manifest.target_sdk)
# Test AndroidManifest().
def test_android_manifest(self):
with self.xml_file() as xml:
test = AndroidManifest(xml, '19', 'com.foo.bar')
self.assertEqual(test.path, xml)
| apache-2.0 | -856,118,369,279,277,400 | -8,228,434,546,012,603,000 | 40.75 | 96 | 0.739521 | false |
eurosata1/e2 | lib/python/Screens/LocationBox.py | 2 | 16935 | #
# Generic Screen to select a path/filename combination
#
# GUI (Screens)
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.InputBox import InputBox
from Screens.HelpMenu import HelpableScreen
from Screens.ChoiceBox import ChoiceBox
# Generic
from Tools.BoundFunction import boundFunction
from Tools.Directories import *
from Components.config import config
import os
# Quickselect
from Tools.NumericalTextInput import NumericalTextInput
# GUI (Components)
from Components.ActionMap import NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.FileList import FileList
from Components.MenuList import MenuList
# Timer
from enigma import eTimer
defaultInhibitDirs = ["/bin", "/boot", "/dev", "/etc", "/lib", "/proc", "/sbin", "/sys", "/usr", "/var"]
class LocationBox(Screen, NumericalTextInput, HelpableScreen):
    """Simple Class similar to MessageBox / ChoiceBox but used to choose a folder/pathname combination"""
    # Embedded default skin: prompt and current target on top, the directory
    # browser ("filelist") above the bookmark list ("booklist"), and the four
    # standard color buttons at the bottom.
    skin = """<screen name="LocationBox" position="100,75" size="540,460" >
        <widget name="text" position="0,2" size="540,22" font="Regular;22" />
        <widget name="target" position="0,23" size="540,22" valign="center" font="Regular;22" />
        <widget name="filelist" position="0,55" zPosition="1" size="540,210" scrollbarMode="showOnDemand" selectionDisabled="1" />
        <widget name="textbook" position="0,272" size="540,22" font="Regular;22" />
        <widget name="booklist" position="5,302" zPosition="2" size="535,100" scrollbarMode="showOnDemand" />
        <widget name="red" position="0,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
        <widget name="key_red" position="0,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
        <widget name="green" position="135,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
        <widget name="key_green" position="135,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
        <widget name="yellow" position="270,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
        <widget name="key_yellow" position="270,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
        <widget name="blue" position="405,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
        <widget name="key_blue" position="405,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
    </screen>"""
    # NOTE(review): the mutable default arguments (inhibitDirs=[],
    # inhibitMounts=[]) are shared between calls; harmless as written because
    # they are only read here, but confirm before ever mutating them.
    def __init__(self, session, text = "", filename = "", currDir = None, bookmarks = None, userMode = False, windowTitle = "Select location", minFree = None, autoAdd = False, editDir = False, inhibitDirs = [], inhibitMounts = []):
        # Init parents
        Screen.__init__(self, session)
        NumericalTextInput.__init__(self, handleTimeout = False)
        HelpableScreen.__init__(self)
        # Set useable chars
        self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz')
        # Quickselect Timer
        self.qs_timer = eTimer()
        self.qs_timer.callback.append(self.timeout)
        self.qs_timer_type = 0
        # Initialize Quickselect
        self.curr_pos = -1
        self.quickselect = ""
        # Set Text
        self["text"] = Label(text)
        self["textbook"] = Label(_("Bookmarks"))
        # Save parameters locally
        self.text = text
        self.filename = filename
        self.minFree = minFree
        # realBookmarks is the (config element) source of truth; bookmarks is
        # a local working copy of its value list.
        self.realBookmarks = bookmarks
        self.bookmarks = bookmarks and bookmarks.value[:] or []
        self.userMode = userMode
        self.autoAdd = autoAdd
        self.editDir = editDir
        self.inhibitDirs = inhibitDirs
        # Initialize FileList
        self["filelist"] = FileList(currDir, showDirectories = True, showFiles = False, inhibitMounts = inhibitMounts, inhibitDirs = inhibitDirs)
        # Initialize BookList
        self["booklist"] = MenuList(self.bookmarks)
        # Buttons
        self["key_green"] = Button(_("OK"))
        self["key_yellow"] = Button(_("Rename"))
        self["key_blue"] = Button(_("Remove bookmark"))
        self["key_red"] = Button(_("Cancel"))
        # Background for Buttons
        self["green"] = Pixmap()
        self["yellow"] = Pixmap()
        self["blue"] = Pixmap()
        self["red"] = Pixmap()
        # Initialize Target
        self["target"] = Label()
        if self.userMode:
            self.usermodeOn()
        # Custom Action Handler
        class LocationBoxActionMap(HelpableActionMap):
            # ActionMap wrapper that cancels any pending quickselect input
            # before the mapped action runs.
            def __init__(self, parent, context, actions = { }, prio=0):
                HelpableActionMap.__init__(self, parent, context, actions, prio)
                self.box = parent
            def action(self, contexts, action):
                # Reset Quickselect
                self.box.timeout(force = True)
                return HelpableActionMap.action(self, contexts, action)
        # Actions that will reset quickselect
        self["WizardActions"] = LocationBoxActionMap(self, "WizardActions",
        {
            "left": self.left,
            "right": self.right,
            "up": self.up,
            "down": self.down,
            "ok": (self.ok, _("select")),
            "back": (self.cancel, _("Cancel")),
        }, -2)
        self["ColorActions"] = LocationBoxActionMap(self, "ColorActions",
        {
            "red": self.cancel,
            "green": self.select,
            "yellow": self.changeName,
            "blue": self.addRemoveBookmark,
        }, -2)
        self["EPGSelectActions"] = LocationBoxActionMap(self, "EPGSelectActions",
        {
            "prevBouquet": (self.switchToBookList, _("switch to bookmarks")),
            "nextBouquet": (self.switchToFileList, _("switch to filelist")),
        }, -2)
        self["MenuActions"] = LocationBoxActionMap(self, "MenuActions",
        {
            "menu": (self.showMenu, _("menu")),
        }, -2)
        # Actions used by quickselect
        self["NumberActions"] = NumberActionMap(["NumberActions"],
        {
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal
        })
        # Run some functions when shown
        self.onShown.extend((
            boundFunction(self.setTitle, _(windowTitle)),
            self.updateTarget,
            self.showHideRename,
        ))
        self.onLayoutFinish.append(self.switchToFileListOnStart)
        # Make sure we remove our callback
        self.onClose.append(self.disableTimer)
    def switchToFileListOnStart(self):
        """Choose the initial focused list: bookmarks when any exist (moving
        the cursor to the current directory's bookmark if present), otherwise
        the file list."""
        if self.realBookmarks and self.realBookmarks.value:
            self.currList = "booklist"
            currDir = self["filelist"].current_directory
            if currDir in self.bookmarks:
                self["booklist"].moveToIndex(self.bookmarks.index(currDir))
        else:
            self.switchToFileList()
    def disableTimer(self):
        """Detach our timeout callback from the quickselect timer on close."""
        self.qs_timer.callback.remove(self.timeout)
    def showHideRename(self):
        """Hide the rename (yellow) button when there is no filename to rename."""
        # Don't allow renaming when filename is empty
        if self.filename == "":
            self["key_yellow"].hide()
    def switchToFileList(self):
        """Give focus to the directory browser (unless in user mode)."""
        if not self.userMode:
            self.currList = "filelist"
            self["filelist"].selectionEnabled(1)
            self["booklist"].selectionEnabled(0)
            self["key_blue"].text = _("Add bookmark")
            self.updateTarget()
    def switchToBookList(self):
        """Give focus to the bookmark list."""
        self.currList = "booklist"
        self["filelist"].selectionEnabled(0)
        self["booklist"].selectionEnabled(1)
        self["key_blue"].text = _("Remove bookmark")
        self.updateTarget()
    def addRemoveBookmark(self):
        """Blue button: add the selected folder as a bookmark when the file
        list is focused, otherwise ask to remove the selected bookmark."""
        if self.currList == "filelist":
            # add bookmark
            folder = self["filelist"].getSelection()[0]
            if folder is not None and not folder in self.bookmarks:
                self.bookmarks.append(folder)
                self.bookmarks.sort()
                self["booklist"].setList(self.bookmarks)
        else:
            # remove bookmark
            if not self.userMode:
                name = self["booklist"].getCurrent()
                self.session.openWithCallback(
                    boundFunction(self.removeBookmark, name),
                    MessageBox,
                    _("Do you really want to remove your bookmark of %s?") % (name),
                )
    def removeBookmark(self, name, ret):
        """Confirmation callback: drop *name* from the local bookmark list."""
        if not ret:
            return
        if name in self.bookmarks:
            self.bookmarks.remove(name)
            self["booklist"].setList(self.bookmarks)
    def createDir(self):
        """Ask for a name and create a new directory in the current folder."""
        if self["filelist"].current_directory != None:
            self.session.openWithCallback(
                self.createDirCallback,
                InputBox,
                title = _("Please enter name of the new directory"),
                text = self.filename
            )
    def createDirCallback(self, res):
        """InputBox callback: create the directory, reporting failure or
        an already-existing path via MessageBox."""
        if res:
            path = os.path.join(self["filelist"].current_directory, res)
            if not pathExists(path):
                if not createDir(path):
                    self.session.open(
                        MessageBox,
                        _("Creating directory %s failed.") % (path),
                        type = MessageBox.TYPE_ERROR,
                        timeout = 5
                    )
                self["filelist"].refresh()
            else:
                self.session.open(
                    MessageBox,
                    _("The path %s already exists.") % (path),
                    type = MessageBox.TYPE_ERROR,
                    timeout = 5
                )
    def removeDir(self):
        """Ask for confirmation before deleting the selected directory."""
        sel = self["filelist"].getSelection()
        if sel and pathExists(sel[0]):
            self.session.openWithCallback(
                boundFunction(self.removeDirCallback, sel[0]),
                MessageBox,
                _("Do you really want to remove directory %s from the disk?") % (sel[0]),
                type = MessageBox.TYPE_YESNO
            )
        else:
            self.session.open(
                MessageBox,
                _("Invalid directory selected: %s") % (sel[0]),
                type = MessageBox.TYPE_ERROR,
                timeout = 5
            )
    def removeDirCallback(self, name, res):
        """Confirmation callback: delete directory *name*; on success also
        drop any bookmark pointing at it (both locally and in the persisted
        bookmark config)."""
        if res:
            if not removeDir(name):
                self.session.open(
                    MessageBox,
                    _("Removing directory %s failed. (Maybe not empty.)") % (name),
                    type = MessageBox.TYPE_ERROR,
                    timeout = 5
                )
            else:
                self["filelist"].refresh()
                self.removeBookmark(name, True)
                val = self.realBookmarks and self.realBookmarks.value
                if val and name in val:
                    val.remove(name)
                    self.realBookmarks.value = val
                    self.realBookmarks.save()
    def up(self):
        """Move the cursor up in the focused list and refresh the target."""
        self[self.currList].up()
        self.updateTarget()
    def down(self):
        """Move the cursor down in the focused list and refresh the target."""
        self[self.currList].down()
        self.updateTarget()
    def left(self):
        """Page up in the focused list and refresh the target."""
        self[self.currList].pageUp()
        self.updateTarget()
    def right(self):
        """Page down in the focused list and refresh the target."""
        self[self.currList].pageDown()
        self.updateTarget()
    def ok(self):
        """OK: descend into the selected directory if possible, otherwise
        treat it as a selection."""
        if self.currList == "filelist":
            if self["filelist"].canDescent():
                self["filelist"].descent()
                self.updateTarget()
        else:
            self.select()
    def cancel(self):
        """Close the screen without a result."""
        self.close(None)
    def getPreferredFolder(self):
        """Return the folder currently highlighted in the focused list."""
        if self.currList == "filelist":
            # XXX: We might want to change this for parent folder...
            return self["filelist"].getSelection()[0]
        else:
            return self["booklist"].getCurrent()
    def selectConfirmed(self, ret):
        """Finalize the selection: persist bookmarks if needed and close with
        folder+filename; when the target path does not exist yet, offer to
        create the folder first."""
        if ret:
            ret = ''.join((self.getPreferredFolder(), self.filename))
            if self.realBookmarks:
                if self.autoAdd and not ret in self.bookmarks:
                    if self.getPreferredFolder() not in self.bookmarks:
                        self.bookmarks.append(self.getPreferredFolder())
                        self.bookmarks.sort()
                if self.bookmarks != self.realBookmarks.value:
                    self.realBookmarks.value = self.bookmarks
                    self.realBookmarks.save()
            if self.filename and not pathExists(ret):
                menu = [(_("Create new folder and exit"), "folder"), (_("Save and exit"), "exit")]
                text = _("Select action")
                def dirAction(choice):
                    # ChoiceBox callback: "folder" creates the directory
                    # before closing; any choice closes, no choice cancels.
                    if choice:
                        if choice[1] == "folder":
                            if not createDir(ret):
                                self.session.open(MessageBox, _("Creating directory %s failed.") % (ret), type = MessageBox.TYPE_ERROR)
                                return
                        self.close(ret)
                    else:
                        self.cancel()
                self.session.openWithCallback(dirAction, ChoiceBox, title=text, list=menu)
                return
            self.close(ret)
    def select(self):
        """Green button: select the current folder, first checking the
        minimum-free-space requirement (in MB) when one was given."""
        currentFolder = self.getPreferredFolder()
        # Do nothing unless current Directory is valid
        if currentFolder is not None:
            # Check if we need to have a minimum of free Space available
            if self.minFree is not None:
                # Try to read fs stats
                try:
                    s = os.statvfs(currentFolder)
                    if (s.f_bavail * s.f_bsize) / 1000000 > self.minFree:
                        # Automatically confirm if we have enough free disk Space available
                        return self.selectConfirmed(True)
                except OSError:
                    pass
                # Ask User if he really wants to select this folder
                self.session.openWithCallback(
                    self.selectConfirmed,
                    MessageBox,
                    _("There might not be enough Space on the selected Partition.\nDo you really want to continue?"),
                    type = MessageBox.TYPE_YESNO
                )
            # No minimum free Space means we can safely close
            else:
                self.selectConfirmed(True)
    def changeName(self):
        """Yellow button: let the user edit the target filename."""
        if self.filename != "":
            # TODO: Add Information that changing extension is bad? disallow?
            self.session.openWithCallback(
                self.nameChanged,
                InputBox,
                title = _("Please enter a new filename"),
                text = self.filename
            )
    def nameChanged(self, res):
        """InputBox callback: accept a non-empty filename, complain otherwise."""
        if res is not None:
            if len(res):
                self.filename = res
                self.updateTarget()
            else:
                self.session.open(
                    MessageBox,
                    _("An empty filename is illegal."),
                    type = MessageBox.TYPE_ERROR,
                    timeout = 5
                )
    def updateTarget(self):
        """Refresh the "target" label with folder+filename, or a warning."""
        # Write Combination of Folder & Filename when Folder is valid
        currFolder = self.getPreferredFolder()
        if currFolder is not None:
            self["target"].setText(''.join((currFolder, self.filename)))
        # Display a Warning otherwise
        else:
            self["target"].setText(_("Invalid location"))
    def showMenu(self):
        """Menu key: open a context menu appropriate for the focused list."""
        if not self.userMode and self.realBookmarks:
            if self.currList == "filelist":
                menu = [
                    (_("switch to bookmarks"), self.switchToBookList),
                    (_("add bookmark"), self.addRemoveBookmark)
                ]
                if self.editDir:
                    menu.extend((
                        (_("create directory"), self.createDir),
                        (_("remove directory"), self.removeDir)
                    ))
            else:
                menu = (
                    (_("switch to filelist"), self.switchToFileList),
                    (_("remove bookmark"), self.addRemoveBookmark)
                )
            self.session.openWithCallback(
                self.menuCallback,
                ChoiceBox,
                title = "",
                list = menu
            )
    def menuCallback(self, choice):
        """Run the callable attached to the chosen menu entry."""
        if choice:
            choice[1]()
    def usermodeOn(self):
        """Restricted mode: only the bookmark list is visible/usable."""
        self.switchToBookList()
        self["filelist"].hide()
        self["key_blue"].hide()
    def keyNumberGlobal(self, number):
        """Quickselect: turn repeated numeric key presses into characters
        (phone-keypad style) and schedule the selection timeout."""
        # Cancel Timeout
        self.qs_timer.stop()
        # See if another key was pressed before
        if number != self.lastKey:
            # Reset lastKey again so NumericalTextInput triggers its keychange
            self.nextKey()
            # Try to select what was typed
            self.selectByStart()
            # Increment position
            self.curr_pos += 1
        # Get char and append to text
        char = self.getKey(number)
        self.quickselect = self.quickselect[:self.curr_pos] + unicode(char)
        # Start Timeout
        self.qs_timer_type = 0
        self.qs_timer.start(1000, 1)
    def selectByStart(self):
        """Move the file list cursor to the first entry whose absolute
        filename starts with the quickselect text."""
        # Don't do anything on initial call
        if not self.quickselect:
            return
        # Don't select if no dir
        if self["filelist"].getCurrentDirectory():
            # TODO: implement proper method in Components.FileList
            files = self["filelist"].getFileList()
            # Initialize index
            idx = 0
            # We select by filename which is absolute
            lookfor = self["filelist"].getCurrentDirectory() + self.quickselect
            # Select file starting with generated text
            for file in files:
                if file[0][0] and file[0][0].lower().startswith(lookfor):
                    self["filelist"].instance.moveSelectionTo(idx)
                    break
                idx += 1
    def timeout(self, force = False):
        """Two-stage quickselect timeout: first finalize the current key
        (allowing further characters), then reset quickselect entirely."""
        # Timeout Key
        if not force and self.qs_timer_type == 0:
            # Try to select what was typed
            self.selectByStart()
            # Reset Key
            self.lastKey = -1
            # Change type
            self.qs_timer_type = 1
            # Start timeout again
            self.qs_timer.start(1000, 1)
        # Timeout Quickselect
        else:
            # Eventually stop Timer
            self.qs_timer.stop()
            # Invalidate
            self.lastKey = -1
            self.curr_pos = -1
            self.quickselect = ""
    def __repr__(self):
        return str(type(self)) + "(" + self.text + ")"
def MovieLocationBox(session, text, dir, filename = "", minFree = None):
    """Factory: a LocationBox preconfigured for picking a movie location.

    The movielist video directories serve as bookmarks, directory
    creation/removal is enabled, new selections are auto-bookmarked, and
    the usual system directories are hidden.
    """
    box = LocationBox(
        session,
        text = text,
        filename = filename,
        currDir = dir,
        bookmarks = config.movielist.videodirs,
        autoAdd = True,
        editDir = True,
        inhibitDirs = defaultInhibitDirs,
        minFree = minFree
    )
    return box
class TimeshiftLocationBox(LocationBox):
    """LocationBox preset for choosing the timeshift recording directory.

    Confirming a folder writes it back to ``config.usage.timeshift_path``;
    cancelling reverts any pending change to that config element.
    """
    def __init__(self, session):
        preset = {
            "text": _("Where to save temporary timeshift recordings?"),
            "currDir": config.usage.timeshift_path.value,
            "bookmarks": config.usage.allowed_timeshift_paths,
            "autoAdd": True,
            "editDir": True,
            "inhibitDirs": defaultInhibitDirs,
            # the same requirement is hardcoded in servicedvb.cpp
            "minFree": 1024,
        }
        LocationBox.__init__(self, session, **preset)
        self.skinName = "LocationBox"
    def cancel(self):
        """Revert the pending timeshift path change, then close."""
        config.usage.timeshift_path.cancel()
        LocationBox.cancel(self)
    def selectConfirmed(self, ret):
        """Persist the confirmed folder as the timeshift path, then let the
        base class finish the selection."""
        if not ret:
            return
        config.usage.timeshift_path.value = self.getPreferredFolder()
        config.usage.timeshift_path.save()
        LocationBox.selectConfirmed(self, ret)
| gpl-2.0 | -4,404,007,830,922,448,000 | -5,393,513,269,792,044,000 | 29.458633 | 228 | 0.68013 | false |
groovecoder/kuma | vendor/packages/nose/config.py | 48 | 25238 | import logging
import optparse
import os
import re
import sys
import ConfigParser
from optparse import OptionParser
from nose.util import absdir, tolist
from nose.plugins.manager import NoPlugins
from warnings import warn, filterwarnings
# Module logger for nose.config.
log = logging.getLogger(__name__)
# not allowed in config files
option_blacklist = ['help', 'verbose']
# Candidate per-user config file locations (expanded with expanduser later).
config_files = [
    # Linux users will prefer this
    "~/.noserc",
    # Windows users will prefer this
    "~/nose.cfg"
]
# plaforms on which the exe check defaults to off
# Windows and IronPython
exe_allowed_platforms = ('win32', 'cli')
# Always surface DeprecationWarnings raised from this module.
filterwarnings("always", category=DeprecationWarning,
               module=r'(.*\.)?nose\.config')
class NoSuchOptionError(Exception):
    """Signals that a configuration file referenced an unknown option.

    The offending option name is kept in the ``name`` attribute so the
    caller can build a helpful error message.
    """
    def __init__(self, name):
        super(NoSuchOptionError, self).__init__(name)
        self.name = name
class ConfigError(Exception):
    """Raised when a config file cannot be read, parsed, or applied."""
    pass
class ConfiguredDefaultsOptionParser(object):
    """
    Handler for options from commandline and config files.
    """
    def __init__(self, parser, config_section, error=None, file_error=None):
        # parser: optparse.OptionParser holding all known options.
        # config_section: ini section name read from each config file.
        # error / file_error: callbacks for fatal and per-file problems;
        # both default to the parser's own error() method.
        self._parser = parser
        self._config_section = config_section
        if error is None:
            error = self._parser.error
        self._error = error
        if file_error is None:
            file_error = lambda msg, **kw: error(msg)
        self._file_error = file_error
    def _configTuples(self, cfg, filename):
        """Return (name, value, filename) tuples from our section of *cfg*."""
        config = []
        if self._config_section in cfg.sections():
            for name, value in cfg.items(self._config_section):
                config.append((name, value, filename))
        return config
    def _readFromFilenames(self, filenames):
        """Parse each named config file; raise ConfigError on parse failure."""
        config = []
        for filename in filenames:
            cfg = ConfigParser.RawConfigParser()
            try:
                cfg.read(filename)
            except ConfigParser.Error, exc:
                raise ConfigError("Error reading config file %r: %s" %
                                  (filename, str(exc)))
            config.extend(self._configTuples(cfg, filename))
        return config
    def _readFromFileObject(self, fh):
        """Parse an already-open file-like object; raise ConfigError on failure."""
        cfg = ConfigParser.RawConfigParser()
        try:
            filename = fh.name
        except AttributeError:
            filename = '<???>'
        try:
            cfg.readfp(fh)
        except ConfigParser.Error, exc:
            raise ConfigError("Error reading config file %r: %s" %
                              (filename, str(exc)))
        return self._configTuples(cfg, filename)
    def _readConfiguration(self, config_files):
        """Accept a filename, a sequence of filenames, or a file object and
        return the merged list of (name, value, filename) tuples."""
        try:
            config_files.readline
        except AttributeError:
            filename_or_filenames = config_files
            if isinstance(filename_or_filenames, basestring):
                filenames = [filename_or_filenames]
            else:
                filenames = filename_or_filenames
            config = self._readFromFilenames(filenames)
        else:
            fh = config_files
            config = self._readFromFileObject(fh)
        return config
    def _processConfigValue(self, name, value, values, parser):
        """Apply one config entry as if '--<name> <value>' had been given."""
        opt_str = '--' + name
        option = parser.get_option(opt_str)
        if option is None:
            raise NoSuchOptionError(name)
        else:
            option.process(opt_str, value, values, parser)
    def _applyConfigurationToValues(self, parser, config, values):
        """Apply all config entries to *values*, reporting unknown options and
        bad values via the file_error callback (blacklisted names skipped)."""
        for name, value, filename in config:
            if name in option_blacklist:
                continue
            try:
                self._processConfigValue(name, value, values, parser)
            except NoSuchOptionError, exc:
                self._file_error(
                    "Error reading config file %r: "
                    "no such option %r" % (filename, exc.name),
                    name=name, filename=filename)
            except optparse.OptionValueError, exc:
                msg = str(exc).replace('--' + name, repr(name), 1)
                self._file_error("Error reading config file %r: "
                                 "%s" % (filename, msg),
                                 name=name, filename=filename)
    def parseArgsAndConfigFiles(self, args, config_files):
        """Parse *args* with defaults pre-seeded from *config_files*; returns
        the usual optparse (options, args) pair."""
        values = self._parser.get_default_values()
        try:
            config = self._readConfiguration(config_files)
        except ConfigError, exc:
            self._error(str(exc))
        else:
            try:
                self._applyConfigurationToValues(self._parser, config, values)
            except ConfigError, exc:
                self._error(str(exc))
        return self._parser.parse_args(args, values)
class Config(object):
    """nose configuration.
    Instances of Config are used throughout nose to configure
    behavior, including plugin lists. Here are the default values for
    all config keys::
        self.env = env = kw.pop('env', {})
        self.args = ()
        self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
        self.addPaths = not env.get('NOSE_NOPATH', False)
        self.configSection = 'nosetests'
        self.debug = env.get('NOSE_DEBUG')
        self.debugLog = env.get('NOSE_DEBUG_LOG')
        self.exclude = None
        self.getTestCaseNamesCompat = False
        self.includeExe = env.get('NOSE_INCLUDE_EXE',
                                  sys.platform in exe_allowed_platforms)
        self.ignoreFiles = (re.compile(r'^\.'),
                            re.compile(r'^_'),
                            re.compile(r'^setup\.py$')
                            )
        self.include = None
        self.loggingConfig = None
        self.logStream = sys.stderr
        self.options = NoOptions()
        self.parser = None
        self.plugins = NoPlugins()
        self.srcDirs = ('lib', 'src')
        self.runOnInit = True
        self.stopOnError = env.get('NOSE_STOP', False)
        self.stream = sys.stderr
        self.testNames = ()
        self.verbosity = int(env.get('NOSE_VERBOSE', 1))
        self.where = ()
        self.py3where = ()
        self.workingDir = None
    """
    def __init__(self, **kw):
        # Defaults may be overridden by environment variables (NOSE_*) and
        # by keyword arguments, which are applied last via update().
        self.env = env = kw.pop('env', {})
        self.args = ()
        self.testMatchPat = env.get('NOSE_TESTMATCH',
                                    r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
        self.testMatch = re.compile(self.testMatchPat)
        self.addPaths = not env.get('NOSE_NOPATH', False)
        self.configSection = 'nosetests'
        self.debug = env.get('NOSE_DEBUG')
        self.debugLog = env.get('NOSE_DEBUG_LOG')
        self.exclude = None
        self.getTestCaseNamesCompat = False
        self.includeExe = env.get('NOSE_INCLUDE_EXE',
                                  sys.platform in exe_allowed_platforms)
        self.ignoreFilesDefaultStrings = [r'^\.',
                                          r'^_',
                                          r'^setup\.py$',
                                          ]
        self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
        self.include = None
        self.loggingConfig = None
        self.logStream = sys.stderr
        self.options = NoOptions()
        self.parser = None
        self.plugins = NoPlugins()
        self.srcDirs = ('lib', 'src')
        self.runOnInit = True
        self.stopOnError = env.get('NOSE_STOP', False)
        self.stream = sys.stderr
        self.testNames = []
        self.verbosity = int(env.get('NOSE_VERBOSE', 1))
        self.where = ()
        self.py3where = ()
        self.workingDir = os.getcwd()
        self.traverseNamespace = False
        self.firstPackageWins = False
        self.parserClass = OptionParser
        self.worker = False
        # Snapshot of pristine defaults (for default()) and of the state
        # after applying kw (for reset()).
        self._default = self.__dict__.copy()
        self.update(kw)
        self._orig = self.__dict__.copy()
    def __getstate__(self):
        """Pickle support: drop streams, env and snapshots; plugins are
        reduced to their manager class and rebuilt in __setstate__."""
        state = self.__dict__.copy()
        del state['stream']
        del state['_orig']
        del state['_default']
        del state['env']
        del state['logStream']
        # FIXME remove plugins, have only plugin manager class
        state['plugins'] = self.plugins.__class__
        return state
    def __setstate__(self, state):
        """Unpickle support: restore state and re-instantiate/reconfigure the
        plugin manager; unpickled configs are marked as workers."""
        plugincls = state.pop('plugins')
        self.update(state)
        self.worker = True
        # FIXME won't work for static plugin lists
        self.plugins = plugincls()
        self.plugins.loadPlugins()
        # needed so .can_configure gets set appropriately
        dummy_parser = self.parserClass()
        self.plugins.addOptions(dummy_parser, {})
        self.plugins.configure(self.options, self)
    def __repr__(self):
        d = self.__dict__.copy()
        # don't expose env, could include sensitive info
        d['env'] = {}
        keys = [ k for k in d.keys()
                 if not k.startswith('_') ]
        keys.sort()
        return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
                                          for k in keys ])
    __str__ = __repr__
    def _parseArgs(self, argv, cfg_files):
        """Parse argv[1:] with defaults drawn from *cfg_files*; options a
        plugin has excluded are downgraded from errors to warnings."""
        def warn_sometimes(msg, name=None, filename=None):
            if (hasattr(self.plugins, 'excludedOption') and
                self.plugins.excludedOption(name)):
                msg = ("Option %r in config file %r ignored: "
                       "excluded by runtime environment" %
                       (name, filename))
                warn(msg, RuntimeWarning)
            else:
                raise ConfigError(msg)
        parser = ConfiguredDefaultsOptionParser(
            self.getParser(), self.configSection, file_error=warn_sometimes)
        return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
    def configure(self, argv=None, doc=None):
        """Configure the nose running environment. Execute configure before
        collecting tests with nose.TestCollector to enable output capture and
        other features.
        """
        env = self.env
        if argv is None:
            argv = sys.argv
        cfg_files = getattr(self, 'files', [])
        options, args = self._parseArgs(argv, cfg_files)
        # If -c --config has been specified on command line,
        # load those config files and reparse
        if getattr(options, 'files', []):
            options, args = self._parseArgs(argv, options.files)
        self.options = options
        if args:
            self.testNames = args
        if options.testNames is not None:
            self.testNames.extend(tolist(options.testNames))
        if options.py3where is not None:
            if sys.version_info >= (3,):
                options.where = options.py3where
        # `where` is an append action, so it can't have a default value
        # in the parser, or that default will always be in the list
        if not options.where:
            options.where = env.get('NOSE_WHERE', None)
        # include and exclude also
        if not options.ignoreFiles:
            options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
        if not options.include:
            options.include = env.get('NOSE_INCLUDE', [])
        if not options.exclude:
            options.exclude = env.get('NOSE_EXCLUDE', [])
        self.addPaths = options.addPaths
        self.stopOnError = options.stopOnError
        self.verbosity = options.verbosity
        self.includeExe = options.includeExe
        self.traverseNamespace = options.traverseNamespace
        self.debug = options.debug
        self.debugLog = options.debugLog
        self.loggingConfig = options.loggingConfig
        self.firstPackageWins = options.firstPackageWins
        self.configureLogging()
        if not options.byteCompile:
            sys.dont_write_bytecode = True
        if options.where is not None:
            self.configureWhere(options.where)
        if options.testMatch:
            self.testMatch = re.compile(options.testMatch)
        if options.ignoreFiles:
            self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
            log.info("Ignoring files matching %s", options.ignoreFiles)
        else:
            log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
        if options.include:
            self.include = map(re.compile, tolist(options.include))
            log.info("Including tests matching %s", options.include)
        if options.exclude:
            self.exclude = map(re.compile, tolist(options.exclude))
            log.info("Excluding tests matching %s", options.exclude)
        # When listing plugins we don't want to run them
        if not options.showPlugins:
            self.plugins.configure(options, self)
            self.plugins.begin()
    def configureLogging(self):
        """Configure logging for nose, or optionally other packages. Any logger
        name may be set with the debug option, and that logger will be set to
        debug level and be assigned the same handler as the nose loggers, unless
        it already has a handler.
        """
        if self.loggingConfig:
            from logging.config import fileConfig
            fileConfig(self.loggingConfig)
            return
        format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
        if self.debugLog:
            handler = logging.FileHandler(self.debugLog)
        else:
            handler = logging.StreamHandler(self.logStream)
        handler.setFormatter(format)
        logger = logging.getLogger('nose')
        logger.propagate = 0
        # only add our default handler if there isn't already one there
        # this avoids annoying duplicate log messages.
        found = False
        if self.debugLog:
            debugLogAbsPath = os.path.abspath(self.debugLog)
            for h in logger.handlers:
                if type(h) == logging.FileHandler and \
                   h.baseFilename == debugLogAbsPath:
                    found = True
        else:
            for h in logger.handlers:
                if type(h) == logging.StreamHandler and \
                   h.stream == self.logStream:
                    found = True
        if not found:
            logger.addHandler(handler)
        # default level
        lvl = logging.WARNING
        if self.verbosity >= 5:
            lvl = 0
        elif self.verbosity >= 4:
            lvl = logging.DEBUG
        elif self.verbosity >= 3:
            lvl = logging.INFO
        logger.setLevel(lvl)
        # individual overrides
        if self.debug:
            # no blanks
            debug_loggers = [ name for name in self.debug.split(',')
                              if name ]
            for logger_name in debug_loggers:
                l = logging.getLogger(logger_name)
                l.setLevel(logging.DEBUG)
                if not l.handlers and not logger_name.startswith('nose'):
                    l.addHandler(handler)
    def configureWhere(self, where):
        """Configure the working directory or directories for the test run.
        """
        from nose.importer import add_path
        self.workingDir = None
        where = tolist(where)
        warned = False
        for path in where:
            if not self.workingDir:
                abs_path = absdir(path)
                if abs_path is None:
                    raise ValueError("Working directory '%s' not found, or "
                                     "not a directory" % path)
                log.info("Set working dir to %s", abs_path)
                self.workingDir = abs_path
                if self.addPaths and \
                   os.path.exists(os.path.join(abs_path, '__init__.py')):
                    log.info("Working directory %s is a package; "
                             "adding to sys.path" % abs_path)
                    add_path(abs_path)
                continue
            if not warned:
                warn("Use of multiple -w arguments is deprecated and "
                     "support may be removed in a future release. You can "
                     "get the same behavior by passing directories without "
                     "the -w argument on the command line, or by using the "
                     "--tests argument in a configuration file.",
                     DeprecationWarning)
                warned = True
            self.testNames.append(path)
    def default(self):
        """Reset all config values to defaults.
        """
        self.__dict__.update(self._default)
    def getParser(self, doc=None):
        """Get the command line option parser.
        """
        if self.parser:
            return self.parser
        env = self.env
        parser = self.parserClass(doc)
        parser.add_option(
            "-V","--version", action="store_true",
            dest="version", default=False,
            help="Output nose version and exit")
        parser.add_option(
            "-p", "--plugins", action="store_true",
            dest="showPlugins", default=False,
            help="Output list of available plugins and exit. Combine with "
            "higher verbosity for greater detail")
        parser.add_option(
            "-v", "--verbose",
            action="count", dest="verbosity",
            default=self.verbosity,
            help="Be more verbose. [NOSE_VERBOSE]")
        parser.add_option(
            "--verbosity", action="store", dest="verbosity",
            metavar='VERBOSITY',
            type="int", help="Set verbosity; --verbosity=2 is "
            "the same as -v")
        parser.add_option(
            "-q", "--quiet", action="store_const", const=0, dest="verbosity",
            help="Be less verbose")
        parser.add_option(
            "-c", "--config", action="append", dest="files",
            metavar="FILES",
            help="Load configuration from config file(s). May be specified "
            "multiple times; in that case, all config files will be "
            "loaded and combined")
        parser.add_option(
            "-w", "--where", action="append", dest="where",
            metavar="WHERE",
            help="Look for tests in this directory. "
            "May be specified multiple times. The first directory passed "
            "will be used as the working directory, in place of the current "
            "working directory, which is the default. Others will be added "
            "to the list of tests to execute. [NOSE_WHERE]"
            )
        parser.add_option(
            "--py3where", action="append", dest="py3where",
            metavar="PY3WHERE",
            help="Look for tests in this directory under Python 3.x. "
            "Functions the same as 'where', but only applies if running under "
            "Python 3.x or above.  Note that, if present under 3.x, this "
            "option completely replaces any directories specified with "
            "'where', so the 'where' option becomes ineffective. "
            "[NOSE_PY3WHERE]"
            )
        parser.add_option(
            "-m", "--match", "--testmatch", action="store",
            dest="testMatch", metavar="REGEX",
            help="Files, directories, function names, and class names "
            "that match this regular expression are considered tests.  "
            "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
            default=self.testMatchPat)
        parser.add_option(
            "--tests", action="store", dest="testNames", default=None,
            metavar='NAMES',
            help="Run these tests (comma-separated list). This argument is "
            "useful mainly from configuration files; on the command line, "
            "just pass the tests to run as additional arguments with no "
            "switch.")
        parser.add_option(
            "-l", "--debug", action="store",
            dest="debug", default=self.debug,
            help="Activate debug logging for one or more systems. "
            "Available debug loggers: nose, nose.importer, "
            "nose.inspector, nose.plugins, nose.result and "
            "nose.selector. Separate multiple names with a comma.")
        parser.add_option(
            "--debug-log", dest="debugLog", action="store",
            default=self.debugLog, metavar="FILE",
            help="Log debug messages to this file "
            "(default: sys.stderr)")
        parser.add_option(
            "--logging-config", "--log-config",
            dest="loggingConfig", action="store",
            default=self.loggingConfig, metavar="FILE",
            help="Load logging config from this file -- bypasses all other"
            " logging config settings.")
        parser.add_option(
            "-I", "--ignore-files", action="append", dest="ignoreFiles",
            metavar="REGEX",
            help="Completely ignore any file that matches this regular "
            "expression. Takes precedence over any other settings or "
            "plugins. "
            "Specifying this option will replace the default setting. "
            "Specify this option multiple times "
            "to add more regular expressions [NOSE_IGNORE_FILES]")
        parser.add_option(
            "-e", "--exclude", action="append", dest="exclude",
            metavar="REGEX",
            help="Don't run tests that match regular "
            "expression [NOSE_EXCLUDE]")
        parser.add_option(
            "-i", "--include", action="append", dest="include",
            metavar="REGEX",
            help="This regular expression will be applied to files, "
            "directories, function names, and class names for a chance "
            "to include additional tests that do not match TESTMATCH.  "
            "Specify this option multiple times "
            "to add more regular expressions [NOSE_INCLUDE]")
        parser.add_option(
            "-x", "--stop", action="store_true", dest="stopOnError",
            default=self.stopOnError,
            help="Stop running tests after the first error or failure")
        parser.add_option(
            "-P", "--no-path-adjustment", action="store_false",
            dest="addPaths",
            default=self.addPaths,
            help="Don't make any changes to sys.path when "
            "loading tests [NOSE_NOPATH]")
        parser.add_option(
            "--exe", action="store_true", dest="includeExe",
            default=self.includeExe,
            help="Look for tests in python modules that are "
            "executable. Normal behavior is to exclude executable "
            "modules, since they may not be import-safe "
            "[NOSE_INCLUDE_EXE]")
        parser.add_option(
            "--noexe", action="store_false", dest="includeExe",
            help="DO NOT look for tests in python modules that are "
            "executable. (The default on the windows platform is to "
            "do so.)")
        parser.add_option(
            "--traverse-namespace", action="store_true",
            default=self.traverseNamespace, dest="traverseNamespace",
            help="Traverse through all path entries of a namespace package")
        parser.add_option(
            "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
            action="store_true", default=False, dest="firstPackageWins",
            help="nose's importer will normally evict a package from sys."
            "modules if it sees a package with the same name in a different "
            "location. Set this option to disable that behavior.")
        parser.add_option(
            "--no-byte-compile",
            action="store_false", default=True, dest="byteCompile",
            help="Prevent nose from byte-compiling the source into .pyc files "
            "while nose is scanning for and running tests.")
        self.plugins.loadPlugins()
        self.pluginOpts(parser)
        self.parser = parser
        return parser
    def help(self, doc=None):
        """Return the generated help message
        """
        return self.getParser(doc).format_help()
    def pluginOpts(self, parser):
        """Let loaded plugins add their own options to *parser*."""
        self.plugins.addOptions(parser, self.env)
    def reset(self):
        """Restore the state captured right after construction."""
        self.__dict__.update(self._orig)
    def todict(self):
        """Return a shallow copy of the config state as a plain dict."""
        return self.__dict__.copy()
    def update(self, d):
        """Merge the mapping *d* into the config state."""
        self.__dict__.update(d)
class NoOptions(object):
    """Null options placeholder used before real options are parsed.

    Always evaluates false (Python 2 truth protocol) and pickles to an
    empty state.
    """
    def __nonzero__(self):
        # Falsy so code can test "if config.options:".
        return False
    def __getstate__(self):
        # Nothing to pickle.
        return {}
    def __setstate__(self, state):
        # Nothing to restore.
        pass
    def __getnewargs__(self):
        return ()
def user_config_files():
    """Return path to any existing user config files
    """
    # Expand "~" in each candidate path, then keep only the ones that exist.
    candidates = map(os.path.expanduser, config_files)
    return filter(os.path.exists, candidates)
def all_config_files():
    """Return path to any existing user config files, plus any setup.cfg
    in the current working directory.
    """
    user = user_config_files()
    if not os.path.exists('setup.cfg'):
        return user
    return user + ['setup.cfg']
# used when parsing config files
def flag(val):
    """Does the value look like an on/off flag?"""
    # Numeric (and boolean) one/zero are answered up front. NOTE(review):
    # the asymmetry -- 1/True yields True while 0/False yields False -- is
    # preserved from the original; confirm before "fixing" it.
    if val == 1:
        return True
    if val == 0:
        return False
    text = str(val)
    return len(text) <= 5 and text.upper() in (
        '1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
def _bool(val):
return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
| mpl-2.0 | -438,182,312,335,136,800 | -8,472,053,574,370,938,000 | 37.181543 | 82 | 0.563793 | false |
ngoix/OCRF | sklearn/neighbors/setup.py | 308 | 1219 | import os
def configuration(parent_package='', top_path=None):
    """numpy.distutils build hook: declare the compiled extensions of the
    sklearn.neighbors subpackage (ball_tree, kd_tree, dist_metrics,
    typedefs), all built from pre-generated C sources."""
    import numpy
    from numpy.distutils.misc_util import Configuration
    config = Configuration('neighbors', parent_package, top_path)
    libraries = []
    if os.name == 'posix':
        # link against libm for the C math routines on POSIX platforms
        libraries.append('m')
    config.add_extension('ball_tree',
                         sources=['ball_tree.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension('kd_tree',
                         sources=['kd_tree.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    # dist_metrics additionally needs headers from numpy's "numpy"
    # include subdirectory
    config.add_extension('dist_metrics',
                         sources=['dist_metrics.c'],
                         include_dirs=[numpy.get_include(),
                                       os.path.join(numpy.get_include(),
                                                    'numpy')],
                         libraries=libraries)
    config.add_extension('typedefs',
                         sources=['typedefs.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    return config
| bsd-3-clause | 2,983,716,983,857,690,000 | -6,407,658,571,536,787,000 | 33.828571 | 72 | 0.474159 | false |
dpausp/arguments | tests/concepts/document/test_documents.py | 1 | 1326 | import factory
from assert_helpers import assert_difference, assert_no_difference
from ekklesia_portal.datamodel import Document
from webtest_helpers import assert_deform, fill_form
def test_create_document(client, db_query, document_factory, proposition_type_factory, logged_in_department_admin):
    """A department admin can create a document via the +new form."""
    department = logged_in_department_admin.managed_departments[0]
    area = department.areas[0]
    proposition_type = proposition_type_factory()
    # build a plain dict payload from the factory without persisting anything
    data = factory.build(dict, FACTORY_CLASS=document_factory)
    # the form takes foreign keys as ids, not as objects
    del data['area']
    data['area_id'] = area.id
    del data['proposition_type']
    data['proposition_type_id'] = proposition_type.id
    res = client.get('/documents/+new')
    form = assert_deform(res)
    fill_form(form, data)
    # submitting must add exactly one Document row and redirect (302)
    with assert_difference(db_query(Document).count, 1):
        form.submit(status=302)
def test_update_document(db_session, client, document_factory, logged_in_department_admin):
    """A department admin can edit a document of an area they manage."""
    department = logged_in_department_admin.managed_departments[0]
    area = department.areas[0]
    document = document_factory(area=area)
    res = client.get(f'/documents/{document.id}/+edit')
    # the edit form must be pre-filled with the document's current state
    expected = document.to_dict()
    form = assert_deform(res, expected)
    form['description'] = 'new description'
    form.submit(status=302)
    # the change is persisted on the ORM object
    assert document.description == 'new description'
| agpl-3.0 | -4,847,559,974,674,684,000 | 8,848,452,441,058,304,000 | 38 | 115 | 0.723982 | false |
leiferikb/bitpop | src/tools/python/google/gethash_timer.py | 182 | 4366 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Issue a series of GetHash requests to the SafeBrowsing servers and measure
the response times.
Usage:
$ ./gethash_timer.py --period=600 --samples=20 --output=resp.csv
--period (or -p): The amount of time (in seconds) to wait between GetHash
requests. Using a value of more than 300 (5 minutes) to
include the effect of DNS.
--samples (or -s): The number of requests to issue. If this parameter is not
specified, the test will run indefinitely.
--output (or -o): The path to a file where the output will be written in
CSV format: sample_number,response_code,elapsed_time_ms
"""
import getopt
import httplib
import sys
import time
_GETHASH_HOST = 'safebrowsing.clients.google.com'
_GETHASH_REQUEST = (
'/safebrowsing/gethash?client=googleclient&appver=1.0&pver=2.1')
# Global logging file handle.
g_file_handle = None
def IssueGetHash(prefix):
  '''Issue one GetHash request to the safebrowsing servers.
  Args:
    prefix: A 4 byte value to look up on the server.
  Returns:
    The HTTP response code for the GetHash request.
  '''
  body = '4:4\n' + prefix
  h = httplib.HTTPConnection(_GETHASH_HOST)
  # Close the connection even if the request raises, so repeated samples
  # (see RunTimedGetHash) cannot leak sockets.
  try:
    h.putrequest('POST', _GETHASH_REQUEST)
    h.putheader('content-length', str(len(body)))
    h.endheaders()
    h.send(body)
    response_code = h.getresponse().status
  finally:
    h.close()
  return response_code
def TimedGetHash(prefix):
  '''Measure the amount of time it takes to receive a GetHash response.
  Args:
    prefix: A 4 byte value to look up on the server.
  Returns:
    A tuple of HTTP response code and the response time (in milliseconds).
  '''
  t0 = time.time()
  status = IssueGetHash(prefix)
  elapsed_ms = (time.time() - t0) * 1000
  return status, elapsed_ms
def RunTimedGetHash(period, samples=None):
  '''Runs an experiment to measure the amount of time it takes to receive
  multiple responses from the GetHash servers.

  Args:
    period: A floating point value that indicates (in seconds) the delay
        between requests.
    samples: An integer value indicating the number of requests to make.
        If 'None', the test continues indefinitely.
  Returns:
    None.
  '''
  prefix = '\x50\x61\x75\x6c'
  sample_count = 1
  while True:
    response_code, elapsed_time = TimedGetHash(prefix)
    LogResponse(sample_count, response_code, elapsed_time)
    # Stop once exactly `samples` requests have been issued.  The original
    # incremented before comparing, which issued one request too few and
    # never terminated for samples == 1.
    if samples is not None and sample_count >= samples:
      break
    sample_count += 1
    time.sleep(period)
def LogResponse(sample_count, response_code, elapsed_time):
  '''Output the response for one GetHash query.
  Args:
    sample_count: The current sample number.
    response_code: The HTTP response code for the GetHash request.
    elapsed_time: The round-trip time (in milliseconds) for the
        GetHash request.
  Returns:
    None.
  '''
  # NOTE: the `global` statement is not needed for reads; kept as-is.
  global g_file_handle
  output_list = (sample_count, response_code, elapsed_time)
  print 'Request: %d, status: %d, elapsed time: %f ms' % output_list
  if g_file_handle is not None:
    g_file_handle.write(('%d,%d,%f' % output_list) + '\n')
    # flush every sample so data survives an interrupted run
    g_file_handle.flush()
def SetupOutputFile(file_name):
  '''Open a file for logging results.
  Args:
    file_name: A path to a file to store the output.
  Returns:
    None.
  '''
  # The handle stays open for the whole run; main() closes it on exit.
  global g_file_handle
  g_file_handle = open(file_name, 'w')
def main():
period = 10
samples = None
options, args = getopt.getopt(sys.argv[1:],
's:p:o:',
['samples=', 'period=', 'output='])
for option, value in options:
if option == '-s' or option == '--samples':
samples = int(value)
elif option == '-p' or option == '--period':
period = float(value)
elif option == '-o' or option == '--output':
file_name = value
else:
print 'Bad option: %s' % option
return 1
try:
print 'Starting Timed GetHash ----------'
SetupOutputFile(file_name)
RunTimedGetHash(period, samples)
except KeyboardInterrupt:
pass
print 'Timed GetHash complete ----------'
g_file_handle.close()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | -2,945,765,957,479,331,000 | 3,293,283,431,397,287,400 | 28.302013 | 78 | 0.653 | false |
kn-bibs/dotplot | dotplot/matrices.py | 1 | 3681 | """This module works with similarity matrices of aminoacids"""
import os
available_matrices = {
'PAM120': 'matrices/PAM120.txt'
}
class SimilarityMatrix:
    """An aminoacid similarity matrix with scores rescaled to [0, 1]."""

    def __init__(self, name):
        """Load the matrix registered under `name` in `available_matrices`."""
        filename = available_matrices[name]
        # parse the raw integer scores from the bundled data file
        raw_matrix = self.read_raw_matrix(filename)
        # remember the original score range so values can be rescaled
        self.scaling_factors = self.get_min_and_max(raw_matrix)
        # rescale every score to a fraction of the observed range
        self.matrix = self.normalize(raw_matrix)

    @staticmethod
    def get_min_and_max(matrix):
        """Get minimal and maximal value occurring in given matrix."""
        aminoacids = list(matrix.keys())
        lowest = None
        highest = None
        if aminoacids:
            first = aminoacids[0]
            lowest = highest = matrix[first][first]
        # the matrix is symmetric, so scanning the upper triangle
        # (diagonal included) sees every distinct value
        for position, aa_1 in enumerate(aminoacids):
            for aa_2 in aminoacids[position:]:
                value = matrix[aa_1][aa_2]
                if value < lowest:
                    lowest = value
                if value > highest:
                    highest = value
        return {
            'min': lowest,
            'max': highest
        }

    def normalize(self, matrix):
        """Transform numerical values from raw matrix into percentages.

        For example: if we had values from -5 to 5, then now:
            -5 will be 0,
            5 will be 1,
            0 will be 0.5, and so on.
        The rescaling is done in place and the same dict is returned.
        """
        low = self.scaling_factors['min']
        span = self.scaling_factors['max'] - low
        for row in matrix.values():
            for aa, value in row.items():
                row[aa] = (value - low) / span
        return matrix

    @staticmethod
    def read_raw_matrix(filename):
        """Parse a whitespace-separated matrix file into a nested dict."""
        path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            filename
        )
        with open(path) as handle:
            lines = handle.readlines()
        # the first line lists the aminoacid order; uppercase everything
        # once here so later lookups never worry about letter case
        header = [aa.upper() for aa in lines[0].split()]
        matrix = {aa: {} for aa in header}
        for line in lines[1:]:
            cells = line.split()
            row_name = cells[0].upper()
            # remaining columns are integer scores, one per header entry
            scores = [int(cell) for cell in cells[1:]]
            matrix[row_name] = dict(zip(header, scores))
        return matrix

    def get_value(self, aa_1, aa_2):
        """Return the similarity value for two aminoacids.

        Args:
            aa_1: a letter representing first aminoacid
            aa_2: a letter representing second aminoacid
        """
        # case-insensitive lookup: the matrix keys are uppercase
        return self.matrix[aa_1.upper()][aa_2.upper()]
| lgpl-3.0 | 4,191,791,402,052,067,300 | -2,203,131,224,185,950,500 | 30.194915 | 77 | 0.549307 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/local_network_gateway.py | 1 | 3220 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
# NOTE: AutoRest-generated model -- keep edits to comments only, as code
# changes are lost on regeneration (see the header above).
class LocalNetworkGateway(Resource):
    """A common class for general resource information.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param local_network_address_space: Local network site address space.
    :type local_network_address_space:
     ~azure.mgmt.network.v2017_11_01.models.AddressSpace
    :param gateway_ip_address: IP address of local network gateway.
    :type gateway_ip_address: str
    :param bgp_settings: Local network gateway's BGP speaker settings.
    :type bgp_settings: ~azure.mgmt.network.v2017_11_01.models.BgpSettings
    :param resource_guid: The resource GUID property of the
     LocalNetworkGateway resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the
     LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting',
     and 'Failed'.
    :vartype provisioning_state: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # server-populated fields that must never be sent in a request
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # maps Python attribute names to wire (JSON) paths and msrest types
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
        'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
        'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LocalNetworkGateway, self).__init__(**kwargs)
        self.local_network_address_space = kwargs.get('local_network_address_space', None)
        self.gateway_ip_address = kwargs.get('gateway_ip_address', None)
        self.bgp_settings = kwargs.get('bgp_settings', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        # read-only on the service side, so always reset locally
        self.provisioning_state = None
        self.etag = kwargs.get('etag', None)
| mit | 6,306,756,308,578,801,000 | -3,934,550,553,931,779,000 | 40.818182 | 110 | 0.618012 | false |
kmod/icbd | stdlib/python2.5/lib-tk/Canvas.py | 19 | 7344 | # This module exports classes for the various canvas item types
# NOTE: This module was an experiment and is now obsolete.
# It's best to use the Tkinter.Canvas class directly.
from Tkinter import Canvas, _cnfmerge, _flatten
class CanvasItem:
    """Object-oriented wrapper for a single Tkinter canvas item.

    Creates the item on construction and registers itself in the canvas's
    ``items`` dict (keyed by item id).  Most methods are thin delegations
    to the corresponding Canvas call with ``self.id`` filled in.
    """
    def __init__(self, canvas, itemType, *args, **kw):
        self.canvas = canvas
        self.id = canvas._create(itemType, args, kw)
        # lazily create the per-canvas registry of wrapper objects
        if not hasattr(canvas, 'items'):
            canvas.items = {}
        canvas.items[self.id] = self
    def __str__(self):
        return str(self.id)
    def __repr__(self):
        return '<%s, id=%d>' % (self.__class__.__name__, self.id)
    def delete(self):
        # remove from the registry and destroy the underlying canvas item
        del self.canvas.items[self.id]
        self.canvas.delete(self.id)
    def __getitem__(self, key):
        # Tk's "itemconfigure -option" returns a 5-tuple; element 4 is the
        # option's current value.
        v = self.canvas.tk.split(self.canvas.tk.call(
            self.canvas._w, 'itemconfigure',
            self.id, '-' + key))
        return v[4]
    cget = __getitem__
    def __setitem__(self, key, value):
        self.canvas.itemconfig(self.id, {key: value})
    def keys(self):
        # option names are queried from Tk once and cached on the instance
        if not hasattr(self, '_keys'):
            self._keys = map(lambda x, tk=self.canvas.tk:
                             tk.splitlist(x)[0][1:],
                             self.canvas.tk.splitlist(
                                 self.canvas._do(
                                     'itemconfigure',
                                     (self.id,))))
        return self._keys
    def has_key(self, key):
        return key in self.keys()
    def __contains__(self, key):
        return key in self.keys()
    def addtag(self, tag, option='withtag'):
        self.canvas.addtag(tag, option, self.id)
    def bbox(self):
        # repackage Tk's flat (x1, y1, x2, y2) as two corner points
        x1, y1, x2, y2 = self.canvas.bbox(self.id)
        return (x1, y1), (x2, y2)
    def bind(self, sequence=None, command=None, add=None):
        return self.canvas.tag_bind(self.id, sequence, command, add)
    def unbind(self, sequence, funcid=None):
        self.canvas.tag_unbind(self.id, sequence, funcid)
    def config(self, cnf={}, **kw):
        return self.canvas.itemconfig(self.id, _cnfmerge((cnf, kw)))
    def coords(self, pts = ()):
        # flatten a sequence of (x, y) pairs into the flat list Tk expects
        flat = ()
        for x, y in pts: flat = flat + (x, y)
        return self.canvas.coords(self.id, *flat)
    def dchars(self, first, last=None):
        self.canvas.dchars(self.id, first, last)
    def dtag(self, ttd):
        self.canvas.dtag(self.id, ttd)
    def focus(self):
        self.canvas.focus(self.id)
    def gettags(self):
        return self.canvas.gettags(self.id)
    def icursor(self, index):
        self.canvas.icursor(self.id, index)
    def index(self, index):
        return self.canvas.index(self.id, index)
    def insert(self, beforethis, string):
        self.canvas.insert(self.id, beforethis, string)
    def lower(self, belowthis=None):
        self.canvas.tag_lower(self.id, belowthis)
    def move(self, xamount, yamount):
        self.canvas.move(self.id, xamount, yamount)
    def tkraise(self, abovethis=None):
        self.canvas.tag_raise(self.id, abovethis)
    raise_ = tkraise # BW compat
    def scale(self, xorigin, yorigin, xscale, yscale):
        self.canvas.scale(self.id, xorigin, yorigin, xscale, yscale)
    def type(self):
        return self.canvas.type(self.id)
# Thin convenience subclasses: each one only fixes the canvas item type
# string passed to CanvasItem and inherits all behaviour from it.
class Arc(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'arc', *args, **kw)
class Bitmap(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'bitmap', *args, **kw)
class ImageItem(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'image', *args, **kw)
class Line(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'line', *args, **kw)
class Oval(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'oval', *args, **kw)
class Polygon(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'polygon', *args, **kw)
class Rectangle(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'rectangle', *args, **kw)
# XXX "Text" is taken by the Text widget...
class CanvasText(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'text', *args, **kw)
class Window(CanvasItem):
    def __init__(self, canvas, *args, **kw):
        CanvasItem.__init__(self, canvas, 'window', *args, **kw)
class Group:
    """A named tag grouping several canvas items.

    Operations are applied to every item carrying the tag; ``_do``
    prefixes the tag onto the underlying Canvas command.
    """
    def __init__(self, canvas, tag=None):
        # generate a unique tag name if none was supplied
        if not tag:
            tag = 'Group%d' % id(self)
        self.tag = self.id = tag
        self.canvas = canvas
        # clear the tag so the group starts out empty
        self.canvas.dtag(self.tag)
    def str(self):
        return self.tag
    __str__ = str
    def _do(self, cmd, *args):
        # run a canvas command with this group's tag as first argument
        return self.canvas._do(cmd, (self.tag,) + _flatten(args))
    def addtag_above(self, tagOrId):
        self._do('addtag', 'above', tagOrId)
    def addtag_all(self):
        self._do('addtag', 'all')
    def addtag_below(self, tagOrId):
        self._do('addtag', 'below', tagOrId)
    def addtag_closest(self, x, y, halo=None, start=None):
        self._do('addtag', 'closest', x, y, halo, start)
    def addtag_enclosed(self, x1, y1, x2, y2):
        self._do('addtag', 'enclosed', x1, y1, x2, y2)
    def addtag_overlapping(self, x1, y1, x2, y2):
        self._do('addtag', 'overlapping', x1, y1, x2, y2)
    def addtag_withtag(self, tagOrId):
        self._do('addtag', 'withtag', tagOrId)
    def bbox(self):
        return self.canvas._getints(self._do('bbox'))
    def bind(self, sequence=None, command=None, add=None):
        return self.canvas.tag_bind(self.id, sequence, command, add)
    def unbind(self, sequence, funcid=None):
        self.canvas.tag_unbind(self.id, sequence, funcid)
    def coords(self, *pts):
        return self._do('coords', pts)
    def dchars(self, first, last=None):
        self._do('dchars', first, last)
    def delete(self):
        self._do('delete')
    def dtag(self, tagToDelete=None):
        self._do('dtag', tagToDelete)
    def focus(self):
        self._do('focus')
    def gettags(self):
        return self.canvas.tk.splitlist(self._do('gettags', self.tag))
    def icursor(self, index):
        return self._do('icursor', index)
    def index(self, index):
        return self.canvas.tk.getint(self._do('index', index))
    def insert(self, beforeThis, string):
        self._do('insert', beforeThis, string)
    def config(self, cnf={}, **kw):
        return self.canvas.itemconfigure(self.tag, _cnfmerge((cnf,kw)))
    def lower(self, belowThis=None):
        self._do('lower', belowThis)
    def move(self, xAmount, yAmount):
        self._do('move', xAmount, yAmount)
    def tkraise(self, aboveThis=None):
        self._do('raise', aboveThis)
    lift = tkraise
    def scale(self, xOrigin, yOrigin, xScale, yScale):
        self._do('scale', xOrigin, yOrigin, xScale, yScale)
    def select_adjust(self, index):
        self.canvas._do('select', ('adjust', self.tag, index))
    def select_from(self, index):
        self.canvas._do('select', ('from', self.tag, index))
    def select_to(self, index):
        self.canvas._do('select', ('to', self.tag, index))
    def type(self):
        return self._do('type')
| mit | 4,815,103,169,581,614,000 | 3,748,477,578,285,623,300 | 37.652632 | 71 | 0.582925 | false |
ahb0327/intellij-community | python/lib/Lib/signal.py | 93 | 6858 | """
This module provides mechanisms to use signal handlers in Python.
Functions:
signal(sig,action) -- set the action for a given signal (done)
pause(sig) -- wait until a signal arrives [Unix only]
alarm(seconds) -- cause SIGALRM after a specified time [Unix only]
getsignal(sig) -- get the signal action for a given signal
default_int_handler(action) -- default SIGINT handler (done, but acts string)
Constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
*** IMPORTANT NOTICES ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
According to http://java.sun.com/products/jdk/faq/faq-sun-packages.html
'writing java programs that rely on sun.* is risky: they are not portable, and are not supported.'
However, in Jython, like Python, we let you decide what makes
sense for your application. If sun.misc.Signal is not available,
an ImportError is raised.
"""
try:
import sun.misc.Signal
except ImportError:
raise ImportError("signal module requires sun.misc.Signal, which is not available on this platform")
import os
import sun.misc.SignalHandler
import sys
import threading
import time
from java.lang import IllegalArgumentException
from java.util.concurrent.atomic import AtomicReference
debug = 0
def _init_signals():
    """Probe the JVM for every standard signal name and register the ones
    that exist.

    Each available signal is installed as a module-level constant (SIGINT,
    SIGTERM, ...) holding its number.  Returns a dict mapping signal number
    to the corresponding sun.misc.Signal object.
    """
    candidate_names = (
        'SIGABRT', 'SIGALRM', 'SIGBUS', 'SIGCHLD', 'SIGCONT', 'SIGFPE',
        'SIGHUP', 'SIGILL', 'SIGINFO', 'SIGINT', 'SIGIOT', 'SIGKILL',
        'SIGPIPE', 'SIGPOLL', 'SIGPROF', 'SIGQUIT', 'SIGSEGV', 'SIGSTOP',
        'SIGSYS', 'SIGTERM', 'SIGTRAP', 'SIGTSTP', 'SIGTTIN', 'SIGTTOU',
        'SIGURG', 'SIGUSR1', 'SIGUSR2', 'SIGVTALRM', 'SIGWINCH', 'SIGXCPU',
        'SIGXFSZ',
    )
    this_module = __import__(__name__)
    signals = {}
    for name in candidate_names:
        try:
            # sun.misc.Signal takes the name without the "SIG" prefix and
            # raises IllegalArgumentException for signals this platform lacks
            java_signal = sun.misc.Signal(name[3:])
        except IllegalArgumentException:
            continue
        number = java_signal.getNumber()
        signals[number] = java_signal
        setattr(this_module, name, number)  # install as a module constant
    return signals
_signals = _init_signals()
NSIG = max(_signals.iterkeys()) + 1
SIG_DFL = sun.misc.SignalHandler.SIG_DFL # default system handler
SIG_IGN = sun.misc.SignalHandler.SIG_IGN # handler to ignore a signal
class JythonSignalHandler(sun.misc.SignalHandler):
    # Adapter: wraps a Python callable so it can be installed as a Java
    # sun.misc.SignalHandler; calls it as action(signum, frame).
    def __init__(self, action):
        self.action = action
    def handle(self, signal):
        # passing a frame here probably don't make sense in a threaded system,
        # but perhaps revisit
        self.action(signal.getNumber(), None)
def signal(sig, action):
    """
    signal(sig, action) -> action

    Set the action for the given signal. The action can be SIG_DFL,
    SIG_IGN, or a callable Python object. The previous action is
    returned. See getsignal() for possible return values.

    *** IMPORTANT NOTICE ***
    A signal handler function is called with two arguments:
    the first is the signal number, the second is the interrupted stack frame.
    """
    # maybe keep a weak ref map of handlers we have returned?
    try:
        # NOTE: local name `signal` shadows this function inside the body
        signal = _signals[sig]
    except KeyError:
        raise ValueError("signal number out of range")
    if callable(action):
        # wrap Python callables so the JVM can invoke them
        prev = sun.misc.Signal.handle(signal, JythonSignalHandler(action))
    elif action in (SIG_IGN, SIG_DFL) or isinstance(action, sun.misc.SignalHandler):
        prev = sun.misc.Signal.handle(signal, action)
    else:
        raise TypeError("signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object")
    # unwrap our adapter so callers get back the original Python callable
    if isinstance(prev, JythonSignalHandler):
        return prev.action
    else:
        return prev
# dangerous! don't use!
def getsignal(sig):
    """getsignal(sig) -> action

    Return the current action for the given signal. The return value can be:
    SIG_IGN -- if the signal is being ignored
    SIG_DFL -- if the default action for the signal is in effect
    None -- if an unknown handler is in effect
    anything else -- the callable Python object used as a handler

    Note for Jython: this function is NOT threadsafe. The underlying
    Java support only enables getting the current signal handler by
    setting a new one. So this is completely prone to race conditions.
    """
    try:
        signal = _signals[sig]
    except KeyError:
        raise ValueError("signal number out of range")
    # read-by-replace: temporarily install SIG_DFL to learn the current
    # handler, then put it back (the race window mentioned above)
    current = sun.misc.Signal.handle(signal, SIG_DFL)
    sun.misc.Signal.handle(signal, current) # and reinstall
    if isinstance(current, JythonSignalHandler):
        return current.action
    else:
        return current
def default_int_handler(sig, frame):
    """
    default_int_handler(...)

    The default handler for SIGINT installed by Python.
    It raises KeyboardInterrupt.
    """
    # both arguments are accepted for handler-signature compatibility
    # but are unused
    raise KeyboardInterrupt
def pause():
    # POSIX pause() has no JVM equivalent here.
    raise NotImplementedError
_alarm_timer_holder = AtomicReference()
def _alarm_handler(sig, frame):
    # Mimic the shell's "Alarm clock" message, then hard-exit the process
    # (os._exit skips cleanup handlers).
    print "Alarm clock"
    os._exit(0)
# install a default alarm handler, the one we get by default doesn't
# work terribly well since it throws a bus error (at least on OS X)!
try:
SIGALRM
signal(SIGALRM, _alarm_handler)
except NameError:
pass
class _Alarm(object):
def __init__(self, interval, task):
self.interval = interval
self.task = task
self.scheduled = None
self.timer = threading.Timer(self.interval, self.task)
def start(self):
self.timer.start()
self.scheduled = time.time() + self.interval
def cancel(self):
self.timer.cancel()
now = time.time()
if self.scheduled and self.scheduled > now:
return self.scheduled - now
else:
return 0
def alarm(time):
    """Schedule SIGALRM after `time` seconds; 0 cancels any pending alarm.

    Returns the number of whole seconds that were remaining on a previously
    scheduled alarm, or 0 if none was pending.
    NOTE: the parameter name shadows the `time` module inside this function.
    """
    try:
        SIGALRM
    except NameError:
        raise NotImplementedError("alarm not implemented on this platform")
    def raise_alarm():
        # Jython-only: `raise` is a Java method name on sun.misc.Signal
        sun.misc.Signal.raise(_signals[SIGALRM])
    if time > 0:
        new_alarm_timer = _Alarm(time, raise_alarm)
    else:
        new_alarm_timer = None
    # atomically swap in the new timer so concurrent alarm() calls are safe
    old_alarm_timer = _alarm_timer_holder.getAndSet(new_alarm_timer)
    if old_alarm_timer:
        scheduled = int(old_alarm_timer.cancel())
    else:
        scheduled = 0
    if new_alarm_timer:
        new_alarm_timer.start()
    return scheduled
| apache-2.0 | -1,213,948,442,182,875,600 | 3,884,752,407,885,943,300 | 27.694561 | 104 | 0.656605 | false |
shumingch/molecule_simulation | init.py | 1 | 2823 | #init from camera
from bge import logic, render
from particle import Particle
from mathutils import Vector, Matrix
gdict = logic.globalDict
def draw():
    """Pre-draw hook: render bonds, molecule text, textures and the laser."""
    camera = scene.objects["Camera"]
    string = "BondCraft"  # NOTE(review): unused local
    ###draws bonds and changes text before frame load
    atoms = gdict["atoms"].copy()
    for atom in gdict["atoms"]:
        ###searches for everything connected to it and draws the bonds
        #prevents two line draws
        # each atom is removed from the working copy before drawing, so
        # every pair of atoms is considered only once
        atoms.remove(atom)
        atom.bond.draw_bonds(atoms)
    for molecule in gdict["molecules"]:
        molecule.draw_text()
    for texture in gdict["textures"]:
        texture.refresh(True)
    if camera["laser"]:
        # draw the beam from beside the camera and push the first object hit
        crosshairs = scene.objects["Crosshairs"]
        start = camera.worldPosition + camera.getAxisVect((1,-1,0))
        end = camera.worldPosition - camera.getAxisVect((0,0,1))
        render.drawLine(start,end,[0,1,0])
        obj,point, normal = camera.rayCast(crosshairs,None,2000)
        if obj:
            render.drawLine(point,point + normal * 10000,[0,1,0])
            # push the hit object away along the surface normal
            obj.applyForce(-100 * normal)
def play(cont):
    """Menu "play" button: restart the game scene and close the UI overlay."""
    scene.restart()
    # flag consumed by main() to switch from menu to game mode
    gdict["play"] = True
    UI = cont.owner.scene
    UI.end()
def main(cont):
    """Per-frame controller: show the menu, start the game, or (re)initialise
    the global game state on a fresh run."""
    global scene
    scene = logic.getCurrentScene()
    scenes = logic.getSceneList()  # NOTE(review): unused local
    camera = cont.owner
    overlay = camera.actuators["Scene"]
    # camera state 2 is in the menu
    if camera.state == 2:
        if "play" not in gdict:
            # show menu
            cont.activate(overlay)
            render.showMouse(True)
            logic.setGravity([0,0,-9.8])
        else:
            # start game
            camera.state = 1
            render.showMouse(False)
            scene.objects["Floor"].endObject()
            scene.objects["Spawn"].endObject()
            logic.setGravity([0,0,0])
            scene.objects["Cube"].visible = True
            scene.objects["BondCraft"].visible = True
        return
    # fresh run: reset all shared game state in the global dictionary
    print("###############GAME START##################")
    gdict.clear()
    gdict["free"] = { "Hydrogen": set(),
                      "Carbon": set(),
                      "Oxygen": set(),
                      "Nitrogen": set(),
                      "Bromine": set()
                      }
    gdict["cations"] = set()
    gdict["atoms"] = set()
    gdict["textures"] = []
    gdict["molecules"] = set()
    gdict["primary"] = "Hydrogen"
    gdict["camera"] = scene.objects["Camera"]
    gdict["prim_text"] = scene.objects["prim_text"]
    gdict["prim_text"].resolution = 16
    gdict["text"] = scene.objects["Text"]
    gdict["text"].resolution = 16
    #bind line drawing function
    scene.pre_draw = [draw]
    #slow down
    #fps =1000
    #logic.setLogicTicRate(fps)
    #logic.setPhysicsTicRate(fps)
orchidinfosys/odoo | addons/survey_crm/survey.py | 47 | 1162 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
class survey_mail_compose_message(osv.TransientModel):
    _inherit = 'survey.mail.compose.message'

    def default_get(self, cr, uid, fields, context=None):
        """When composing from selected crm.lead records, prefill the
        recipients: leads with a partner contribute partner_ids, the rest
        contribute "Name <email>" lines collected in multi_email."""
        if context is None:
            # default_get may be called without a context; guard the
            # context.get() calls below against AttributeError.
            context = {}
        res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context)
        if context.get('active_model') == 'crm.lead' and context.get('active_ids'):
            partner_ids = []
            emails_list = []
            for lead in self.pool.get('crm.lead').browse(cr, uid, context.get('active_ids'), context=context):
                if lead.partner_id:
                    partner_ids.append(lead.partner_id.id)
                else:
                    # fall back to the raw lead contact; skip duplicates
                    email = lead.contact_name and "%s <%s>" % (lead.contact_name, lead.email_from or "") or lead.email_from or None
                    if email and email not in emails_list:
                        emails_list.append(email)
            multi_email = "\n".join(emails_list)
            res.update({'partner_ids': list(set(partner_ids)), 'multi_email': multi_email})
        return res
| gpl-3.0 | 2,404,693,007,373,170,700 | -5,798,620,662,426,046,000 | 45.48 | 131 | 0.592943 | false |
jmacmahon/invenio | modules/webstyle/lib/goto_plugins/goto_plugin_cern_hr_documents.py | 3 | 7382 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This implements a redirection for CERN HR Documents in the CERN Document
Server. It's useful as a reference on how goto plugins could be implemented.
"""
import time
import re
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibdocfile import BibRecDocs, InvenioBibDocFileError
def make_cern_ssr_docname(lang, edition, modif=0):
    """Build the docname of a Staff Rules & Regulations document.

    E.g. ('en', 8) -> 'CERN_SRR_en_ed08' and
    ('en', 8, 2) -> 'CERN_SRR_en_ed08_modif02'.
    """
    docname = "CERN_SRR_%s_ed%02d" % (lang, edition)
    if modif:
        docname += "_modif%02d" % modif
    return docname
_RE_REVISION = re.compile(r"rev(\d\d)")
def _get_revision(docname):
"""
Return the revision in a docname. E.g.:
CERN_Circ_Op_en_02_rev01_Implementation measures.pdf -> 1
CERN_Circ_Op_en_02_rev02_Implementation measures.PDF -> 2
"""
g = _RE_REVISION.search(docname)
if g:
return int(g.group(1))
return 0
def _register_document(documents, docname, key):
    """
    Register in the documents mapping the docname to key, but only if the
    docname has a revision higher than the docname already associated with
    the key (or if the key is not registered yet).
    """
    current = documents.get(key)
    if current is None or _get_revision(docname) > _get_revision(current):
        documents[key] = docname
def _classify_circular_docname(documents, docname):
    """
    Register docname in documents under the logical key derived from its
    filename. The more specific patterns (implementation, archiving,
    annex) are tried before the generic language-only patterns.
    """
    ldocname = docname.lower()
    if 'implementation' in ldocname:
        _register_document(documents, docname, 'implementation-en')
    elif 'application' in ldocname:
        _register_document(documents, docname, 'implementation-fr')
    elif 'archiving' in ldocname:
        _register_document(documents, docname, 'archiving-en')
    elif 'archivage' in ldocname:
        _register_document(documents, docname, 'archiving-fr')
    elif 'annexe' in ldocname or 'annexes_fr' in ldocname:
        _register_document(documents, docname, 'annex-fr')
    elif 'annexes_en' in ldocname or 'annex' in ldocname:
        _register_document(documents, docname, 'annex-en')
    elif '_en_' in ldocname or '_eng_' in ldocname or '_angl_' in ldocname:
        _register_document(documents, docname, 'en')
    elif '_fr_' in ldocname:
        _register_document(documents, docname, 'fr')
def _circular_document_url(collection, reportnumber_prefix, number, document):
    """
    Return the URL of the given logical document within the most recent
    circular numbered `number` in `collection`.

    @param collection: the collection to search (e.g. "Operational Circulars")
    @param reportnumber_prefix: report number prefix (e.g. "CERN-OPER-CIRC")
    @param number: the circular number
    @param document: logical document key (e.g. 'en', 'annex-fr')
    """
    recids = perform_request_search(cc=collection,
        p='reportnumber:"%s-%s-*"' % (reportnumber_prefix, number),
        sf="925__a")
    ## Results are sorted by 925__a, so the last recid is the current one.
    recid = recids[-1]
    documents = {}
    bibrecdocs = BibRecDocs(recid)
    for docname in bibrecdocs.get_bibdoc_names():
        _classify_circular_docname(documents, docname)
    ## The file extension case varies between uploads: try '.pdf' first,
    ## then fall back to '.PDF'.
    try:
        return bibrecdocs.get_bibdoc(documents[document]).get_file('.pdf').get_url()
    except InvenioBibDocFileError:
        return bibrecdocs.get_bibdoc(documents[document]).get_file('.PDF').get_url()
def goto(type, document='', number=0, lang='en', modif=0):
    """
    Redirect to a CERN HR document.

    @param type: one of 'SRR', 'OPER-CIRC', 'ADMIN-CIRC'
    @param document: logical document key within a circular
    @param number: the circular number
    @param lang: language of the SRR document
    @param modif: modification number of the SRR document
    @return: the URL of the requested PDF file
    """
    today = time.strftime('%Y-%m-%d')
    if type == 'SRR':
        ## We would like a CERN Staff Rules and Regulations
        recids = perform_request_search(cc='Staff Rules and Regulations', f="925__a:1996-01-01->%s 925__b:%s->9999-99-99" % (today, today))
        recid = recids[-1]
        reportnumber = get_fieldvalues(recid, '037__a')[0]
        edition = int(reportnumber[-2:]) ## e.g. CERN-STAFF-RULES-ED08
        return BibRecDocs(recid).get_bibdoc(make_cern_ssr_docname(lang, edition, modif)).get_file('.pdf').get_url()
    elif type == "OPER-CIRC":
        ## The two circular branches share the same lookup logic; only the
        ## collection and report-number prefix differ.
        return _circular_document_url("Operational Circulars",
                                      "CERN-OPER-CIRC", number, document)
    elif type == 'ADMIN-CIRC':
        return _circular_document_url("Administrative Circulars",
                                      "CERN-ADMIN-CIRC", number, document)
def register_hr_redirections():
    """
    Create every redirection handled by this plugin (run this only once).
    """
    from invenio.goto_engine import register_redirection
    plugin = 'goto_plugin_cern_hr_documents'
    languages = ('en', 'fr')
    ## Staff Rules and Regulations, with and without a modification number
    for modif in range(1, 20):
        for lang in languages:
            register_redirection(
                'hr-srr-modif%02d-%s' % (modif, lang), plugin,
                parameters={'type': 'SRR', 'lang': lang, 'modif': modif})
    for lang in languages:
        register_redirection(
            'hr-srr-%s' % lang, plugin,
            parameters={'type': 'SRR', 'lang': lang, 'modif': 0})
    ## Operational Circulars, plus their special documents
    for number in range(1, 10):
        for lang in languages:
            register_redirection(
                'hr-oper-circ-%s-%s' % (number, lang), plugin,
                parameters={'type': 'OPER-CIRC', 'document': lang,
                            'number': number})
    for number, special_document in ((2, 'implementation'), (2, 'annex'),
                                     (3, 'archiving'), (3, 'annex')):
        for lang in languages:
            register_redirection(
                'hr-circ-%s-%s-%s' % (number, special_document, lang), plugin,
                parameters={'type': 'OPER-CIRC',
                            'document': '%s-%s' % (special_document, lang),
                            'number': number})
    ## Administrative Circulars
    for number in range(1, 32):
        for lang in languages:
            register_redirection(
                'hr-admin-circ-%s-%s' % (number, lang), plugin,
                parameters={'type': 'ADMIN-CIRC', 'document': lang,
                            'number': number})
# Allow creating the redirections by running this module directly.
if __name__ == "__main__":
    register_hr_redirections()
| gpl-2.0 | 1,719,696,948,373,505,300 | -7,806,796,991,834,599,000 | 44.850932 | 195 | 0.622054 | false |
ankit318/appengine-mapreduce | python/test/mapreduce/status_test.py | 12 | 20599 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import time
import unittest
try:
import json
except ImportError:
import simplejson as json
from google.appengine.api import yaml_errors
from google.appengine.ext import db
from mapreduce import errors
from mapreduce import handlers
from mapreduce import status
from testlib import testutil
from mapreduce import test_support
from google.appengine.ext.webapp import mock_webapp
class TestKind(db.Model):
  """Minimal datastore model used as mapper input by the tests below."""
  # The default value is arbitrary; the tests never read this property.
  foobar = db.StringProperty(default="meep")
def TestMap(entity):
  """No-op map handler referenced when starting test mapreduce jobs."""
  return None
class MapreduceYamlTest(unittest.TestCase):
  """Tests mapreduce.yaml discovery, parsing and JSON encoding."""
  def set_up_directory_tree(self, dir_tree_contents):
    """Create directory tree from dict of path:contents entries."""
    for full_path, contents in dir_tree_contents.iteritems():
      dir_name = os.path.dirname(full_path)
      # Create intermediate directories on demand before writing the file.
      if not os.path.isdir(dir_name):
        os.makedirs(dir_name)
      f = open(full_path, 'w')
      f.write(contents)
      f.close()
  def setUp(self):
    """Initialize temporary application variable."""
    self.tempdir = tempfile.mkdtemp()
  def tearDown(self):
    """Remove temporary application directory."""
    if self.tempdir:
      shutil.rmtree(self.tempdir)
  def testFindYamlFile(self):
    """Test if mapreduce.yaml can be found with different app/library trees."""
    # Library and application live in sibling roots; the yaml sits in the
    # application root while status.py sits in the library root.
    test_status = os.path.join(self.tempdir, "library_root", "google",
                               "appengine", "ext", "mapreduce", "status.py")
    test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
                                       "mapreduce.yaml")
    test_dict = {
        test_status: "test",
        test_mapreduce_yaml: "test",
    }
    self.set_up_directory_tree(test_dict)
    os.chdir(os.path.dirname(test_mapreduce_yaml))
    yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
    self.assertTrue(os.path.samefile(test_mapreduce_yaml, yaml_loc))
  def testFindYamlFileSameTree(self):
    """Test if mapreduce.yaml can be found with the same app/library tree."""
    test_status = os.path.join(self.tempdir, "application_root", "google",
                               "appengine", "ext", "mapreduce", "status.py")
    test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
                                       "mapreduce.yaml")
    test_dict = {
        test_status: "test",
        test_mapreduce_yaml: "test",
    }
    self.set_up_directory_tree(test_dict)
    os.chdir(os.path.dirname(test_mapreduce_yaml))
    yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
    self.assertEqual(test_mapreduce_yaml, yaml_loc)
  def testParseEmptyFile(self):
    """Parsing empty mapreduce.yaml file."""
    self.assertRaises(errors.BadYamlError,
                      status.parse_mapreduce_yaml,
                      "")
  def testParse(self):
    """Parsing a single document in mapreduce.yaml."""
    mr_yaml = status.parse_mapreduce_yaml(
        "mapreduce:\n"
        "- name: Mapreduce1\n"
        "  mapper:\n"
        "    handler: Handler1\n"
        "    input_reader: Reader1\n"
        "    params_validator: Validator1\n"
        "    params:\n"
        "    - name: entity_kind\n"
        "      default: Kind1\n"
        "    - name: human_supplied1\n"
        "    - name: human_supplied2\n"
        "- name: Mapreduce2\n"
        "  mapper:\n"
        "    handler: Handler2\n"
        "    input_reader: Reader2\n")
    self.assertTrue(mr_yaml)
    self.assertEquals(2, len(mr_yaml.mapreduce))
    self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
    self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
    self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
    self.assertEquals("Validator1",
                      mr_yaml.mapreduce[0].mapper.params_validator)
    self.assertEquals(3, len(mr_yaml.mapreduce[0].mapper.params))
    self.assertEquals("entity_kind", mr_yaml.mapreduce[0].mapper.params[0].name)
    self.assertEquals("Kind1", mr_yaml.mapreduce[0].mapper.params[0].default)
    self.assertEquals("human_supplied1",
                      mr_yaml.mapreduce[0].mapper.params[1].name)
    self.assertEquals("human_supplied2",
                      mr_yaml.mapreduce[0].mapper.params[2].name)
    self.assertEquals("Mapreduce2", mr_yaml.mapreduce[1].name)
    self.assertEquals("Handler2", mr_yaml.mapreduce[1].mapper.handler)
    self.assertEquals("Reader2", mr_yaml.mapreduce[1].mapper.input_reader)
  def testParseOutputWriter(self):
    """Parsing a single document in mapreduce.yaml with output writer."""
    mr_yaml = status.parse_mapreduce_yaml(
        "mapreduce:\n"
        "- name: Mapreduce1\n"
        "  mapper:\n"
        "    handler: Handler1\n"
        "    input_reader: Reader1\n"
        "    output_writer: Writer1\n"
        )
    self.assertTrue(mr_yaml)
    self.assertEquals(1, len(mr_yaml.mapreduce))
    self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
    self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
    self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
    self.assertEquals("Writer1", mr_yaml.mapreduce[0].mapper.output_writer)
  def testParseMissingRequiredAttrs(self):
    """Test parsing with missing required attributes."""
    # Both input_reader and handler are mandatory for a mapper.
    self.assertRaises(errors.BadYamlError,
                      status.parse_mapreduce_yaml,
                      "mapreduce:\n"
                      "- name: Mapreduce1\n"
                      "  mapper:\n"
                      "    handler: Handler1\n")
    self.assertRaises(errors.BadYamlError,
                      status.parse_mapreduce_yaml,
                      "mapreduce:\n"
                      "- name: Mapreduce1\n"
                      "  mapper:\n"
                      "    input_reader: Reader1\n")
  def testBadValues(self):
    """Tests when some yaml values are of the wrong type."""
    self.assertRaises(errors.BadYamlError,
                      status.parse_mapreduce_yaml,
                      "mapreduce:\n"
                      "- name: Mapreduce1\n"
                      "  mapper:\n"
                      "    handler: Handler1\n"
                      "    input_reader: Reader1\n"
                      "    params:\n"
                      "    - name: $$Invalid$$\n")
  def testMultipleDocuments(self):
    """Tests when multiple documents are present."""
    self.assertRaises(errors.BadYamlError,
                      status.parse_mapreduce_yaml,
                      "mapreduce:\n"
                      "- name: Mapreduce1\n"
                      "  mapper:\n"
                      "    handler: Handler1\n"
                      "    input_reader: Reader1\n"
                      "---")
  def testOverlappingNames(self):
    """Tests when there are jobs with the same name."""
    self.assertRaises(errors.BadYamlError,
                      status.parse_mapreduce_yaml,
                      "mapreduce:\n"
                      "- name: Mapreduce1\n"
                      "  mapper:\n"
                      "    handler: Handler1\n"
                      "    input_reader: Reader1\n"
                      "- name: Mapreduce1\n"
                      "  mapper:\n"
                      "    handler: Handler1\n"
                      "    input_reader: Reader1\n")
  def testToDict(self):
    """Tests encoding the MR document as JSON."""
    mr_yaml = status.parse_mapreduce_yaml(
        "mapreduce:\n"
        "- name: Mapreduce1\n"
        "  mapper:\n"
        "    handler: Handler1\n"
        "    input_reader: Reader1\n"
        "    params_validator: Validator1\n"
        "    params:\n"
        "    - name: entity_kind\n"
        "      default: Kind1\n"
        "    - name: human_supplied1\n"
        "    - name: human_supplied2\n"
        "- name: Mapreduce2\n"
        "  mapper:\n"
        "    handler: Handler2\n"
        "    input_reader: Reader2\n")
    all_configs = status.MapReduceYaml.to_dict(mr_yaml)
    # Params without a default are encoded as None values.
    self.assertEquals(
        [
          {
            'name': 'Mapreduce1',
            'mapper_params_validator': 'Validator1',
            'mapper_params': {
                'entity_kind': 'Kind1',
                'human_supplied2': None,
                'human_supplied1': None},
            'mapper_handler': 'Handler1',
            'mapper_input_reader': 'Reader1'
          },
          {
            'mapper_input_reader': 'Reader2',
            'mapper_handler': 'Handler2',
            'name': 'Mapreduce2'
          }
        ], all_configs)
  def testToDictOutputWriter(self):
    """Tests encoding the MR document with output writer as JSON."""
    mr_yaml = status.parse_mapreduce_yaml(
        "mapreduce:\n"
        "- name: Mapreduce1\n"
        "  mapper:\n"
        "    handler: Handler1\n"
        "    input_reader: Reader1\n"
        "    output_writer: Writer1\n"
        )
    all_configs = status.MapReduceYaml.to_dict(mr_yaml)
    self.assertEquals(
        [
          {
            'name': 'Mapreduce1',
            'mapper_handler': 'Handler1',
            'mapper_input_reader': 'Reader1',
            'mapper_output_writer': 'Writer1',
          },
        ], all_configs)
class ResourceTest(testutil.HandlerTestBase):
  """Tests for the resource handler."""
  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    self.handler = status.ResourceHandler()
    self.handler.initialize(mock_webapp.MockRequest(),
                            mock_webapp.MockResponse())
    self.handler.request.path = "/mapreduce/path"
  def testPaths(self):
    """Tests that paths are accessible."""
    self.handler.get("status")
    self.assertTrue(self.handler.response.out.getvalue().startswith(
        "<!DOCTYPE html>"))
    self.assertEquals("text/html",
                      self.handler.response.headers["Content-Type"])
    # Reset the response buffer before issuing the second request.
    self.handler.response.out.truncate(0)
    self.handler.get("jquery.js")
    self.assertTrue(self.handler.response.out.getvalue().startswith(
        "/*!"))
    self.assertEquals("text/javascript",
                      self.handler.response.headers["Content-Type"])
  def testCachingHeaders(self):
    """Tests that caching headers are correct."""
    self.handler.get("status")
    self.assertEquals("public; max-age=300",
                      self.handler.response.headers["Cache-Control"])
  def testMissing(self):
    """Tests when a resource is requested that doesn't exist."""
    self.handler.get("unknown")
    self.assertEquals(404, self.handler.response.status)
class ListConfigsTest(testutil.HandlerTestBase):
  """Tests for the ListConfigsHandler."""
  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    self.handler = status.ListConfigsHandler()
    self.handler.initialize(mock_webapp.MockRequest(),
                            mock_webapp.MockResponse())
    self.handler.request.path = "/mapreduce/command/path"
    # Simulate an AJAX request; the handler rejects anything else.
    self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
  def testCSRF(self):
    """Test that we check the X-Requested-With header."""
    del self.handler.request.headers["X-Requested-With"]
    self.handler.get()
    self.assertEquals(403, self.handler.response.status)
  def testBasic(self):
    """Tests listing available configs."""
    # Stub out the yaml loader for the duration of the request, restoring
    # it in the finally block below.
    old_get_yaml = status.get_mapreduce_yaml
    status.get_mapreduce_yaml = lambda: status.parse_mapreduce_yaml(
        "mapreduce:\n"
        "- name: Mapreduce1\n"
        "  mapper:\n"
        "    handler: Handler1\n"
        "    input_reader: Reader1\n"
        "    params_validator: Validator1\n"
        "    params:\n"
        "    - name: entity_kind\n"
        "      default: Kind1\n"
        "    - name: human_supplied1\n"
        "    - name: human_supplied2\n"
        "- name: Mapreduce2\n"
        "  mapper:\n"
        "    handler: Handler2\n"
        "    input_reader: Reader2\n"
        "  params_validator: MapreduceValidator\n"
        "  params:\n"
        "  - name: foo\n"
        "    value: bar\n")
    try:
      self.handler.get()
    finally:
      status.get_mapreduce_yaml = old_get_yaml
    self.assertEquals(
        {u'configs': [
          {u'mapper_params_validator': u'Validator1',
           u'mapper_params': {
               u'entity_kind': u'Kind1',
               u'human_supplied2': None,
               u'human_supplied1': None},
           u'mapper_input_reader': u'Reader1',
           u'mapper_handler': u'Handler1',
           u'name': u'Mapreduce1'},
          {u'mapper_input_reader': u'Reader2',
           u'mapper_handler': u'Handler2',
           u'name': u'Mapreduce2',
           u'params': {
               u'foo': u'bar',},
          }]},
        json.loads(self.handler.response.out.getvalue()))
    self.assertEquals("text/javascript",
                      self.handler.response.headers["Content-Type"])
class ListJobsTest(testutil.HandlerTestBase):
  """Tests listing active and inactive jobs."""
  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    # Handler used to start jobs that the list handler will then report.
    self.start = handlers.StartJobHandler()
    self.start.initialize(mock_webapp.MockRequest(),
                          mock_webapp.MockResponse())
    self.start.request.path = "/mapreduce/command/start"
    self.start.request.set(
        "mapper_input_reader",
        "mapreduce.input_readers.DatastoreInputReader")
    self.start.request.set("mapper_handler", "__main__.TestMap")
    self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
    self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
    # Handler under test.
    self.handler = status.ListJobsHandler()
    self.handler.initialize(mock_webapp.MockRequest(),
                            mock_webapp.MockResponse())
    self.handler.request.path = "/mapreduce/command/list"
    self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
  def testCSRF(self):
    """Test that we check the X-Requested-With header."""
    TestKind().put()
    del self.start.request.headers["X-Requested-With"]
    self.start.post()
    self.assertEquals(403, self.start.response.status)
    del self.handler.request.headers["X-Requested-With"]
    self.handler.get()
    self.assertEquals(403, self.handler.response.status)
  def testBasic(self):
    """Tests when there are fewer than the max results to render."""
    TestKind().put()
    self.start.request.set("name", "my job 1")
    self.start.post()
    time.sleep(.1)
    self.start.request.set("name", "my job 2")
    self.start.post()
    time.sleep(.1)
    self.start.request.set("name", "my job 3")
    self.start.post()
    self.handler.get()
    result = json.loads(self.handler.response.out.getvalue())
    expected_args = set([
        "active",
        "active_shards",
        "chart_url",
        "chart_width",
        "mapreduce_id",
        "name",
        "shards",
        "start_timestamp_ms",
        "updated_timestamp_ms",
    ])
    # Jobs are listed most-recent first.
    self.assertEquals(3, len(result["jobs"]))
    self.assertEquals("my job 3", result["jobs"][0]["name"])
    self.assertEquals("my job 2", result["jobs"][1]["name"])
    self.assertEquals("my job 1", result["jobs"][2]["name"])
    self.assertEquals(expected_args, set(result["jobs"][0].keys()))
    self.assertEquals(expected_args, set(result["jobs"][1].keys()))
    self.assertEquals(expected_args, set(result["jobs"][2].keys()))
  def testCursor(self):
    """Tests when a job cursor is present."""
    TestKind().put()
    self.start.request.set("name", "my job 1")
    self.start.post()
    time.sleep(.1) # Can not start two jobs before time advances
    self.start.request.set("name", "my job 2")
    self.start.post()
    self.handler.request.set("count", "1")
    self.handler.get()
    result = json.loads(self.handler.response.out.getvalue())
    self.assertEquals(1, len(result["jobs"]))
    self.assertTrue("cursor" in result)
    self.handler.response.out.truncate(0)
    self.handler.request.set("count", "1")
    self.handler.request.set("cursor", result['cursor'])
    self.handler.get()
    result2 = json.loads(self.handler.response.out.getvalue())
    self.assertEquals(1, len(result2["jobs"]))
    # No cursor is returned on the last page of results.
    self.assertFalse("cursor" in result2)
  def testNoJobs(self):
    """Tests when there are no jobs."""
    self.handler.get()
    result = json.loads(self.handler.response.out.getvalue())
    self.assertEquals({'jobs': []}, result)
class GetJobDetailTest(testutil.HandlerTestBase):
  """Tests listing job status detail."""
  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    # Populate the datastore so the started job has entities to map over.
    for _ in range(100):
      TestKind().put()
    self.start = handlers.StartJobHandler()
    self.start.initialize(mock_webapp.MockRequest(),
                          mock_webapp.MockResponse())
    self.start.request.path = "/mapreduce/command/start"
    self.start.request.set("name", "my job 1")
    self.start.request.set(
        "mapper_input_reader",
        "mapreduce.input_readers.DatastoreInputReader")
    self.start.request.set("mapper_handler", "__main__.TestMap")
    self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
    self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
    self.start.post()
    # Remember the id of the job just started for use by the tests.
    result = json.loads(self.start.response.out.getvalue())
    self.mapreduce_id = result["mapreduce_id"]
    self.handler = status.GetJobDetailHandler()
    self.handler.initialize(mock_webapp.MockRequest(),
                            mock_webapp.MockResponse())
    self.handler.request.path = "/mapreduce/command/list"
    self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
  def KickOffMapreduce(self):
    """Executes pending kickoff task."""
    test_support.execute_all_tasks(self.taskqueue)
  def testCSRF(self):
    """Test that we check the X-Requested-With header."""
    del self.handler.request.headers["X-Requested-With"]
    self.handler.get()
    self.assertEquals(403, self.handler.response.status)
  def testBasic(self):
    """Tests getting the job details."""
    self.KickOffMapreduce()
    self.handler.request.set("mapreduce_id", self.mapreduce_id)
    self.handler.get()
    result = json.loads(self.handler.response.out.getvalue())
    expected_keys = set([
        "active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
        "name", "result_status", "shards", "start_timestamp_ms",
        "updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
    expected_shard_keys = set([
        "active", "counters", "last_work_item", "result_status",
        "shard_description", "shard_id", "shard_number",
        "updated_timestamp_ms"])
    self.assertEquals(expected_keys, set(result.keys()))
    self.assertEquals(8, len(result["shards"]))
    self.assertEquals(expected_shard_keys, set(result["shards"][0].keys()))
  def testBeforeKickOff(self):
    """Tests getting the job details before the kickoff task has run."""
    self.handler.request.set("mapreduce_id", self.mapreduce_id)
    self.handler.get()
    result = json.loads(self.handler.response.out.getvalue())
    expected_keys = set([
        "active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
        "name", "result_status", "shards", "start_timestamp_ms",
        "updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
    self.assertEquals(expected_keys, set(result.keys()))
  def testBadJobId(self):
    """Tests when an invalid job ID is supplied."""
    self.handler.request.set("mapreduce_id", "does not exist")
    self.handler.get()
    result = json.loads(self.handler.response.out.getvalue())
    self.assertEquals(
        {"error_message": "\"Could not find job with ID 'does not exist'\"",
         "error_class": "KeyError"},
        result)
# TODO(user): Add tests for abort
# TODO(user): Add tests for cleanup
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| apache-2.0 | 267,361,399,960,671,040 | -4,585,653,723,613,998,600 | 34.88676 | 80 | 0.605855 | false |
katsikas/gnuradio | grc/gui/BlockTreeWindow.py | 7 | 7639 | """
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Constants import DEFAULT_BLOCKS_WINDOW_WIDTH, DND_TARGETS
import Utils
import pygtk
pygtk.require('2.0')
import gtk
import gobject
# Column indices into the gtk.TreeStore backing the block tree view.
NAME_INDEX = 0
KEY_INDEX = 1
DOC_INDEX = 2
# Cheetah template rendering a block's documentation tooltip.
DOC_MARKUP_TMPL="""\
#if $doc
$encode($doc)#slurp
#else
undocumented#slurp
#end if"""
# Cheetah template rendering a category tooltip.
CAT_MARKUP_TMPL="""Category: $cat"""
class BlockTreeWindow(gtk.VBox):
    """The block selection panel."""
    def __init__(self, platform, get_flow_graph):
        """
        BlockTreeWindow constructor.
        Create a tree view of the possible blocks in the platform.
        The tree view nodes will be category names, the leaves will be block names.
        A mouse double click or button press action will trigger the add block event.
        @param platform the particular platform will all block prototypes
        @param get_flow_graph get the selected flow graph
        """
        gtk.VBox.__init__(self)
        self.platform = platform
        self.get_flow_graph = get_flow_graph
        #make the tree model for holding blocks
        self.treestore = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
        self.treeview = gtk.TreeView(self.treestore)
        self.treeview.set_enable_search(False) #disable pop up search box
        self.treeview.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.treeview.connect('button-press-event', self._handle_mouse_button_press)
        selection = self.treeview.get_selection()
        selection.set_mode('single')
        selection.connect('changed', self._handle_selection_change)
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('Blocks', renderer, text=NAME_INDEX)
        self.treeview.append_column(column)
        #setup the search (interactive typing filters via _handle_search)
        self.treeview.set_enable_search(True)
        self.treeview.set_search_equal_func(self._handle_search)
        #try to enable the tooltips (available in pygtk 2.12 and above)
        try: self.treeview.set_tooltip_column(DOC_INDEX)
        except: pass
        #setup drag and drop
        self.treeview.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, DND_TARGETS, gtk.gdk.ACTION_COPY)
        self.treeview.connect('drag-data-get', self._handle_drag_get_data)
        #make the scrolled window to hold the tree view
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled_window.add_with_viewport(self.treeview)
        scrolled_window.set_size_request(DEFAULT_BLOCKS_WINDOW_WIDTH, -1)
        self.pack_start(scrolled_window)
        #add button
        self.add_button = gtk.Button(None, gtk.STOCK_ADD)
        self.add_button.connect('clicked', self._handle_add_button)
        self.pack_start(self.add_button, False)
        #map categories to iters, automatic mapping for root
        self._categories = {tuple(): None}
        #add blocks and categories
        self.platform.load_block_tree(self)
        #initialize
        self._update_add_button()
    ############################################################
    ## Block Tree Methods
    ############################################################
    def add_block(self, category, block=None):
        """
        Add a block with category to this selection window.
        Add only the category when block is None.
        @param category the category list or path string
        @param block the block object or None
        """
        if isinstance(category, str): category = category.split('/')
        category = tuple(filter(lambda x: x, category)) #tuple is hashable
        #add category and all sub categories
        for i, cat_name in enumerate(category):
            sub_category = category[:i+1]
            if sub_category not in self._categories:
                #parent iter is the next-shorter category prefix (None at root)
                iter = self.treestore.insert_before(self._categories[sub_category[:-1]], None)
                self.treestore.set_value(iter, NAME_INDEX, '[ %s ]'%cat_name)
                self.treestore.set_value(iter, KEY_INDEX, '')
                self.treestore.set_value(iter, DOC_INDEX, Utils.parse_template(CAT_MARKUP_TMPL, cat=cat_name))
                self._categories[sub_category] = iter
        #add block
        if block is None: return
        iter = self.treestore.insert_before(self._categories[category], None)
        self.treestore.set_value(iter, NAME_INDEX, block.get_name())
        self.treestore.set_value(iter, KEY_INDEX, block.get_key())
        self.treestore.set_value(iter, DOC_INDEX, Utils.parse_template(DOC_MARKUP_TMPL, doc=block.get_doc()))
    ############################################################
    ## Helper Methods
    ############################################################
    def _get_selected_block_key(self):
        """
        Get the currently selected block key.
        @return the key of the selected block or a empty string
        """
        selection = self.treeview.get_selection()
        treestore, iter = selection.get_selected()
        #categories store an empty key, so this also yields '' for them
        return iter and treestore.get_value(iter, KEY_INDEX) or ''
    def _update_add_button(self):
        """
        Update the add button's sensitivity.
        The button should be active only if a block is selected.
        """
        key = self._get_selected_block_key()
        self.add_button.set_sensitive(bool(key))
    def _add_selected_block(self):
        """
        Add the selected block with the given key to the flow graph.
        """
        key = self._get_selected_block_key()
        if key: self.get_flow_graph().add_new_block(key)
    ############################################################
    ## Event Handlers
    ############################################################
    def _handle_search(self, model, column, key, iter):
        """
        Search-equal callback: rebuild a transient 'Search: ...' category
        holding the blocks whose key or name contains the typed key.
        """
        #determine which blocks match the search key
        blocks = self.get_flow_graph().get_parent().get_blocks()
        matching_blocks = filter(lambda b: key in b.get_key() or key in b.get_name().lower(), blocks)
        #remove the old search category
        try: self.treestore.remove(self._categories.pop((self._search_category, )))
        except (KeyError, AttributeError): pass #nothing to remove
        #create a search category
        if not matching_blocks: return
        self._search_category = 'Search: %s'%key
        for block in matching_blocks: self.add_block(self._search_category, block)
        #expand the search category
        path = self.treestore.get_path(self._categories[(self._search_category, )])
        self.treeview.collapse_all()
        self.treeview.expand_row(path, open_all=False)
    def _handle_drag_get_data(self, widget, drag_context, selection_data, info, time):
        """
        Handle a drag and drop by setting the key to the selection object.
        This will call the destination handler for drag and drop.
        Only call set when the key is valid to ignore DND from categories.
        """
        key = self._get_selected_block_key()
        if key: selection_data.set(selection_data.target, 8, key)
    def _handle_mouse_button_press(self, widget, event):
        """
        Handle the mouse button press.
        If a left double click is detected, call add selected block.
        """
        if event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS:
            self._add_selected_block()
    def _handle_selection_change(self, selection):
        """
        Handle a selection change in the tree view.
        If a selection changes, set the add button sensitive.
        """
        self._update_add_button()
    def _handle_add_button(self, widget):
        """
        Handle the add button clicked signal.
        Call add selected block.
        """
        self._add_selected_block()
| gpl-3.0 | -713,326,298,486,548,700 | 7,705,266,542,252,801,000 | 37.97449 | 103 | 0.697473 | false |
tsgit/invenio | modules/bibauthorid/lib/bibauthorid_rabbit.py | 2 | 10561 | from operator import itemgetter
from itertools import cycle, imap, chain, izip
from invenio.bibauthorid_name_utils import compare_names as comp_names, \
create_matchable_name
from invenio import bibauthorid_config as bconfig
from invenio.bibauthorid_backinterface import get_authors_by_name, \
add_signature, get_signatures_of_paper, \
remove_signatures, modify_signature, filter_bibrecs_outside, get_deleted_papers, \
create_new_author_by_signature as new_person_from_signature, get_all_valid_bibrecs, \
remove_papers, get_author_refs_of_paper,\
get_coauthor_refs_of_paper, get_name_by_bibref, \
get_author_by_external_id, update_canonical_names_of_authors, \
update_external_ids_of_authors, remove_empty_authors
from invenio.bibauthorid_matrix_optimization import maximized_mapping
from invenio.bibauthorid_dbinterface import populate_partial_marc_caches
from invenio.bibauthorid_dbinterface import destroy_partial_marc_caches
from invenio.bibauthorid_general_utils import memoized
from invenio.bibtask import task_update_progress
from datetime import datetime
from invenio.dbquery import run_sql
from invenio.bibauthorid_logutils import Logger
# Short alias for datetime.now.
now = datetime.now
# Whether signatures may be matched via external identifiers, and which
# identifier families to consult (both come from bibauthorid_config).
USE_EXT_IDS = bconfig.RABBIT_USE_EXTERNAL_IDS
EXT_IDS_TO_USE = bconfig.RABBIT_EXTERNAL_IDS_TO_USE
if USE_EXT_IDS:
    # One getter per enabled external-id family; consulted in this order.
    external_id_getters = list()
    if 'InspireID' in EXT_IDS_TO_USE:
        from invenio.bibauthorid_backinterface import get_inspire_id_of_signature
        external_id_getters.append(get_inspire_id_of_signature)
    if 'OrcidID' in EXT_IDS_TO_USE:
        from invenio.bibauthorid_backinterface import get_orcid_id_of_signature
        external_id_getters.append(get_orcid_id_of_signature)
    if 'KAKEN' in EXT_IDS_TO_USE:
        from invenio.bibauthorid_backinterface import get_kaken_id_of_signature
        external_id_getters.append(get_kaken_id_of_signature)
# Lazily built mapping m_name -> personid; see populate_mnames_pids_cache.
M_NAME_PIDS_CACHE = None
# The first element of this list is the master function
M_NAME_FUNCTIONS = [create_matchable_name]
def populate_mnames_pids_cache():
    """Load the matchable-name -> personid lookup table from the database."""
    global M_NAME_PIDS_CACHE
    rows = run_sql("select distinct(m_name), personid from aidPERSONIDPAPERS where flag>-2")
    M_NAME_PIDS_CACHE = dict(rows)
def destroy_mnames_pids_cache():
    """Discard the in-memory name cache so the next run rebuilds it fresh."""
    global M_NAME_PIDS_CACHE
    M_NAME_PIDS_CACHE = None
def rabbit(bibrecs=None, check_invalid_papers=False,
           personids_to_update_extids=None, verbose=False):
    """Synchronize author-paper signatures with the current bibliographic data.
    For each considered record the signatures stored in aidPERSONIDPAPERS are
    compared with the author/coauthor references found in the record's MARC:
    matching signatures are moved, stale ones removed, and unmatched new ones
    are attached to existing profiles (by external id or matchable name) or to
    freshly created profiles.
    Args:
        bibrecs: iterable of record ids to process; falsy means all valid
            records.
        check_invalid_papers: if True, also drop stored papers whose records
            are no longer valid.
        personids_to_update_extids: extra person ids whose external ids should
            be refreshed even if untouched by this run.
        verbose: if True, make the logger verbose.
    """
    logger = Logger("Rabbit")
    if verbose:
        logger.verbose = True
    if not bibrecs:
        logger.log("Running on all records")
    else:
        logger.log("Running on %s " % (str(bibrecs)))
    populate_mnames_pids_cache()
    global M_NAME_PIDS_CACHE
    # Names are compared pairwise; memoize on the sorted pair so (a, b) and
    # (b, a) share one cache entry.
    memoized_compare_names = memoized(comp_names)
    compare_names = lambda x, y: memoized_compare_names(*sorted((x, y)))
    def find_pids_by_matchable_name_with_cache(matchable_name):
        # Cache hit returns a single-element list; on miss fall back to the
        # DB lookup and remember the first match.
        try:
            matched_pids = [M_NAME_PIDS_CACHE[matchable_name]]
        except KeyError:
            matched_pids = get_authors_by_name(matchable_name,
                                               use_matchable_name=True)
            if matched_pids:
                M_NAME_PIDS_CACHE[matchable_name] = matched_pids[0]
        return matched_pids
    if USE_EXT_IDS:
        def get_matched_pids_by_external_ids(sig, rec, pids_having_rec):
            '''
            This function returns all the matched pids after iterating
            through all available external IDs of the system.
            '''
            for get_external_id_of_signature in external_id_getters:
                external_id = get_external_id_of_signature(sig + (rec,))
                if external_id:
                    matched_pids = list(get_author_by_external_id(external_id[0]))
                    # Discard the candidate if that person already has another
                    # signature on this record (one person, one signature).
                    if matched_pids and int(matched_pids[0][0]) in pids_having_rec:
                        matched_pids = list()
                    return matched_pids
    # Minimum name-comparison score for two signatures to be considered the
    # same person.
    threshold = 0.8
    if not bibrecs or check_invalid_papers:
        all_bibrecs = get_all_valid_bibrecs()
        if not bibrecs:
            bibrecs = all_bibrecs
        if check_invalid_papers:
            filter_bibrecs_outside(all_bibrecs)
    updated_pids = set()
    deleted = frozenset(p[0] for p in get_deleted_papers())
    bibrecs = list(bibrecs)
    for idx, rec in enumerate(bibrecs):
        logger.log("Considering %s" % str(rec))
        if idx % 100 == 0:
            task_update_progress("%d/%d current: %d" % (idx, len(bibrecs), rec))
        # Refresh the partial MARC caches in blocks of 1000 records to bound
        # their memory footprint.
        if idx % 1000 == 0:
            destroy_partial_marc_caches()
            populate_partial_marc_caches(bibrecs[idx: idx + 1000])
        logger.log(float(idx) / len(bibrecs), "%d/%d" % (idx, len(bibrecs)))
        if rec in deleted:
            remove_papers([rec])
            continue
        author_refs = get_author_refs_of_paper(rec)
        coauthor_refs = get_coauthor_refs_of_paper(rec)
        # Signatures present in the record's MARC: (100, ref) for first
        # authors, (700, ref) for coauthors.
        markrefs = frozenset(chain(izip(cycle([100]), imap(itemgetter(0),
                                                           author_refs)),
                                   izip(cycle([700]), imap(itemgetter(0),
                                                           coauthor_refs))))
        personid_rows = [map(int, row[:3]) + [row[4]]
                         for row in get_signatures_of_paper(rec)]
        personidrefs_names = dict(((row[1], row[2]), row[3])
                                  for row in personid_rows)
        personidrefs = frozenset(personidrefs_names.keys())
        new_signatures = list(markrefs - personidrefs)
        old_signatures = list(personidrefs - markrefs)
        new_signatures_names = dict((new, get_name_by_bibref(new))
                                    for new in new_signatures)
        # matrix |new_signatures| X |old_signatures|
        matrix = [[compare_names(new_signatures_names[new],
                                 personidrefs_names[old])
                   for old in old_signatures] for new in new_signatures]
        logger.log(" - Deleted signatures: %s" % str(old_signatures))
        logger.log(" - Added signatures: %s" % str(new_signatures))
        logger.log(" - Matrix: %s" % str(matrix))
        #[new_signatures, old_signatures]
        best_match = [(new_signatures[new], old_signatures[old])
                      for new, old, score in maximized_mapping(matrix)
                      if score > threshold]
        logger.log(" - Best match: %s " % str(best_match))
        for new, old in best_match:
            logger.log("  - - Moving signature: %s on %s to %s as %s" %
                       (old, rec, new, new_signatures_names[new]))
            modify_signature(old, rec, new, new_signatures_names[new])
        remove_signatures(tuple(list(old) + [rec]) for old in old_signatures)
        not_matched = frozenset(new_signatures) - frozenset(map(itemgetter(0),
                                                                best_match))
        remaining_personid_rows = ([x for x in personid_rows
                                    if x[1:3] in old_signatures])
        pids_having_rec = set([int(row[0]) for row in remaining_personid_rows])
        logger.log(" - Not matched: %s" % str(not_matched))
        if not_matched:
            used_pids = set(r[0] for r in personid_rows)
            for sig in not_matched:
                name = new_signatures_names[sig]
                matchable_name = create_matchable_name(name)
                matched_pids = list()
                if USE_EXT_IDS:
                    matched_pids = get_matched_pids_by_external_ids(sig, rec, pids_having_rec)
                    if matched_pids:
                        add_signature(list(sig) + [rec], name,
                                      matched_pids[0][0], m_name=matchable_name)
                        M_NAME_PIDS_CACHE[matchable_name] = matched_pids[0][0]
                        updated_pids.add(matched_pids[0][0])
                        pids_having_rec.add(matched_pids[0][0])
                        continue
                matched_pids = find_pids_by_matchable_name_with_cache(matchable_name)
                if not matched_pids:
                    # Fall back to the alternative matchable-name functions
                    # (the first entry of M_NAME_FUNCTIONS is the master one,
                    # already tried above).
                    for matching_function in M_NAME_FUNCTIONS[1:]:
                        matchable_name = matching_function(name)
                        matched_pids = find_pids_by_matchable_name_with_cache(matchable_name)
                        if matched_pids:
                            break
                matched_pids = [p for p in matched_pids if int(p) not in used_pids]
                best_matched_pid = None
                for matched_pid in matched_pids:
                    # Because of the wrongly labeled data in the db, all
                    # of the possible choices have to be checked. If one of the
                    # coauthors, who had his signature already considered, claimed
                    # in the past one of the signatures of currently considered
                    # author, the algorithm will think that two signatures belong
                    # to the same person, and, will create an unnecessary new
                    # profile.
                    if not int(matched_pid) in pids_having_rec:
                        best_matched_pid = matched_pid
                        break
                if not best_matched_pid:
                    new_pid = new_person_from_signature(list(sig) + [rec],
                                                        name, matchable_name)
                    M_NAME_PIDS_CACHE[matchable_name] = new_pid
                    used_pids.add(new_pid)
                    updated_pids.add(new_pid)
                else:
                    add_signature(list(sig) + [rec], name,
                                  best_matched_pid, m_name=matchable_name)
                    M_NAME_PIDS_CACHE[matchable_name] = best_matched_pid
                    used_pids.add(best_matched_pid)
                    updated_pids.add(best_matched_pid)
                    pids_having_rec.add(best_matched_pid)
        logger.log('Finished with %s' % str(rec))
    logger.update_status_final()
    destroy_partial_marc_caches()
    if personids_to_update_extids:
        updated_pids |= set(personids_to_update_extids)
    if updated_pids: # an empty set will update all canonical_names
        update_canonical_names_of_authors(updated_pids)
        update_external_ids_of_authors(updated_pids,
                                       limit_to_claimed_papers=bconfig.LIMIT_EXTERNAL_IDS_COLLECTION_TO_CLAIMED_PAPERS,
                                       force_cache_tables=True)
    destroy_partial_marc_caches()
    destroy_mnames_pids_cache()
    remove_empty_authors()
    task_update_progress("Done!")
| gpl-2.0 | 2,806,372,719,344,303,000 | -2,620,440,682,952,134,700 | 39.30916 | 119 | 0.589812 | false |
stosdev/zebra-supervisor | judge/models/profile.py | 1 | 1441 | # -*- coding: utf-8 -*-
"""Module containing judge user profiles and various utilities."""
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
@python_2_unicode_compatible
class Profile(models.Model):
    """The users profile class."""
    # One profile per user; reachable from a User as ``user.profile``.
    user = models.OneToOneField(User, related_name='profile')
    institute_name = models.CharField(_("Institute name"), max_length=255,
                                      blank=True, null=True)
    team_name = models.CharField(_("Team name"), max_length=255, blank=True,
                                 null=True)
    room_number = models.CharField(_("Room number"), max_length=10, blank=True,
                                   null=True)
    computer_number = models.CharField(_("Computer number"), max_length=10,
                                       blank=True, null=True)
    class Meta:
        verbose_name = _("Profile")
        verbose_name_plural = _("Profiles")
        app_label = 'judge'
    def __str__(self):
        # Profiles display as the owning user's username (admin, shell, ...).
        return u"{}".format(self.user.username)
def create_profile(sender, instance, created, **kwargs):
    """Create an empty profile as soon as a user is created."""
    # React only to the initial save of the User, not to later updates.
    if not created:
        return
    Profile.objects.create(user=instance)
post_save.connect(create_profile, sender=User)
| gpl-3.0 | -3,416,690,337,363,316,000 | 999,928,780,942,422,500 | 34.146341 | 79 | 0.628036 | false |
MDPvis/rlpy | rlpy/Representations/IndependentDiscretization.py | 4 | 2174 | """Independent Discretization"""
from .Representation import Representation
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class IndependentDiscretization(Representation):
    """
    Creates a feature for each discrete bin in each dimension; the feature
    vector for a given state is comprised of binary features, where only the
    single feature in a particular dimension is 1, all others 0.
    I.e., in a particular state, the sum of all elements of a feature vector
    equals the number of dimensions in the state space.
    Note that This is the minimum number of binary features required to
    uniquely represent a state in a given finite discrete domain.
    """

    def __init__(self, domain, discretization=20):
        self.setBinsPerDimension(domain, discretization)
        self.features_num = int(sum(self.bins_per_dim))
        # Highest (inclusive) feature id belonging to each dimension.
        self.maxFeatureIDperDimension = np.cumsum(self.bins_per_dim) - 1
        super(
            IndependentDiscretization,
            self).__init__(
            domain,
            discretization)

    def phi_nonTerminal(self, s):
        """Return the binary feature vector for non-terminal state ``s``."""
        F_s = np.zeros(
            self.features_num,
            'bool')
        F_s[self.activeInitialFeatures(s)] = 1
        return F_s

    def getDimNumber(self, f):
        """Return the dimension number corresponding to feature ``f``."""
        dim = np.searchsorted(self.maxFeatureIDperDimension, f)
        return dim

    def getFeatureName(self, feat_id):
        """Return a human-readable ``DimName=index`` label for ``feat_id``.

        Returns None when the domain does not declare ``DimNames``.
        Fix: the original computed the name but never returned it (and left a
        debug ``print`` behind); the constructed label is now returned.
        """
        if hasattr(self.domain, 'DimNames'):
            dim = np.searchsorted(self.maxFeatureIDperDimension, feat_id)
            # Index of the feature within its own dimension.
            index_in_dim = feat_id
            if dim != 0:
                index_in_dim = feat_id - self.maxFeatureIDperDimension[dim - 1]
            # NOTE(review): for dim > 0 this index is offset by one relative
            # to dim 0 (first feature of a later dimension yields 1, not 0);
            # preserved as-is since it is display-only -- confirm intent.
            return self.domain.DimNames[dim] + '=' + str(index_in_dim)

    def featureType(self):
        """Features are binary."""
        return bool
| bsd-3-clause | -9,125,945,426,021,064,000 | -600,929,678,089,901,200 | 35.847458 | 79 | 0.643054 | false |
johndpope/tensorflow | tensorflow/python/summary/writer/event_file_writer.py | 104 | 5848 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import threading
import time
import six
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
class EventFileWriter(object):
  """Writes `Event` protocol buffers to an event file.
  The `EventFileWriter` class creates an event file in the specified directory,
  and asynchronously writes Event protocol buffers to the file. The Event file
  is encoded using the tfrecord format, which is similar to RecordIO.
  """
  def __init__(self, logdir, max_queue=10, flush_secs=120,
               filename_suffix=None):
    """Creates a `EventFileWriter` and an event file to write to.
    On construction the summary writer creates a new event file in `logdir`.
    This event file will contain `Event` protocol buffers, which are written to
    disk via the add_event method.
    The other arguments to the constructor control the asynchronous writes to
    the event file:
    *  `flush_secs`: How often, in seconds, to flush the added summaries
       and events to disk.
    *  `max_queue`: Maximum number of summaries or events pending to be
       written to disk before one of the 'add' calls block.
    Args:
      logdir: A string. Directory where event file will be written.
      max_queue: Integer. Size of the queue for pending events and summaries.
      flush_secs: Number. How often, in seconds, to flush the
        pending events and summaries to disk.
      filename_suffix: A string. Every event file's name is suffixed with
        `filename_suffix`.
    """
    self._logdir = logdir
    if not gfile.IsDirectory(self._logdir):
      gfile.MakeDirs(self._logdir)
    # Bounded queue: add_event blocks once max_queue events are pending.
    self._event_queue = six.moves.queue.Queue(max_queue)
    self._ev_writer = pywrap_tensorflow.EventsWriter(
        compat.as_bytes(os.path.join(self._logdir, "events")))
    self._flush_secs = flush_secs
    # Sentinel placed on the queue to tell the worker thread to terminate.
    self._sentinel_event = self._get_sentinel_event()
    if filename_suffix:
      self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))
    self._closed = False
    self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                      self._flush_secs, self._sentinel_event)
    self._worker.start()
  def _get_sentinel_event(self):
    """Generate a sentinel event for terminating worker."""
    return event_pb2.Event()
  def get_logdir(self):
    """Returns the directory where event file will be written."""
    return self._logdir
  def reopen(self):
    """Reopens the EventFileWriter.
    Can be called after `close()` to add more events in the same directory.
    The events will go into a new events file.
    Does nothing if the EventFileWriter was not closed.
    """
    if self._closed:
      self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                        self._flush_secs, self._sentinel_event)
      self._worker.start()
      self._closed = False
  def add_event(self, event):
    """Adds an event to the event file.
    Args:
      event: An `Event` protocol buffer.
    """
    if not self._closed:
      self._event_queue.put(event)
  def flush(self):
    """Flushes the event file to disk.
    Call this method to make sure that all pending events have been written to
    disk.
    """
    # Wait for the worker to drain the queue before flushing the writer.
    self._event_queue.join()
    self._ev_writer.Flush()
  def close(self):
    """Flushes the event file to disk and close the file.
    Call this method when you do not need the summary writer anymore.
    """
    self.add_event(self._sentinel_event)
    self.flush()
    self._worker.join()
    self._ev_writer.Close()
    self._closed = True
class _EventLoggerThread(threading.Thread):
  """Thread that logs events."""
  def __init__(self, queue, ev_writer, flush_secs, sentinel_event):
    """Creates an _EventLoggerThread.
    Args:
      queue: A Queue from which to dequeue events.
      ev_writer: An event writer. Used to log brain events for
       the visualizer.
      flush_secs: How often, in seconds, to flush the
        pending file to disk.
      sentinel_event: A sentinel element in queue that tells this thread to
        terminate.
    """
    threading.Thread.__init__(self)
    # Daemon so an unclosed writer does not keep the process alive.
    self.daemon = True
    self._queue = queue
    self._ev_writer = ev_writer
    self._flush_secs = flush_secs
    # The first event will be flushed immediately.
    self._next_event_flush_time = 0
    self._sentinel_event = sentinel_event
  def run(self):
    """Dequeue events and write them until the sentinel is seen."""
    while True:
      event = self._queue.get()
      if event is self._sentinel_event:
        self._queue.task_done()
        break
      try:
        self._ev_writer.WriteEvent(event)
        # Flush the event writer every so often.
        now = time.time()
        if now > self._next_event_flush_time:
          self._ev_writer.Flush()
          # Do it again in two minutes.
          self._next_event_flush_time = now + self._flush_secs
      finally:
        # task_done in finally so queue.join() cannot deadlock on an error.
        self._queue.task_done()
| apache-2.0 | 616,485,834,855,060,600 | 3,378,484,171,207,113,700 | 33 | 80 | 0.663988 | false |
backmari/moose | python/chigger/tests/line/line.py | 6 | 1115 | #!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import chigger
# Build a single x/y line series and place it on a graph.
line = chigger.graphs.Line(x=[0,1], y=[2,4])
graph = chigger.graphs.Graph(line)
# test=True renders off-screen; the result is written to disk below.
window = chigger.RenderWindow(graph, size=[300,300], test=True)
window.write('line.png')
window.start()
| lgpl-2.1 | 2,539,118,319,596,977,000 | -7,689,908,972,927,104,000 | 49.681818 | 65 | 0.386547 | false |
phobson/bokeh | scripts/version_update.py | 1 | 2576 | import os
import re
import sys
def check_input(new_ver):
    """Return True (error flag) unless *new_ver* has the form X.X.X.

    Returns False for a well-formed version string.  The truthy return on
    failure matches the existing call site (``if check_input(v): sys.exit(1)``).
    """
    # Escape the dots and anchor the end: the previous pattern used
    # unescaped '.' (matches any character) and no trailing anchor, so
    # strings such as '1a2b3' or '1.2.3.4' were wrongly accepted.
    pat = r'\d+\.\d+\.\d+$'
    if not re.match(pat, new_ver):
        print("The new version must be in the format X.X.X (ex. '0.6.0')")
        return True
    return False
def version_update(new_ver, file_array):
    """Replace an existing version/release number in each file of
    *file_array* with the user-supplied version number *new_ver*.

    Prints the files that were updated and the version number that was
    replaced; warns about files where no version string was found.
    """
    # Matches e.g.  version = "1.2.3"  or  "release": "1.2.3"
    # (dots escaped -- the previous pattern's unescaped '.' matched any char).
    pat = r"""(release|version)([\" ][:=] [\"\'])(\d+\.\d+\.\d+)([\"\'])"""
    # Files where the version number was successfully replaced.
    replaced = []
    # Holds the last previously-found version number once a match is seen.
    early_ver = False
    for ver_file in file_array:
        # Use 'with' so handles are always closed: the original opened the
        # file for writing and never closed it (resource leak).
        with open(ver_file) as f:
            text = f.read()
        match_obj = re.search(pat, text)
        if match_obj:
            early_ver = match_obj.group(3)
            with open(ver_file, 'w') as f:
                f.write(re.sub(pat, r'\g<1>\g<2>%s\g<4>' % new_ver, text))
            replaced.append(ver_file)
        else:
            print("Unable to find version number matching expected format 'X.X.X' in %s" % ver_file)
    if early_ver:
        print("Version number changed from %s to %s in \n%s" % (early_ver, new_ver, replaced))
def version_add(new_ver, file_array):
    """Prepend *new_ver* to the ALL_VERSIONS list literal in each file of
    *file_array*."""
    for target in file_array:
        with open(target, "r") as fh:
            lines = fh.readlines()
        # Rewrite every line that contains the ALL_VERSIONS list literal.
        for idx in range(len(lines)):
            if "ALL_VERSIONS" not in lines[idx]:
                continue
            head, tail = lines[idx].split("[")
            lines[idx] = head + "['{}', ".format(new_ver) + tail
        with open(target, "w") as fh:
            fh.writelines(lines)
        print("Version number {new_ver} added in {ver_file}".format(new_ver=new_ver, ver_file=target))
if __name__ == '__main__':
    # Usage: version_update.py <new_version> <previous_version>
    if not len(sys.argv) == 3:
        print("Please provide the new version number and the previous one.")
        sys.exit(1)
    # The script lives in scripts/; work from the repository root.
    os.chdir('../')
    # Files with an in-place version string to replace.
    files_to_update = ['bokehjs/src/coffee/version.coffee', 'bokehjs/package.json']
    # Files whose ALL_VERSIONS list gets the new version prepended.
    files_to_add = ['sphinx/source/conf.py']
    updated_version = sys.argv[1]
    # NOTE(review): last_version is read but never used below -- confirm
    # whether it was meant to be passed to version_add.
    last_version = sys.argv[2]
    if check_input(updated_version):
        sys.exit(1)
    version_update(updated_version, files_to_update)
    version_add(updated_version, files_to_add)
| bsd-3-clause | 563,646,525,704,071,700 | -8,881,856,049,890,078,000 | 32.454545 | 104 | 0.571817 | false |
jonathan-beard/edx-platform | lms/djangoapps/courseware/tests/tests.py | 115 | 6821 | """
Test for LMS courseware app.
"""
from textwrap import dedent
from unittest import TestCase
from django.core.urlresolvers import reverse
import mock
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_XML_MODULESTORE as XML_MODULESTORE
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE as TOY_MODULESTORE
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@attr('shard_1')
class ActivateLoginTest(LoginEnrollmentTestCase):
    """
    Test logging in and logging out.
    """
    def setUp(self):
        # setup_user registers, activates and logs in a fresh test user.
        super(ActivateLoginTest, self).setUp()
        self.setup_user()
    def test_activate_login(self):
        """
        Test login -- the setup function does all the work.
        """
        pass
    def test_logout(self):
        """
        Test logout -- setup function does login.
        """
        self.logout()
class PageLoaderTestCase(LoginEnrollmentTestCase):
    """
    Base class that adds a function to load all pages in a modulestore.
    """
    def check_all_pages_load(self, course_key):
        """
        Assert that all pages in the course load correctly.
        `course_id` is the ID of the course to check.
        """
        store = modulestore()
        # Enroll in the course before trying to access pages
        course = store.get_course(course_key)
        self.enroll(course, True)
        # Search for items in the course
        items = store.get_items(course_key)
        if len(items) < 1:
            self.fail('Could not retrieve any items from course')
        # Try to load each item in the course
        for descriptor in items:
            # Each item category maps to a different LMS view/URL pattern.
            if descriptor.location.category == 'about':
                self._assert_loads('about_course',
                                   {'course_id': course_key.to_deprecated_string()},
                                   descriptor)
            elif descriptor.location.category == 'static_tab':
                kwargs = {'course_id': course_key.to_deprecated_string(),
                          'tab_slug': descriptor.location.name}
                self._assert_loads('static_tab', kwargs, descriptor)
            elif descriptor.location.category == 'course_info':
                self._assert_loads('info', {'course_id': course_key.to_deprecated_string()},
                                   descriptor)
            else:
                # Generic content: jump_to redirects to the courseware page.
                kwargs = {'course_id': course_key.to_deprecated_string(),
                          'location': descriptor.location.to_deprecated_string()}
                self._assert_loads('jump_to', kwargs, descriptor,
                                   expect_redirect=True,
                                   check_content=True)
    def _assert_loads(self, django_url, kwargs, descriptor,
                      expect_redirect=False,
                      check_content=False):
        """
        Assert that the url loads correctly.
        If expect_redirect, then also check that we were redirected.
        If check_content, then check that we don't get
        an error message about unavailable modules.
        """
        url = reverse(django_url, kwargs=kwargs)
        response = self.client.get(url, follow=True)
        if response.status_code != 200:
            self.fail('Status %d for page %s' %
                      (response.status_code, descriptor.location))
        if expect_redirect:
            self.assertEqual(response.redirect_chain[0][1], 302)
        if check_content:
            self.assertNotContains(response, "this module is temporarily unavailable")
            self.assertNotIsInstance(descriptor, ErrorDescriptor)
@attr('shard_1')
class TestXmlCoursesLoad(ModuleStoreTestCase, PageLoaderTestCase):
    """
    Check that all pages in test courses load properly from XML.
    """
    MODULESTORE = XML_MODULESTORE
    def setUp(self):
        super(TestXmlCoursesLoad, self).setUp()
        self.setup_user()
    def test_toy_course_loads(self):
        # Load one of the XML based courses
        # Our test mapping rules allow the MixedModuleStore
        # to load this course from XML, not Mongo.
        self.check_all_pages_load(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
@attr('shard_1')
class TestMongoCoursesLoad(ModuleStoreTestCase, PageLoaderTestCase):
    """
    Check that all pages in test courses load properly from Mongo.
    """
    MODULESTORE = TOY_MODULESTORE
    def setUp(self):
        super(TestMongoCoursesLoad, self).setUp()
        self.setup_user()
    @mock.patch('xmodule.course_module.requests.get')
    def test_toy_textbooks_loads(self, mock_get):
        # Stub the remote textbook table-of-contents fetch so the test does
        # not hit the network.
        mock_get.return_value.text = dedent("""
            <?xml version="1.0"?><table_of_contents>
            <entry page="5" page_label="ii" name="Table of Contents"/>
            </table_of_contents>
        """).strip()
        location = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall').make_usage_key('course', '2012_Fall')
        course = self.store.get_item(location)
        self.assertGreater(len(course.textbooks), 0)
@attr('shard_1')
class TestDraftModuleStore(ModuleStoreTestCase):
    """Regression test for passing a course id to the draft store's get_items."""
    def test_get_items_with_course_items(self):
        store = modulestore()
        # fix was to allow get_items() to take the course_id parameter
        store.get_items(SlashSeparatedCourseKey('abc', 'def', 'ghi'), qualifiers={'category': 'vertical'})
        # test success is just getting through the above statement.
        # The bug was that 'course_id' argument was
        # not allowed to be passed in (i.e. was throwing exception)
@attr('shard_1')
class TestLmsFieldData(TestCase):
    """
    Tests of the LmsFieldData class
    """
    def test_lms_field_data_wont_nest(self):
        # Verify that if an LmsFieldData is passed into LmsFieldData as the
        # authored_data, that it doesn't produced a nested field data.
        #
        # This fixes a bug where re-use of the same descriptor for many modules
        # would cause more and more nesting, until the recursion depth would be
        # reached on any attribute access
        # pylint: disable=protected-access
        base_authored = mock.Mock()
        base_student = mock.Mock()
        first_level = LmsFieldData(base_authored, base_student)
        second_level = LmsFieldData(first_level, base_student)
        # The wrapper should unwrap an LmsFieldData argument rather than nest it.
        self.assertEquals(second_level._authored_data, first_level._authored_data)
        self.assertNotIsInstance(second_level._authored_data, LmsFieldData)
| agpl-3.0 | 8,173,292,540,013,727,000 | -1,033,283,894,768,253,200 | 34.712042 | 107 | 0.633778 | false |
hariseldon99/archives | dtwa_ising_longrange/dtwa_ising_longrange/redirect_stdout.py | 2 | 1292 | import os
import sys
import contextlib
def fileno(file_or_fd):
    """Return the integer file descriptor for *file_or_fd*.

    Accepts either an object exposing a ``fileno()`` method or a raw
    descriptor (an int); raises ValueError for anything else.
    """
    # Objects without a fileno() method fall back to the value itself.
    fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
    if isinstance(fd, int):
        return fd
    raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
@contextlib.contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
    """
    Temporarily redirect *stdout* (default: sys.stdout) at the file
    descriptor level, so that output from C extensions and subprocesses is
    captured too.  *to* may be an open file, a raw descriptor, or a filename.
    http://stackoverflow.com/a/22434262/190597 (J.F. Sebastian)
    """
    if stdout is None:
        stdout = sys.stdout
    stdout_fd = fileno(stdout)
    # copy stdout_fd before it is overwritten
    #NOTE: `copied` is inheritable on Windows when duplicating a standard stream
    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
        stdout.flush() # flush library buffers that dup2 knows nothing about
        try:
            os.dup2(fileno(to), stdout_fd) # $ exec >&to
        except ValueError: # filename
            with open(to, 'wb') as to_file:
                os.dup2(to_file.fileno(), stdout_fd) # $ exec > to
        try:
            yield stdout # allow code to be run with the redirected stdout
        finally:
            # restore stdout to its previous value
            #NOTE: dup2 makes stdout_fd inheritable unconditionally
            stdout.flush()
            os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
ashray/VTK-EVM | ThirdParty/Twisted/twisted/internet/wxsupport.py | 60 | 1445 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Old method of wxPython support for Twisted.
twisted.internet.wxreactor is probably a better choice.
To use::
| # given a wxApp instance called myWxAppInstance:
| from twisted.internet import wxsupport
| wxsupport.install(myWxAppInstance)
Use Twisted's APIs for running and stopping the event loop, don't use
wxPython's methods.
On Windows the Twisted event loop might block when dialogs are open
or menus are selected.
Maintainer: Itamar Shtull-Trauring
"""
import warnings
warnings.warn("wxsupport is not fully functional on Windows, wxreactor is better.")
# wxPython imports
from wxPython.wx import wxApp
# twisted imports
from twisted.internet import reactor
from twisted.python.runtime import platformType
class wxRunner:
    """Make sure GUI events are handled."""

    def __init__(self, app):
        self.app = app

    def run(self):
        """
        Execute pending WX events followed by WX idle events and
        reschedule.
        """
        app = self.app
        # Drain the wx event queue, then let idle handlers run once.
        while app.Pending():
            app.Dispatch()
        app.ProcessIdle()
        # Twisted owns the main loop; poll the wx queue again shortly.
        reactor.callLater(0.02, self.run)
def install(app):
    """Install the wxPython support, given a wxApp instance"""
    # Schedule the first poll of the wx event queue from the Twisted reactor.
    reactor.callLater(0.02, wxRunner(app).run)
__all__ = ["install"]
| bsd-3-clause | 3,031,203,015,504,495,600 | -8,209,410,808,143,512,000 | 22.688525 | 83 | 0.677509 | false |
nwjs/chromium.src | third_party/android_platform/development/scripts/stack_core.py | 2 | 24484 | #!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""stack symbolizes native crash dumps."""
import itertools
import logging
import multiprocessing
import os
import re
import struct
import subprocess
import sys
import time
import zipfile
import symbol
from pylib import constants
UNKNOWN = '<unknown>'
HEAP = '[heap]'
STACK = '[stack]'
_DEFAULT_JOBS=8
_CHUNK_SIZE = 1000
_BASE_APK = 'base.apk'
_FALLBACK_SO = 'libchrome.so'
# pylint: disable=line-too-long
_ABI_LINE = re.compile('ABI: \'(?P<abi>[a-z0-9A-Z]+)\'')
_PROCESS_INFO_LINE = re.compile('(pid: [0-9]+, tid: [0-9]+.*)')
# Same as above, but used to extract the pid.
_PROCESS_INFO_PID = re.compile('pid: ([0-9]+)')
_SIGNAL_LINE = re.compile('(signal [0-9]+ \(.*\).*)')
_REGISTER_LINE = re.compile('(([ ]*[0-9a-z]{2} [0-9a-f]{8}){4})')
_THREAD_LINE = re.compile('(.*)(\-\-\- ){15}\-\-\-')
_DALVIK_JNI_THREAD_LINE = re.compile("(\".*\" prio=[0-9]+ tid=[0-9]+ NATIVE.*)")
_DALVIK_NATIVE_THREAD_LINE = re.compile("(\".*\" sysTid=[0-9]+ nice=[0-9]+.*)")
_JAVA_STDERR_LINE = re.compile("([0-9]+)\s+[0-9]+\s+.\s+System.err:\s*(.+)")
_MISC_HEADER = re.compile(
'(?:Tombstone written to:|Abort message:|Revision:|Build fingerprint:).*')
# Matches LOG(FATAL) lines, like the following example:
# [FATAL:source_file.cc(33)] Check failed: !instances_.empty()
_LOG_FATAL_LINE = re.compile('(\[FATAL\:.*\].*)$')
# Note that both trace and value line matching allow for variable amounts of
# whitespace (e.g. \t). This is because the we want to allow for the stack
# tool to operate on AndroidFeedback provided system logs. AndroidFeedback
# strips out double spaces that are found in tombsone files and logcat output.
#
# Examples of matched trace lines include lines from tombstone files like:
# #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
# #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so (symbol)
# Or lines from AndroidFeedback crash report system logs like:
# 03-25 00:51:05.520 I/DEBUG ( 65): #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
# Please note the spacing differences.
_TRACE_LINE = re.compile(
'(.*)\#(?P<frame>[0-9]+)[ \t]+(..)[ \t]+(0x)?(?P<address>[0-9a-f]{0,16})[ \t]+(?P<lib>[^\r\n \t]*)(?P<symbol_present> \((?P<symbol_name>.*)\))?'
)
# Matches lines emitted by src/base/debug/stack_trace_android.cc, like:
# #00 0x7324d92d /data/app-lib/org.chromium.native_test-1/libbase.cr.so+0x0006992d
# This pattern includes the unused named capture groups <symbol_present> and
# <symbol_name> so that it can interoperate with the |_TRACE_LINE| regex.
_DEBUG_TRACE_LINE = re.compile('(.*)(?P<frame>\#[0-9]+ 0x[0-9a-f]{8,16}) '
'(?P<lib>[^+]+)\+0x(?P<address>[0-9a-f]{8,16})'
'(?P<symbol_present>)(?P<symbol_name>)')
# Examples of matched value lines include:
# bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
# bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so (symbol)
# 03-25 00:51:05.530 I/DEBUG ( 65): bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
# Again, note the spacing differences.
_VALUE_LINE = re.compile(
'(.*)([0-9a-f]{8,16})[ \t]+([0-9a-f]{8,16})[ \t]+([^\r\n \t]*)( \((.*)\))?')
# Lines from 'code around' sections of the output will be matched before
# value lines because otheriwse the 'code around' sections will be confused as
# value lines.
#
# Examples include:
# 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
# 03-25 00:51:05.530 I/DEBUG ( 65): 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
_CODE_LINE = re.compile('(.*)[ \t]*[a-f0-9]{8,16}[ \t]*[a-f0-9]{8,16}' +
'[ \t]*[a-f0-9]{8,16}[ \t]*[a-f0-9]{8,16}' +
'[ \t]*[a-f0-9]{8,16}[ \t]*[ \r\n]')
# This pattern is used to find shared library offset in APK.
# Example:
# (offset 0x568000)
_SHARED_LIB_OFFSET_IN_APK = re.compile(' \(offset 0x(?P<offset>[0-9a-f]{0,16})\)')
# pylint: enable=line-too-long
def PrintTraceLines(trace_lines):
  """Print back trace.
  trace_lines is a list of (relative address, symbol-with-offset, file:line)
  tuples; the symbol column is padded to the longest symbol (capped at 80).
  """
  maxlen = min(80, max(map(lambda tl: len(tl[1]), trace_lines)))
  print
  print 'Stack Trace:'
  print ' RELADDR ' + 'FUNCTION'.ljust(maxlen) + ' FILE:LINE'
  for tl in trace_lines:
    (addr, symbol_with_offset, location) = tl
    normalized = os.path.normpath(location)
    print ' %8s %s %s' % (addr, symbol_with_offset.ljust(maxlen), normalized)
  return
def PrintValueLines(value_lines):
  """Print stack data values.
  value_lines is a list of (address, value, symbol-with-offset, file:line)
  tuples; the symbol column is padded to the longest symbol (capped at 80).
  """
  maxlen = min(80, max(map(lambda tl: len(tl[2]), value_lines)))
  print
  print 'Stack Data:'
  print ' ADDR VALUE ' + 'FUNCTION'.ljust(maxlen) + ' FILE:LINE'
  for vl in value_lines:
    (addr, value, symbol_with_offset, location) = vl
    print ' %8s %8s %s %s' % (addr, value, symbol_with_offset.ljust(maxlen),
                              location)
  return
def PrintJavaLines(java_lines):
  """Print java stderr lines."""
  print
  print('Java stderr from crashing pid '
        '(may identify underlying Java exception):')
  for l in java_lines:
    # Indent stack-frame lines ("at ...") under their exception message.
    if l.startswith('at'):
      print ' ',
    print l
def PrintOutput(trace_lines, value_lines, java_lines, more_info):
  """Print the symbolized crash report sections that are present.
  Stack-data values are only printed when more_info is set (see below).
  """
  if trace_lines:
    PrintTraceLines(trace_lines)
  if value_lines:
    # TODO(cjhopman): it seems that symbol.SymbolInformation always fails to
    # find information for addresses in value_lines in chrome libraries, and so
    # value_lines have little value to us and merely clutter the output.
    # Since information is sometimes contained in these lines (from system
    # libraries), don't completely disable them.
    if more_info:
      PrintValueLines(value_lines)
  if java_lines:
    PrintJavaLines(java_lines)
def PrintDivider():
  """Print a horizontal separator between symbolized stacks."""
  print
  print '-----------------------------------------------------\n'
def StreamingConvertTrace(_, load_vaddrs, more_info, fallback_monochrome,
                          arch_defined, llvm_symbolizer, apks_directory):
  """Symbolize stacks on the fly as they are read from an input stream.

  Args:
    _: unused; kept for signature parity with ConvertTrace.
    load_vaddrs: LOAD segment min_vaddrs keyed on mapped executable.
    more_info: if True, include the stack-data section in the output.
    fallback_monochrome: if True, fall back to libmonochrome.so for
      direct-from-APK loads that cannot be resolved.
    arch_defined: if True, the ABI was given on the command line and must
      not be sniffed from the log.
    llvm_symbolizer: symbolizer used to resolve addresses.
    apks_directory: optional explicit directory holding the APK/splits.
  """
  if fallback_monochrome:
    global _FALLBACK_SO
    _FALLBACK_SO = 'libmonochrome.so'
  useful_lines = []
  so_dirs = []
  in_stack = False
  def ConvertStreamingChunk():
    # Symbolize the crash lines accumulated so far as one report.
    logging.info("Stack found. Symbolizing...")
    if so_dirs:
      UpdateLibrarySearchPath(so_dirs)
    # if arch isn't defined in command line, find it from log
    if not arch_defined:
      arch = _FindAbi(useful_lines)
      if arch:
        print 'Symbolizing stack using ABI=' + arch
        symbol.ARCH = arch
    ResolveCrashSymbol(list(useful_lines), more_info, llvm_symbolizer)
  preprocessor = PreProcessLog(load_vaddrs, apks_directory)
  for line in iter(sys.stdin.readline, b''):
    # Echo every input line unchanged (trailing comma: line keeps its \n).
    print line,
    maybe_line, maybe_so_dir = preprocessor([line])
    useful_lines.extend(maybe_line)
    so_dirs.extend(maybe_so_dir)
    if in_stack:
      if not maybe_line:
        # First non-crash line ends the current stack: flush and reset.
        ConvertStreamingChunk()
        so_dirs = []
        useful_lines = []
        in_stack = False
    else:
      if _TRACE_LINE.match(line) or _DEBUG_TRACE_LINE.match(line) or \
          _VALUE_LINE.match(line) or _CODE_LINE.match(line):
        in_stack = True
  if in_stack:
    # Flush a stack still open at EOF.
    ConvertStreamingChunk()
def ConvertTrace(lines, load_vaddrs, more_info, fallback_monochrome,
                 arch_defined, llvm_symbolizer, apks_directory):
  """Convert strings containing native crash to a stack.

  Args:
    lines: raw logcat lines to symbolize.
    load_vaddrs: LOAD segment min_vaddrs keyed on mapped executable.
    more_info: if True, include the stack-data section in the output.
    fallback_monochrome: if True, fall back to libmonochrome.so for
      direct-from-APK loads that cannot be resolved.
    arch_defined: if True, the ABI was given on the command line and must
      not be sniffed from the log.
    llvm_symbolizer: symbolizer used to resolve addresses.
    apks_directory: optional explicit directory holding the APK/splits.
  """
  if fallback_monochrome:
    global _FALLBACK_SO
    _FALLBACK_SO = 'libmonochrome.so'
  start = time.time()
  chunks = [lines[i: i+_CHUNK_SIZE] for i in xrange(0, len(lines), _CHUNK_SIZE)]
  # Pre-process chunks in a process pool when there is enough work; can be
  # disabled with STACK_DISABLE_ASYNC=1 (useful for debugging).
  use_multiprocessing = len(chunks) > 1 and (
      os.environ.get('STACK_DISABLE_ASYNC') != '1')
  if use_multiprocessing:
    pool = multiprocessing.Pool(processes=_DEFAULT_JOBS)
    results = pool.map(PreProcessLog(load_vaddrs, apks_directory), chunks)
  else:
    results = map(PreProcessLog(load_vaddrs, apks_directory), chunks)
  useful_log = []
  so_dirs = []
  # Each result is a (useful_lines, so_dirs) pair from one chunk.
  for result in results:
    useful_log += result[0]
    so_dirs += result[1]
  if use_multiprocessing:
    pool.close()
    pool.join()
  end = time.time()
  logging.debug('Finished processing. Elapsed time: %.4fs', (end - start))
  if so_dirs:
    UpdateLibrarySearchPath(so_dirs)
  # if arch isn't defined in command line, find it from log
  if not arch_defined:
    arch = _FindAbi(useful_log)
    if arch:
      print 'Symbolizing stack using ABI:', arch
      symbol.ARCH = arch
  ResolveCrashSymbol(list(useful_log), more_info, llvm_symbolizer)
  end = time.time()
  logging.debug('Finished resolving symbols. Elapsed time: %.4fs',
                (end - start))
class PreProcessLog:
  """Closure wrapper, for multiprocessing.Pool.map.

  Instances are callable: given a list of raw logcat byte strings, they
  return only the lines relevant to a native crash (see __call__).
  """
  def __init__(self, load_vaddrs, apks_directory):
    """Bind load_vaddrs to the PreProcessLog closure.
    Args:
      load_vaddrs: LOAD segment min_vaddrs keyed on mapped executable
      apks_directory: optional explicit directory containing the APK (and
        split APKs); overrides the default <output dir>/apks location.
    """
    self._load_vaddrs = load_vaddrs
    self._apks_directory = apks_directory
    # Maps '<apk path>:<mmap offset>' -> detected shared library, so each
    # (apk, offset) pair is only searched once.
    self._shared_libraries_mapping = dict()
    # Directories, other than the default output dir, in which shared
    # libraries were found; the main process updates its search path from it.
    self._so_dirs = []
  def _DetectSharedLibrary(self, lib, symbol_present):
    """Detect the possible shared library from the mapping offset of APK
    Return:
      the shared library in APK if only one is found.
    """
    offset_match = _SHARED_LIB_OFFSET_IN_APK.match(symbol_present)
    if not offset_match:
      return
    offset = offset_match.group('offset')
    key = '%s:%s' % (lib, offset)
    # 'in' rather than the deprecated dict.has_key() (removed in Python 3).
    if key in self._shared_libraries_mapping:
      soname = self._shared_libraries_mapping[key]
    else:
      soname, host_so = _FindSharedLibraryFromAPKs(constants.GetOutDirectory(),
                                                   self._apks_directory,
                                                   int(offset, 16))
      if soname:
        self._shared_libraries_mapping[key] = soname
        so_dir = os.path.dirname(host_so)
        # Store the directory if it is not the default output dir, so
        # we can update library search path in main process.
        if not os.path.samefile(constants.GetOutDirectory(), so_dir):
          self._so_dirs.append(so_dir)
        logging.info('Detected: %s is %s which is loaded directly from APK.',
                     host_so, soname)
    return soname
  def _AdjustAddress(self, address, lib):
    """Add the vaddr of the library's first LOAD segment to address.
    Args:
      address: symbol address as a hexadecimal string
      lib: path to loaded library
    Returns:
      address+load_vaddrs[key] if lib ends with /key, otherwise address
    """
    for key, offset in self._load_vaddrs.iteritems():
      if lib.endswith('/' + key):
        # Add offset to address, and return the result as a hexadecimal string
        # with the same number of digits as the original. This allows the
        # caller to make a direct textual substitution.
        return ('%%0%dx' % len(address)) % (int(address, 16) + offset)
    return address
  def __call__(self, lines):
    """Preprocess the strings, only keep the useful ones.
    Args:
      lines: a list of byte strings read from logcat
    Returns:
      A list of unicode strings related to native crash
    """
    useful_log = []
    for ln in lines:
      line = unicode(ln, errors='ignore')
      # Header-style lines are kept verbatim.
      if (_PROCESS_INFO_LINE.search(line)
          or _SIGNAL_LINE.search(line)
          or _REGISTER_LINE.search(line)
          or _THREAD_LINE.search(line)
          or _DALVIK_JNI_THREAD_LINE.search(line)
          or _DALVIK_NATIVE_THREAD_LINE.search(line)
          or _LOG_FATAL_LINE.search(line)
          or _DEBUG_TRACE_LINE.search(line)
          or _ABI_LINE.search(line)
          or _JAVA_STDERR_LINE.search(line)
          or _MISC_HEADER.search(line)):
        useful_log.append(line)
        continue
      match = _TRACE_LINE.match(line)
      if match:
        lib, symbol_present = match.group('lib', 'symbol_present')
        extension = os.path.splitext(lib)[1]
        if extension == '.so' and '.apk!' in lib:
          # For Android Q+, where trace lines have "...base.apk!libchrome.so",
          # convert the ! to a / so that the line parses like a conventional
          # library line.
          line = line.replace('.apk!', '.apk/')
        elif extension == '.apk' and symbol_present:
          soname = self._DetectSharedLibrary(lib, symbol_present)
          if soname:
            line = line.replace('/' + os.path.basename(lib), '/' + soname)
          elif not self._apks_directory:
            # If the trace line suggests a direct load from APK, replace the
            # APK name with _FALLBACK_SO, unless an APKs directory was
            # explicitly specified (in which case, the correct .so should always
            # be identified, and using a fallback could be misleading).
            line = line.replace('/' + _BASE_APK, '/' + _FALLBACK_SO)
            logging.debug("Can't detect shared library in APK, fallback to" +
                          " library " + _FALLBACK_SO)
        # For trace lines specifically, the address may need to be adjusted
        # to account for relocation packing. This is because debuggerd on
        # pre-M platforms does not understand non-zero vaddr LOAD segments.
        address, lib = match.group('address', 'lib')
        adjusted_address = self._AdjustAddress(address, lib)
        useful_log.append(line.replace(address, adjusted_address, 1))
        continue
      if _CODE_LINE.match(line):
        # Code lines should be ignored. If this were excluded the 'code around'
        # sections would trigger value_line matches.
        continue
      if _VALUE_LINE.match(line):
        useful_log.append(line)
    return useful_log, self._so_dirs
def ResolveCrashSymbol(lines, more_info, llvm_symbolizer):
  """Convert unicode strings which contains native crash to a stack.

  Args:
    lines: preprocessed log lines (see PreProcessLog.__call__).
    more_info: if True, also print the stack-data section.
    llvm_symbolizer: symbolizer mapping (library, address) to symbol/source.
  """
  trace_lines = []
  value_lines = []
  last_frame = -1
  pid = -1
  # Collects all java exception lines, keyed by pid for later output during
  # native crash handling.
  java_stderr_by_pid = {}
  for line in lines:
    java_stderr_match = _JAVA_STDERR_LINE.search(line)
    if java_stderr_match:
      pid, msg = java_stderr_match.groups()
      java_stderr_by_pid.setdefault(pid, []).append(msg)
  for line in lines:
    # AndroidFeedback adds zero width spaces into its crash reports. These
    # should be removed or the regular expressions will fail to match.
    process_header = _PROCESS_INFO_LINE.search(line)
    signal_header = _SIGNAL_LINE.search(line)
    register_header = _REGISTER_LINE.search(line)
    thread_header = _THREAD_LINE.search(line)
    dalvik_jni_thread_header = _DALVIK_JNI_THREAD_LINE.search(line)
    dalvik_native_thread_header = _DALVIK_NATIVE_THREAD_LINE.search(line)
    log_fatal_header = _LOG_FATAL_LINE.search(line)
    misc_header = _MISC_HEADER.search(line)
    if (process_header or signal_header or register_header or thread_header or
        dalvik_jni_thread_header or dalvik_native_thread_header or
        log_fatal_header or misc_header):
      # A new report section begins: flush any stack collected so far.
      if trace_lines or value_lines:
        java_lines = []
        if pid != -1 and pid in java_stderr_by_pid:
          java_lines = java_stderr_by_pid[pid]
        PrintOutput(trace_lines, value_lines, java_lines, more_info)
        PrintDivider()
        trace_lines = []
        value_lines = []
        last_frame = -1
        pid = -1
      if process_header:
        # Track the last reported pid to find java exceptions.
        pid = _PROCESS_INFO_PID.search(process_header.group(1)).group(1)
        print process_header.group(1)
      if signal_header:
        print signal_header.group(1)
      if register_header:
        print register_header.group(1)
      if thread_header:
        print thread_header.group(1)
      if dalvik_jni_thread_header:
        print dalvik_jni_thread_header.group(1)
      if dalvik_native_thread_header:
        print dalvik_native_thread_header.group(1)
      if log_fatal_header:
        print log_fatal_header.group(1)
      if misc_header:
        print misc_header.group(0)
      continue
    match = _TRACE_LINE.match(line) or _DEBUG_TRACE_LINE.match(line)
    if match:
      frame, code_addr, area, _, symbol_name = match.group(
          'frame', 'address', 'lib', 'symbol_present', 'symbol_name')
      logging.debug('Found trace line: %s' % line.strip())
      # NOTE(review): 'frame' is a string here, so this comparison is
      # lexicographic; it relies on debuggerd zero-padding frame numbers
      # (e.g. '09' < '10') -- confirm against the trace format.
      if frame <= last_frame and (trace_lines or value_lines):
        # Frame number did not increase: a new stack started; flush the old.
        java_lines = []
        if pid != -1 and pid in java_stderr_by_pid:
          java_lines = java_stderr_by_pid[pid]
        PrintOutput(trace_lines, value_lines, java_lines, more_info)
        PrintDivider()
        trace_lines = []
        value_lines = []
        pid = -1
      last_frame = frame
      if area == UNKNOWN or area == HEAP or area == STACK:
        trace_lines.append((code_addr, '', area))
      else:
        logging.debug('Identified lib: %s' % area)
        # If a calls b which further calls c and c is inlined to b, we want to
        # display "a -> b -> c" in the stack trace instead of just "a -> c"
        # To use llvm symbolizer, the hexadecimal address has to start with 0x.
        info = llvm_symbolizer.GetSymbolInformation(
            os.path.join(symbol.SYMBOLS_DIR, symbol.TranslateLibPath(area)),
            '0x' + code_addr)
        logging.debug('symbol information: %s' % info)
        nest_count = len(info) - 1
        for source_symbol, source_location in info:
          if nest_count > 0:
            # Inlined frames are rendered with a marker instead of an address.
            nest_count = nest_count - 1
            trace_lines.append(('v------>', source_symbol, source_location))
          elif '<UNKNOWN>' in source_symbol and symbol_name:
            # If the symbolizer couldn't find a symbol name, but the trace had
            # one, use what the trace had.
            trace_lines.append((code_addr, symbol_name, source_location))
          else:
            trace_lines.append((code_addr,
                                source_symbol,
                                source_location))
    match = _VALUE_LINE.match(line)
    if match:
      (_, addr, value, area, _, symbol_name) = match.groups()
      if area == UNKNOWN or area == HEAP or area == STACK or not area:
        value_lines.append((addr, value, '', area))
      else:
        info = llvm_symbolizer.GetSymbolInformation(
            os.path.join(symbol.SYMBOLS_DIR, symbol.TranslateLibPath(area)),
            '0x' + value)
        source_symbol, source_location = info.pop()
        value_lines.append((addr,
                            value,
                            source_symbol,
                            source_location))
  # Flush whatever is still buffered at end of input.
  java_lines = []
  if pid != -1 and pid in java_stderr_by_pid:
    java_lines = java_stderr_by_pid[pid]
  PrintOutput(trace_lines, value_lines, java_lines, more_info)
def UpdateLibrarySearchPath(so_dirs):
  """Point the symbolizer at the directory the shared libraries came from.

  All entries in |so_dirs| must name the same directory, since one
  directory corresponds to one CPU architecture.

  Args:
    so_dirs: list of directories (duplicates allowed) where shared
      libraries were found; may be empty, in which case this is a no-op.

  Raises:
    Exception: if |so_dirs| contains more than one distinct directory.
  """
  # All dirs in so_dirs must be same, since a dir represents the cpu arch.
  unique_dirs = set(so_dirs)
  if not unique_dirs:
    return
  if len(unique_dirs) > 1:
    # Use %-formatting: Exception("...%s", x) would store an unformatted
    # (template, arg) tuple instead of a readable message.
    raise Exception("Found different so dirs, they are %s" % repr(unique_dirs))
  search_path = unique_dirs.pop()
  logging.info("Search libraries in %s", search_path)
  symbol.SetSecondaryAbiOutputPath(search_path)
def GetUncompressedSharedLibraryFromAPK(apkname, offset):
  """Check if there is uncompressed shared library at specifc offset of APK.

  Args:
    apkname: path to the APK (zip) file to inspect.
    offset: file offset, in bytes, where the library is expected to start.

  Returns:
    (soname, sosize) of the stored (uncompressed) .so entry whose data
    starts exactly at |offset|, or ("", 0) if there is none.
  """
  # Zip local file header layout: file-name length at +26, the name itself
  # at +30 (followed by the extra field, then the file data).
  FILE_NAME_LEN_OFFSET = 26
  FILE_NAME_OFFSET = 30
  soname = ""
  sosize = 0
  with zipfile.ZipFile(apkname, 'r') as apk:
    for infoList in apk.infolist():
      _, file_extension = os.path.splitext(infoList.filename)
      # Only stored entries (file_size == compress_size) can be mmapped
      # directly out of the APK.
      if (file_extension == '.so' and
          infoList.file_size == infoList.compress_size):
        with open(apkname, 'rb') as f:
          f.seek(infoList.header_offset + FILE_NAME_LEN_OFFSET)
          # NOTE(review): 'H' unpacks in native byte order; zip headers are
          # little-endian, so this assumes a little-endian host ('<H' would
          # be portable) -- confirm.
          file_name_len = struct.unpack('H', f.read(2))[0]
          extra_field_len = struct.unpack('H', f.read(2))[0]
          file_offset = (infoList.header_offset + FILE_NAME_OFFSET +
                         file_name_len + extra_field_len)
          f.seek(file_offset)
          # Require the data to start exactly at |offset| and to carry the
          # ELF magic number.
          if offset == file_offset and f.read(4) == "\x7fELF":
            # Chromium's "crazy linker" prefix is not part of the soname.
            soname = infoList.filename.replace('crazy.', '')
            sosize = infoList.file_size
            break
  return soname, sosize
def _GetSharedLibraryInHost(soname, sosize, dirs):
"""Find a shared library by name in a list of directories.
Args:
soname: library name (e.g. libfoo.so)
sosize: library file size to match.
dirs: list of directories to look for the corresponding file.
Returns:
host library path if found, or None
"""
for d in dirs:
host_so_file = os.path.join(d, os.path.basename(soname))
if not os.path.isfile(host_so_file):
continue
if os.path.getsize(host_so_file) != sosize:
continue
logging.debug("%s match to the one in APK" % host_so_file)
return host_so_file
def _FindSharedLibraryFromAPKs(output_directory, apks_directory, offset):
"""Find the shared library at the specifc offset of an APK file.
WARNING: This function will look at *all* the apks under
$output_directory/apks/ looking for native libraries they may contain at
|offset|, unless an APKs directory is explicitly specified.
This is error-prone, since a typical full Chrome build has more than a
hundred APKs these days, meaning that several APKs might actually match
the offset.
The function tries to detect this by looking at the names of the
extracted libraries. If they are all the same, it will consider that
as a success, and return its name, even if the APKs embed the same
library at different offsets!!
If there are more than one library at offset from the pool of all APKs,
the function prints an error message and fails.
Args:
output_directory: Chromium output directory.
apks_directory: A optional directory containing (only) the APK in question,
or in the case of a bundle, all split APKs. This overrides the default
apks directory derived from the output directory, and allows for
disambiguation.
offset: APK file offset, as extracted from the stack trace.
Returns:
A (library_name, host_library_path) tuple on success, or (None, None)
in case of failure.
"""
if apks_directory:
if not os.path.isdir(apks_directory):
raise Exception('Explicit APKs directory does not exist: %s',
repr(apks_directory))
else:
apks_directory = os.path.join(output_directory, 'apks')
if not os.path.isdir(apks_directory):
return (None, None)
apks = []
# Walk subdirectories here, in case the directory contains an unzipped bundle
# .apks file, with splits in it.
for d, _, files in os.walk(apks_directory):
apks.extend(
os.path.join(d, f) for f in files if os.path.splitext(f)[1] == '.apk')
shared_libraries = []
for apk in apks:
soname, sosize = GetUncompressedSharedLibraryFromAPK(apk, offset)
if soname == "":
continue
dirs = [output_directory] + [
os.path.join(output_directory, x)
for x in os.listdir(output_directory)
if os.path.exists(os.path.join(output_directory, x, 'lib.unstripped'))
]
host_so_file = _GetSharedLibraryInHost(soname, sosize, dirs)
if host_so_file:
shared_libraries += [(soname, host_so_file)]
# If there are more than one libraries found, it means detecting
# library failed.
number_of_library = len(shared_libraries)
if number_of_library == 1:
return shared_libraries[0]
elif number_of_library > 1:
logging.warning("More than one libraries could be loaded from APK.")
return (None, None)
def _FindAbi(lines):
  """Scan log |lines| for an ABI header; return the ABI string or None."""
  for log_line in lines:
    abi_match = _ABI_LINE.search(log_line)
    if abi_match:
      return abi_match.group('abi')
  return None
# -*- coding: utf-8 -*-
# LaTeX math to Unicode symbols translation dictionaries.
# Generated with ``write_tex2unichar.py`` from the data in
# http://milde.users.sourceforge.net/LUCR/Math/
# Includes commands from: wasysym, stmaryrd, mathdots, mathabx, esint, bbold, amsxtra, amsmath, amssymb, standard LaTeX
mathaccent = {
    # LaTeX accent commands -> Unicode combining characters. A combining
    # character is emitted *after* the base character it decorates.
    'acute': u'\u0301', # x́ COMBINING ACUTE ACCENT
    'bar': u'\u0304', # x̄ COMBINING MACRON
    'breve': u'\u0306', # x̆ COMBINING BREVE
    'check': u'\u030c', # x̌ COMBINING CARON
    'ddddot': u'\u20dc', # x⃜ COMBINING FOUR DOTS ABOVE
    'dddot': u'\u20db', # x⃛ COMBINING THREE DOTS ABOVE
    'ddot': u'\u0308', # ẍ COMBINING DIAERESIS
    'dot': u'\u0307', # ẋ COMBINING DOT ABOVE
    'grave': u'\u0300', # x̀ COMBINING GRAVE ACCENT
    'hat': u'\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
    'mathring': u'\u030a', # x̊ COMBINING RING ABOVE
    'not': u'\u0338', # x̸ COMBINING LONG SOLIDUS OVERLAY
    'overleftarrow': u'\u20d6', # x⃖ COMBINING LEFT ARROW ABOVE
    'overleftrightarrow': u'\u20e1', # x⃡ COMBINING LEFT RIGHT ARROW ABOVE
    'overline': u'\u0305', # x̅ COMBINING OVERLINE
    'overrightarrow': u'\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
    'tilde': u'\u0303', # x̃ COMBINING TILDE
    'underbar': u'\u0331', # x̱ COMBINING MACRON BELOW
    'underleftarrow': u'\u20ee', # x⃮ COMBINING LEFT ARROW BELOW
    'underline': u'\u0332', # x̲ COMBINING LOW LINE
    'underrightarrow': u'\u20ef', # x⃯ COMBINING RIGHT ARROW BELOW
    'vec': u'\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
    'widehat': u'\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
    'widetilde': u'\u0303', # x̃ COMBINING TILDE
    }
mathalpha = {
'Bbbk': u'\U0001d55c', # 𝕜 MATHEMATICAL DOUBLE-STRUCK SMALL K
'Delta': u'\u0394', # Δ GREEK CAPITAL LETTER DELTA
'Gamma': u'\u0393', # Γ GREEK CAPITAL LETTER GAMMA
'Im': u'\u2111', # ℑ BLACK-LETTER CAPITAL I
'Lambda': u'\u039b', # Λ GREEK CAPITAL LETTER LAMDA
'Omega': u'\u03a9', # Ω GREEK CAPITAL LETTER OMEGA
'Phi': u'\u03a6', # Φ GREEK CAPITAL LETTER PHI
'Pi': u'\u03a0', # Π GREEK CAPITAL LETTER PI
'Psi': u'\u03a8', # Ψ GREEK CAPITAL LETTER PSI
'Re': u'\u211c', # ℜ BLACK-LETTER CAPITAL R
'Sigma': u'\u03a3', # Σ GREEK CAPITAL LETTER SIGMA
'Theta': u'\u0398', # Θ GREEK CAPITAL LETTER THETA
'Upsilon': u'\u03a5', # Υ GREEK CAPITAL LETTER UPSILON
'Xi': u'\u039e', # Ξ GREEK CAPITAL LETTER XI
'aleph': u'\u2135', # ℵ ALEF SYMBOL
'alpha': u'\u03b1', # α GREEK SMALL LETTER ALPHA
'beta': u'\u03b2', # β GREEK SMALL LETTER BETA
'beth': u'\u2136', # ℶ BET SYMBOL
'chi': u'\u03c7', # χ GREEK SMALL LETTER CHI
'daleth': u'\u2138', # ℸ DALET SYMBOL
'delta': u'\u03b4', # δ GREEK SMALL LETTER DELTA
'digamma': u'\u03dc', # Ϝ GREEK LETTER DIGAMMA
'ell': u'\u2113', # ℓ SCRIPT SMALL L
'epsilon': u'\u03f5', # ϵ GREEK LUNATE EPSILON SYMBOL
'eta': u'\u03b7', # η GREEK SMALL LETTER ETA
'eth': u'\xf0', # ð LATIN SMALL LETTER ETH
'gamma': u'\u03b3', # γ GREEK SMALL LETTER GAMMA
'gimel': u'\u2137', # ℷ GIMEL SYMBOL
'hbar': u'\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'hslash': u'\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'imath': u'\u0131', # ı LATIN SMALL LETTER DOTLESS I
'iota': u'\u03b9', # ι GREEK SMALL LETTER IOTA
'jmath': u'\u0237', # ȷ LATIN SMALL LETTER DOTLESS J
'kappa': u'\u03ba', # κ GREEK SMALL LETTER KAPPA
'lambda': u'\u03bb', # λ GREEK SMALL LETTER LAMDA
'mu': u'\u03bc', # μ GREEK SMALL LETTER MU
'nu': u'\u03bd', # ν GREEK SMALL LETTER NU
'omega': u'\u03c9', # ω GREEK SMALL LETTER OMEGA
'phi': u'\u03d5', # ϕ GREEK PHI SYMBOL
'pi': u'\u03c0', # π GREEK SMALL LETTER PI
'psi': u'\u03c8', # ψ GREEK SMALL LETTER PSI
'rho': u'\u03c1', # ρ GREEK SMALL LETTER RHO
'sigma': u'\u03c3', # σ GREEK SMALL LETTER SIGMA
'tau': u'\u03c4', # τ GREEK SMALL LETTER TAU
'theta': u'\u03b8', # θ GREEK SMALL LETTER THETA
'upsilon': u'\u03c5', # υ GREEK SMALL LETTER UPSILON
'varDelta': u'\U0001d6e5', # 𝛥 MATHEMATICAL ITALIC CAPITAL DELTA
'varGamma': u'\U0001d6e4', # 𝛤 MATHEMATICAL ITALIC CAPITAL GAMMA
'varLambda': u'\U0001d6ec', # 𝛬 MATHEMATICAL ITALIC CAPITAL LAMDA
'varOmega': u'\U0001d6fa', # 𝛺 MATHEMATICAL ITALIC CAPITAL OMEGA
'varPhi': u'\U0001d6f7', # 𝛷 MATHEMATICAL ITALIC CAPITAL PHI
'varPi': u'\U0001d6f1', # 𝛱 MATHEMATICAL ITALIC CAPITAL PI
'varPsi': u'\U0001d6f9', # 𝛹 MATHEMATICAL ITALIC CAPITAL PSI
'varSigma': u'\U0001d6f4', # 𝛴 MATHEMATICAL ITALIC CAPITAL SIGMA
'varTheta': u'\U0001d6e9', # 𝛩 MATHEMATICAL ITALIC CAPITAL THETA
'varUpsilon': u'\U0001d6f6', # 𝛶 MATHEMATICAL ITALIC CAPITAL UPSILON
'varXi': u'\U0001d6ef', # 𝛯 MATHEMATICAL ITALIC CAPITAL XI
'varepsilon': u'\u03b5', # ε GREEK SMALL LETTER EPSILON
'varkappa': u'\U0001d718', # 𝜘 MATHEMATICAL ITALIC KAPPA SYMBOL
'varphi': u'\u03c6', # φ GREEK SMALL LETTER PHI
'varpi': u'\u03d6', # ϖ GREEK PI SYMBOL
'varrho': u'\u03f1', # ϱ GREEK RHO SYMBOL
'varsigma': u'\u03c2', # ς GREEK SMALL LETTER FINAL SIGMA
'vartheta': u'\u03d1', # ϑ GREEK THETA SYMBOL
'wp': u'\u2118', # ℘ SCRIPT CAPITAL P
'xi': u'\u03be', # ξ GREEK SMALL LETTER XI
'zeta': u'\u03b6', # ζ GREEK SMALL LETTER ZETA
}
mathbin = {
'Cap': u'\u22d2', # ⋒ DOUBLE INTERSECTION
'Circle': u'\u25cb', # ○ WHITE CIRCLE
'Cup': u'\u22d3', # ⋓ DOUBLE UNION
'LHD': u'\u25c0', # ◀ BLACK LEFT-POINTING TRIANGLE
'RHD': u'\u25b6', # ▶ BLACK RIGHT-POINTING TRIANGLE
'amalg': u'\u2a3f', # ⨿ AMALGAMATION OR COPRODUCT
'ast': u'\u2217', # ∗ ASTERISK OPERATOR
'barwedge': u'\u22bc', # ⊼ NAND
'bigtriangledown': u'\u25bd', # ▽ WHITE DOWN-POINTING TRIANGLE
'bigtriangleup': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'bindnasrepma': u'\u214b', # ⅋ TURNED AMPERSAND
'blacklozenge': u'\u29eb', # ⧫ BLACK LOZENGE
'blacktriangledown': u'\u25be', # ▾ BLACK DOWN-POINTING SMALL TRIANGLE
'blacktriangleleft': u'\u25c2', # ◂ BLACK LEFT-POINTING SMALL TRIANGLE
'blacktriangleright': u'\u25b8', # ▸ BLACK RIGHT-POINTING SMALL TRIANGLE
'blacktriangleup': u'\u25b4', # ▴ BLACK UP-POINTING SMALL TRIANGLE
'boxast': u'\u29c6', # ⧆ SQUARED ASTERISK
'boxbar': u'\u25eb', # ◫ WHITE SQUARE WITH VERTICAL BISECTING LINE
'boxbox': u'\u29c8', # ⧈ SQUARED SQUARE
'boxbslash': u'\u29c5', # ⧅ SQUARED FALLING DIAGONAL SLASH
'boxcircle': u'\u29c7', # ⧇ SQUARED SMALL CIRCLE
'boxdot': u'\u22a1', # ⊡ SQUARED DOT OPERATOR
'boxminus': u'\u229f', # ⊟ SQUARED MINUS
'boxplus': u'\u229e', # ⊞ SQUARED PLUS
'boxslash': u'\u29c4', # ⧄ SQUARED RISING DIAGONAL SLASH
'boxtimes': u'\u22a0', # ⊠ SQUARED TIMES
'bullet': u'\u2219', # ∙ BULLET OPERATOR
'cap': u'\u2229', # ∩ INTERSECTION
'cdot': u'\u22c5', # ⋅ DOT OPERATOR
'circ': u'\u2218', # ∘ RING OPERATOR
'circledast': u'\u229b', # ⊛ CIRCLED ASTERISK OPERATOR
'circledcirc': u'\u229a', # ⊚ CIRCLED RING OPERATOR
'circleddash': u'\u229d', # ⊝ CIRCLED DASH
'cup': u'\u222a', # ∪ UNION
'curlyvee': u'\u22ce', # ⋎ CURLY LOGICAL OR
'curlywedge': u'\u22cf', # ⋏ CURLY LOGICAL AND
'dagger': u'\u2020', # † DAGGER
'ddagger': u'\u2021', # ‡ DOUBLE DAGGER
'diamond': u'\u22c4', # ⋄ DIAMOND OPERATOR
'div': u'\xf7', # ÷ DIVISION SIGN
'divideontimes': u'\u22c7', # ⋇ DIVISION TIMES
'dotplus': u'\u2214', # ∔ DOT PLUS
'doublebarwedge': u'\u2a5e', # ⩞ LOGICAL AND WITH DOUBLE OVERBAR
'intercal': u'\u22ba', # ⊺ INTERCALATE
'interleave': u'\u2af4', # ⫴ TRIPLE VERTICAL BAR BINARY RELATION
'land': u'\u2227', # ∧ LOGICAL AND
'leftthreetimes': u'\u22cb', # ⋋ LEFT SEMIDIRECT PRODUCT
'lhd': u'\u25c1', # ◁ WHITE LEFT-POINTING TRIANGLE
'lor': u'\u2228', # ∨ LOGICAL OR
'ltimes': u'\u22c9', # ⋉ LEFT NORMAL FACTOR SEMIDIRECT PRODUCT
'mp': u'\u2213', # ∓ MINUS-OR-PLUS SIGN
'odot': u'\u2299', # ⊙ CIRCLED DOT OPERATOR
'ominus': u'\u2296', # ⊖ CIRCLED MINUS
'oplus': u'\u2295', # ⊕ CIRCLED PLUS
'oslash': u'\u2298', # ⊘ CIRCLED DIVISION SLASH
'otimes': u'\u2297', # ⊗ CIRCLED TIMES
'pm': u'\xb1', # ± PLUS-MINUS SIGN
'rhd': u'\u25b7', # ▷ WHITE RIGHT-POINTING TRIANGLE
'rightthreetimes': u'\u22cc', # ⋌ RIGHT SEMIDIRECT PRODUCT
'rtimes': u'\u22ca', # ⋊ RIGHT NORMAL FACTOR SEMIDIRECT PRODUCT
'setminus': u'\u29f5', # ⧵ REVERSE SOLIDUS OPERATOR
'slash': u'\u2215', # ∕ DIVISION SLASH
'smallsetminus': u'\u2216', # ∖ SET MINUS
'smalltriangledown': u'\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'smalltriangleleft': u'\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'smalltriangleright': u'\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'smalltriangleup': u'\u25b5', # ▵ WHITE UP-POINTING SMALL TRIANGLE
'sqcap': u'\u2293', # ⊓ SQUARE CAP
'sqcup': u'\u2294', # ⊔ SQUARE CUP
'sslash': u'\u2afd', # ⫽ DOUBLE SOLIDUS OPERATOR
'star': u'\u22c6', # ⋆ STAR OPERATOR
'talloblong': u'\u2afe', # ⫾ WHITE VERTICAL BAR
'times': u'\xd7', # × MULTIPLICATION SIGN
'triangle': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'triangledown': u'\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'triangleleft': u'\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'triangleright': u'\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'uplus': u'\u228e', # ⊎ MULTISET UNION
'vartriangle': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'vee': u'\u2228', # ∨ LOGICAL OR
'veebar': u'\u22bb', # ⊻ XOR
'wedge': u'\u2227', # ∧ LOGICAL AND
'wr': u'\u2240', # ≀ WREATH PRODUCT
}
mathclose = {
    # LaTeX closing (right-hand) delimiter commands -> Unicode.
    'Rbag': u'\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
    'lrcorner': u'\u231f', # ⌟ BOTTOM RIGHT CORNER
    'rangle': u'\u27e9', # ⟩ MATHEMATICAL RIGHT ANGLE BRACKET
    'rbag': u'\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
    'rbrace': u'}', # } RIGHT CURLY BRACKET
    'rbrack': u']', # ] RIGHT SQUARE BRACKET
    'rceil': u'\u2309', # ⌉ RIGHT CEILING
    'rfloor': u'\u230b', # ⌋ RIGHT FLOOR
    'rgroup': u'\u27ef', # ⟯ MATHEMATICAL RIGHT FLATTENED PARENTHESIS
    'rrbracket': u'\u27e7', # ⟧ MATHEMATICAL RIGHT WHITE SQUARE BRACKET
    'rrparenthesis': u'\u2988', # ⦈ Z NOTATION RIGHT IMAGE BRACKET
    'urcorner': u'\u231d', # ⌝ TOP RIGHT CORNER
    '}': u'}', # } RIGHT CURLY BRACKET
    }
mathfence = {
    # Fences: delimiters that may appear on either side of an expression.
    '|': u'\u2016', # DOUBLE VERTICAL LINE (\|)
    'Vert': u'\u2016', # DOUBLE VERTICAL LINE
    'vert': u'|', # VERTICAL LINE
    }
mathop = {
    # LaTeX big-operator commands (sums, products, integrals, n-ary set and
    # logic operations) -> Unicode.
    'Join': u'\u2a1d', # ⨝ JOIN
    'bigcap': u'\u22c2', # ⋂ N-ARY INTERSECTION
    'bigcup': u'\u22c3', # ⋃ N-ARY UNION
    'biginterleave': u'\u2afc', # ⫼ LARGE TRIPLE VERTICAL BAR OPERATOR
    'bigodot': u'\u2a00', # ⨀ N-ARY CIRCLED DOT OPERATOR
    'bigoplus': u'\u2a01', # ⨁ N-ARY CIRCLED PLUS OPERATOR
    'bigotimes': u'\u2a02', # ⨂ N-ARY CIRCLED TIMES OPERATOR
    'bigsqcup': u'\u2a06', # ⨆ N-ARY SQUARE UNION OPERATOR
    'biguplus': u'\u2a04', # ⨄ N-ARY UNION OPERATOR WITH PLUS
    'bigvee': u'\u22c1', # ⋁ N-ARY LOGICAL OR
    'bigwedge': u'\u22c0', # ⋀ N-ARY LOGICAL AND
    'coprod': u'\u2210', # ∐ N-ARY COPRODUCT
    'fatsemi': u'\u2a1f', # ⨟ Z NOTATION SCHEMA COMPOSITION
    'fint': u'\u2a0f', # ⨏ INTEGRAL AVERAGE WITH SLASH
    'iiiint': u'\u2a0c', # ⨌ QUADRUPLE INTEGRAL OPERATOR
    'iiint': u'\u222d', # ∭ TRIPLE INTEGRAL
    'iint': u'\u222c', # ∬ DOUBLE INTEGRAL
    'int': u'\u222b', # ∫ INTEGRAL
    'oiint': u'\u222f', # ∯ SURFACE INTEGRAL
    'oint': u'\u222e', # ∮ CONTOUR INTEGRAL
    'ointctrclockwise': u'\u2233', # ∳ ANTICLOCKWISE CONTOUR INTEGRAL
    'prod': u'\u220f', # ∏ N-ARY PRODUCT
    'sqint': u'\u2a16', # ⨖ QUATERNION INTEGRAL OPERATOR
    'sum': u'\u2211', # ∑ N-ARY SUMMATION
    'varointclockwise': u'\u2232', # ∲ CLOCKWISE CONTOUR INTEGRAL
    }
mathopen = {
    # LaTeX opening (left-hand) delimiter commands -> Unicode.
    'Lbag': u'\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
    'langle': u'\u27e8', # ⟨ MATHEMATICAL LEFT ANGLE BRACKET
    'lbag': u'\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
    'lbrace': u'{', # { LEFT CURLY BRACKET
    'lbrack': u'[', # [ LEFT SQUARE BRACKET
    'lceil': u'\u2308', # ⌈ LEFT CEILING
    'lfloor': u'\u230a', # ⌊ LEFT FLOOR
    'lgroup': u'\u27ee', # ⟮ MATHEMATICAL LEFT FLATTENED PARENTHESIS
    'llbracket': u'\u27e6', # ⟦ MATHEMATICAL LEFT WHITE SQUARE BRACKET
    'llcorner': u'\u231e', # ⌞ BOTTOM LEFT CORNER
    'llparenthesis': u'\u2987', # ⦇ Z NOTATION LEFT IMAGE BRACKET
    'ulcorner': u'\u231c', # ⌜ TOP LEFT CORNER
    '{': u'{', # { LEFT CURLY BRACKET
    }
mathord = {
'#': u'#', # # NUMBER SIGN
'$': u'$', # $ DOLLAR SIGN
'%': u'%', # % PERCENT SIGN
'&': u'&', # & AMPERSAND
'AC': u'\u223f', # ∿ SINE WAVE
'APLcomment': u'\u235d', # ⍝ APL FUNCTIONAL SYMBOL UP SHOE JOT
'APLdownarrowbox': u'\u2357', # ⍗ APL FUNCTIONAL SYMBOL QUAD DOWNWARDS ARROW
'APLinput': u'\u235e', # ⍞ APL FUNCTIONAL SYMBOL QUOTE QUAD
'APLinv': u'\u2339', # ⌹ APL FUNCTIONAL SYMBOL QUAD DIVIDE
'APLleftarrowbox': u'\u2347', # ⍇ APL FUNCTIONAL SYMBOL QUAD LEFTWARDS ARROW
'APLlog': u'\u235f', # ⍟ APL FUNCTIONAL SYMBOL CIRCLE STAR
'APLrightarrowbox': u'\u2348', # ⍈ APL FUNCTIONAL SYMBOL QUAD RIGHTWARDS ARROW
'APLuparrowbox': u'\u2350', # ⍐ APL FUNCTIONAL SYMBOL QUAD UPWARDS ARROW
'Aries': u'\u2648', # ♈ ARIES
'CIRCLE': u'\u25cf', # ● BLACK CIRCLE
'CheckedBox': u'\u2611', # ☑ BALLOT BOX WITH CHECK
'Diamond': u'\u25c7', # ◇ WHITE DIAMOND
'Finv': u'\u2132', # Ⅎ TURNED CAPITAL F
'Game': u'\u2141', # ⅁ TURNED SANS-SERIF CAPITAL G
'Gemini': u'\u264a', # ♊ GEMINI
'Jupiter': u'\u2643', # ♃ JUPITER
'LEFTCIRCLE': u'\u25d6', # ◖ LEFT HALF BLACK CIRCLE
'LEFTcircle': u'\u25d0', # ◐ CIRCLE WITH LEFT HALF BLACK
'Leo': u'\u264c', # ♌ LEO
'Libra': u'\u264e', # ♎ LIBRA
'Mars': u'\u2642', # ♂ MALE SIGN
'Mercury': u'\u263f', # ☿ MERCURY
'Neptune': u'\u2646', # ♆ NEPTUNE
'Pluto': u'\u2647', # ♇ PLUTO
'RIGHTCIRCLE': u'\u25d7', # ◗ RIGHT HALF BLACK CIRCLE
'RIGHTcircle': u'\u25d1', # ◑ CIRCLE WITH RIGHT HALF BLACK
'Saturn': u'\u2644', # ♄ SATURN
'Scorpio': u'\u264f', # ♏ SCORPIUS
'Square': u'\u2610', # ☐ BALLOT BOX
'Sun': u'\u2609', # ☉ SUN
'Taurus': u'\u2649', # ♉ TAURUS
'Uranus': u'\u2645', # ♅ URANUS
'Venus': u'\u2640', # ♀ FEMALE SIGN
'XBox': u'\u2612', # ☒ BALLOT BOX WITH X
'Yup': u'\u2144', # ⅄ TURNED SANS-SERIF CAPITAL Y
'_': u'_', # _ LOW LINE
'angle': u'\u2220', # ∠ ANGLE
'aquarius': u'\u2652', # ♒ AQUARIUS
'aries': u'\u2648', # ♈ ARIES
'ast': u'*', # * ASTERISK
'backepsilon': u'\u03f6', # ϶ GREEK REVERSED LUNATE EPSILON SYMBOL
'backprime': u'\u2035', # ‵ REVERSED PRIME
'backslash': u'\\', # \ REVERSE SOLIDUS
'because': u'\u2235', # ∵ BECAUSE
'bigstar': u'\u2605', # ★ BLACK STAR
'binampersand': u'&', # & AMPERSAND
'blacklozenge': u'\u2b27', # ⬧ BLACK MEDIUM LOZENGE
'blacksmiley': u'\u263b', # ☻ BLACK SMILING FACE
'blacksquare': u'\u25fc', # ◼ BLACK MEDIUM SQUARE
'bot': u'\u22a5', # ⊥ UP TACK
'boy': u'\u2642', # ♂ MALE SIGN
'cancer': u'\u264b', # ♋ CANCER
'capricornus': u'\u2651', # ♑ CAPRICORN
'cdots': u'\u22ef', # ⋯ MIDLINE HORIZONTAL ELLIPSIS
'cent': u'\xa2', # ¢ CENT SIGN
'centerdot': u'\u2b1d', # ⬝ BLACK VERY SMALL SQUARE
'checkmark': u'\u2713', # ✓ CHECK MARK
'circlearrowleft': u'\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'circlearrowright': u'\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'circledR': u'\xae', # ® REGISTERED SIGN
'circledcirc': u'\u25ce', # ◎ BULLSEYE
'clubsuit': u'\u2663', # ♣ BLACK CLUB SUIT
'complement': u'\u2201', # ∁ COMPLEMENT
'dasharrow': u'\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'dashleftarrow': u'\u21e0', # ⇠ LEFTWARDS DASHED ARROW
'dashrightarrow': u'\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'diameter': u'\u2300', # ⌀ DIAMETER SIGN
'diamondsuit': u'\u2662', # ♢ WHITE DIAMOND SUIT
'earth': u'\u2641', # ♁ EARTH
'exists': u'\u2203', # ∃ THERE EXISTS
'female': u'\u2640', # ♀ FEMALE SIGN
'flat': u'\u266d', # ♭ MUSIC FLAT SIGN
'forall': u'\u2200', # ∀ FOR ALL
'fourth': u'\u2057', # ⁗ QUADRUPLE PRIME
'frownie': u'\u2639', # ☹ WHITE FROWNING FACE
'gemini': u'\u264a', # ♊ GEMINI
'girl': u'\u2640', # ♀ FEMALE SIGN
'heartsuit': u'\u2661', # ♡ WHITE HEART SUIT
'infty': u'\u221e', # ∞ INFINITY
'invneg': u'\u2310', # ⌐ REVERSED NOT SIGN
'jupiter': u'\u2643', # ♃ JUPITER
'ldots': u'\u2026', # … HORIZONTAL ELLIPSIS
'leftmoon': u'\u263e', # ☾ LAST QUARTER MOON
'leftturn': u'\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'leo': u'\u264c', # ♌ LEO
'libra': u'\u264e', # ♎ LIBRA
'lnot': u'\xac', # ¬ NOT SIGN
'lozenge': u'\u25ca', # ◊ LOZENGE
'male': u'\u2642', # ♂ MALE SIGN
'maltese': u'\u2720', # ✠ MALTESE CROSS
'mathdollar': u'$', # $ DOLLAR SIGN
'measuredangle': u'\u2221', # ∡ MEASURED ANGLE
'mercury': u'\u263f', # ☿ MERCURY
'mho': u'\u2127', # ℧ INVERTED OHM SIGN
'nabla': u'\u2207', # ∇ NABLA
'natural': u'\u266e', # ♮ MUSIC NATURAL SIGN
'neg': u'\xac', # ¬ NOT SIGN
'neptune': u'\u2646', # ♆ NEPTUNE
'nexists': u'\u2204', # ∄ THERE DOES NOT EXIST
'notbackslash': u'\u2340', # ⍀ APL FUNCTIONAL SYMBOL BACKSLASH BAR
'partial': u'\u2202', # ∂ PARTIAL DIFFERENTIAL
'pisces': u'\u2653', # ♓ PISCES
'pluto': u'\u2647', # ♇ PLUTO
'pounds': u'\xa3', # £ POUND SIGN
'prime': u'\u2032', # ′ PRIME
'quarternote': u'\u2669', # ♩ QUARTER NOTE
'rightmoon': u'\u263d', # ☽ FIRST QUARTER MOON
'rightturn': u'\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'sagittarius': u'\u2650', # ♐ SAGITTARIUS
'saturn': u'\u2644', # ♄ SATURN
'scorpio': u'\u264f', # ♏ SCORPIUS
'second': u'\u2033', # ″ DOUBLE PRIME
'sharp': u'\u266f', # ♯ MUSIC SHARP SIGN
'sim': u'~', # ~ TILDE
'slash': u'/', # / SOLIDUS
'smiley': u'\u263a', # ☺ WHITE SMILING FACE
'spadesuit': u'\u2660', # ♠ BLACK SPADE SUIT
'spddot': u'\xa8', # ¨ DIAERESIS
'sphat': u'^', # ^ CIRCUMFLEX ACCENT
'sphericalangle': u'\u2222', # ∢ SPHERICAL ANGLE
'sptilde': u'~', # ~ TILDE
'square': u'\u25fb', # ◻ WHITE MEDIUM SQUARE
'sun': u'\u263c', # ☼ WHITE SUN WITH RAYS
'taurus': u'\u2649', # ♉ TAURUS
'therefore': u'\u2234', # ∴ THEREFORE
'third': u'\u2034', # ‴ TRIPLE PRIME
'top': u'\u22a4', # ⊤ DOWN TACK
'triangleleft': u'\u25c5', # ◅ WHITE LEFT-POINTING POINTER
'triangleright': u'\u25bb', # ▻ WHITE RIGHT-POINTING POINTER
'twonotes': u'\u266b', # ♫ BEAMED EIGHTH NOTES
'uranus': u'\u2645', # ♅ URANUS
'varEarth': u'\u2641', # ♁ EARTH
'varnothing': u'\u2205', # ∅ EMPTY SET
'virgo': u'\u264d', # ♍ VIRGO
'wasylozenge': u'\u2311', # ⌑ SQUARE LOZENGE
'wasytherefore': u'\u2234', # ∴ THEREFORE
'yen': u'\xa5', # ¥ YEN SIGN
}
mathover = {
'overbrace': u'\u23de', # ⏞ TOP CURLY BRACKET
'wideparen': u'\u23dc', # ⏜ TOP PARENTHESIS
}
mathradical = {
'sqrt': u'\u221a', # √ SQUARE ROOT
'sqrt[3]': u'\u221b', # ∛ CUBE ROOT
'sqrt[4]': u'\u221c', # ∜ FOURTH ROOT
}
mathrel = {
'Bumpeq': u'\u224e', # ≎ GEOMETRICALLY EQUIVALENT TO
'Doteq': u'\u2251', # ≑ GEOMETRICALLY EQUAL TO
'Downarrow': u'\u21d3', # ⇓ DOWNWARDS DOUBLE ARROW
'Leftarrow': u'\u21d0', # ⇐ LEFTWARDS DOUBLE ARROW
'Leftrightarrow': u'\u21d4', # ⇔ LEFT RIGHT DOUBLE ARROW
'Lleftarrow': u'\u21da', # ⇚ LEFTWARDS TRIPLE ARROW
'Longleftarrow': u'\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'Longleftrightarrow': u'\u27fa', # ⟺ LONG LEFT RIGHT DOUBLE ARROW
'Longmapsfrom': u'\u27fd', # ⟽ LONG LEFTWARDS DOUBLE ARROW FROM BAR
'Longmapsto': u'\u27fe', # ⟾ LONG RIGHTWARDS DOUBLE ARROW FROM BAR
'Longrightarrow': u'\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'Lsh': u'\u21b0', # ↰ UPWARDS ARROW WITH TIP LEFTWARDS
'Mapsfrom': u'\u2906', # ⤆ LEFTWARDS DOUBLE ARROW FROM BAR
'Mapsto': u'\u2907', # ⤇ RIGHTWARDS DOUBLE ARROW FROM BAR
'Rightarrow': u'\u21d2', # ⇒ RIGHTWARDS DOUBLE ARROW
'Rrightarrow': u'\u21db', # ⇛ RIGHTWARDS TRIPLE ARROW
'Rsh': u'\u21b1', # ↱ UPWARDS ARROW WITH TIP RIGHTWARDS
'Subset': u'\u22d0', # ⋐ DOUBLE SUBSET
'Supset': u'\u22d1', # ⋑ DOUBLE SUPERSET
'Uparrow': u'\u21d1', # ⇑ UPWARDS DOUBLE ARROW
'Updownarrow': u'\u21d5', # ⇕ UP DOWN DOUBLE ARROW
'VDash': u'\u22ab', # ⊫ DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'Vdash': u'\u22a9', # ⊩ FORCES
'Vvdash': u'\u22aa', # ⊪ TRIPLE VERTICAL BAR RIGHT TURNSTILE
'apprge': u'\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'apprle': u'\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'approx': u'\u2248', # ≈ ALMOST EQUAL TO
'approxeq': u'\u224a', # ≊ ALMOST EQUAL OR EQUAL TO
'asymp': u'\u224d', # ≍ EQUIVALENT TO
'backsim': u'\u223d', # ∽ REVERSED TILDE
'backsimeq': u'\u22cd', # ⋍ REVERSED TILDE EQUALS
'barin': u'\u22f6', # ⋶ ELEMENT OF WITH OVERBAR
'barleftharpoon': u'\u296b', # ⥫ LEFTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'barrightharpoon': u'\u296d', # ⥭ RIGHTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'between': u'\u226c', # ≬ BETWEEN
'bowtie': u'\u22c8', # ⋈ BOWTIE
'bumpeq': u'\u224f', # ≏ DIFFERENCE BETWEEN
'circeq': u'\u2257', # ≗ RING EQUAL TO
'coloneq': u'\u2254', # ≔ COLON EQUALS
'cong': u'\u2245', # ≅ APPROXIMATELY EQUAL TO
'corresponds': u'\u2259', # ≙ ESTIMATES
'curlyeqprec': u'\u22de', # ⋞ EQUAL TO OR PRECEDES
'curlyeqsucc': u'\u22df', # ⋟ EQUAL TO OR SUCCEEDS
'curvearrowleft': u'\u21b6', # ↶ ANTICLOCKWISE TOP SEMICIRCLE ARROW
'curvearrowright': u'\u21b7', # ↷ CLOCKWISE TOP SEMICIRCLE ARROW
'dashv': u'\u22a3', # ⊣ LEFT TACK
'ddots': u'\u22f1', # ⋱ DOWN RIGHT DIAGONAL ELLIPSIS
'dlsh': u'\u21b2', # ↲ DOWNWARDS ARROW WITH TIP LEFTWARDS
'doteq': u'\u2250', # ≐ APPROACHES THE LIMIT
'doteqdot': u'\u2251', # ≑ GEOMETRICALLY EQUAL TO
'downarrow': u'\u2193', # ↓ DOWNWARDS ARROW
'downdownarrows': u'\u21ca', # ⇊ DOWNWARDS PAIRED ARROWS
'downdownharpoons': u'\u2965', # ⥥ DOWNWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'downharpoonleft': u'\u21c3', # ⇃ DOWNWARDS HARPOON WITH BARB LEFTWARDS
'downharpoonright': u'\u21c2', # ⇂ DOWNWARDS HARPOON WITH BARB RIGHTWARDS
'downuparrows': u'\u21f5', # ⇵ DOWNWARDS ARROW LEFTWARDS OF UPWARDS ARROW
'downupharpoons': u'\u296f', # ⥯ DOWNWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'drsh': u'\u21b3', # ↳ DOWNWARDS ARROW WITH TIP RIGHTWARDS
'eqcirc': u'\u2256', # ≖ RING IN EQUAL TO
'eqcolon': u'\u2255', # ≕ EQUALS COLON
'eqsim': u'\u2242', # ≂ MINUS TILDE
'eqslantgtr': u'\u2a96', # ⪖ SLANTED EQUAL TO OR GREATER-THAN
'eqslantless': u'\u2a95', # ⪕ SLANTED EQUAL TO OR LESS-THAN
'equiv': u'\u2261', # ≡ IDENTICAL TO
'fallingdotseq': u'\u2252', # ≒ APPROXIMATELY EQUAL TO OR THE IMAGE OF
'frown': u'\u2322', # ⌢ FROWN
'ge': u'\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geq': u'\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geqq': u'\u2267', # ≧ GREATER-THAN OVER EQUAL TO
'geqslant': u'\u2a7e', # ⩾ GREATER-THAN OR SLANTED EQUAL TO
'gets': u'\u2190', # ← LEFTWARDS ARROW
'gg': u'\u226b', # ≫ MUCH GREATER-THAN
'ggcurly': u'\u2abc', # ⪼ DOUBLE SUCCEEDS
'ggg': u'\u22d9', # ⋙ VERY MUCH GREATER-THAN
'gnapprox': u'\u2a8a', # ⪊ GREATER-THAN AND NOT APPROXIMATE
'gneq': u'\u2a88', # ⪈ GREATER-THAN AND SINGLE-LINE NOT EQUAL TO
'gneqq': u'\u2269', # ≩ GREATER-THAN BUT NOT EQUAL TO
'gnsim': u'\u22e7', # ⋧ GREATER-THAN BUT NOT EQUIVALENT TO
'gtrapprox': u'\u2a86', # ⪆ GREATER-THAN OR APPROXIMATE
'gtrdot': u'\u22d7', # ⋗ GREATER-THAN WITH DOT
'gtreqless': u'\u22db', # ⋛ GREATER-THAN EQUAL TO OR LESS-THAN
'gtreqqless': u'\u2a8c', # ⪌ GREATER-THAN ABOVE DOUBLE-LINE EQUAL ABOVE LESS-THAN
'gtrless': u'\u2277', # ≷ GREATER-THAN OR LESS-THAN
'gtrsim': u'\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'hash': u'\u22d5', # ⋕ EQUAL AND PARALLEL TO
'hookleftarrow': u'\u21a9', # ↩ LEFTWARDS ARROW WITH HOOK
'hookrightarrow': u'\u21aa', # ↪ RIGHTWARDS ARROW WITH HOOK
'iddots': u'\u22f0', # ⋰ UP RIGHT DIAGONAL ELLIPSIS
'impliedby': u'\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'implies': u'\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'in': u'\u2208', # ∈ ELEMENT OF
'le': u'\u2264', # ≤ LESS-THAN OR EQUAL TO
'leftarrow': u'\u2190', # ← LEFTWARDS ARROW
'leftarrowtail': u'\u21a2', # ↢ LEFTWARDS ARROW WITH TAIL
'leftarrowtriangle': u'\u21fd', # ⇽ LEFTWARDS OPEN-HEADED ARROW
'leftbarharpoon': u'\u296a', # ⥪ LEFTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'leftharpoondown': u'\u21bd', # ↽ LEFTWARDS HARPOON WITH BARB DOWNWARDS
'leftharpoonup': u'\u21bc', # ↼ LEFTWARDS HARPOON WITH BARB UPWARDS
'leftleftarrows': u'\u21c7', # ⇇ LEFTWARDS PAIRED ARROWS
'leftleftharpoons': u'\u2962', # ⥢ LEFTWARDS HARPOON WITH BARB UP ABOVE LEFTWARDS HARPOON WITH BARB DOWN
'leftrightarrow': u'\u2194', # ↔ LEFT RIGHT ARROW
'leftrightarrows': u'\u21c6', # ⇆ LEFTWARDS ARROW OVER RIGHTWARDS ARROW
'leftrightarrowtriangle': u'\u21ff', # ⇿ LEFT RIGHT OPEN-HEADED ARROW
'leftrightharpoon': u'\u294a', # ⥊ LEFT BARB UP RIGHT BARB DOWN HARPOON
'leftrightharpoons': u'\u21cb', # ⇋ LEFTWARDS HARPOON OVER RIGHTWARDS HARPOON
'leftrightsquigarrow': u'\u21ad', # ↭ LEFT RIGHT WAVE ARROW
'leftslice': u'\u2aa6', # ⪦ LESS-THAN CLOSED BY CURVE
'leftsquigarrow': u'\u21dc', # ⇜ LEFTWARDS SQUIGGLE ARROW
'leq': u'\u2264', # ≤ LESS-THAN OR EQUAL TO
'leqq': u'\u2266', # ≦ LESS-THAN OVER EQUAL TO
'leqslant': u'\u2a7d', # ⩽ LESS-THAN OR SLANTED EQUAL TO
'lessapprox': u'\u2a85', # ⪅ LESS-THAN OR APPROXIMATE
'lessdot': u'\u22d6', # ⋖ LESS-THAN WITH DOT
'lesseqgtr': u'\u22da', # ⋚ LESS-THAN EQUAL TO OR GREATER-THAN
'lesseqqgtr': u'\u2a8b', # ⪋ LESS-THAN ABOVE DOUBLE-LINE EQUAL ABOVE GREATER-THAN
'lessgtr': u'\u2276', # ≶ LESS-THAN OR GREATER-THAN
'lesssim': u'\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'lightning': u'\u21af', # ↯ DOWNWARDS ZIGZAG ARROW
'll': u'\u226a', # ≪ MUCH LESS-THAN
'llcurly': u'\u2abb', # ⪻ DOUBLE PRECEDES
'lll': u'\u22d8', # ⋘ VERY MUCH LESS-THAN
'lnapprox': u'\u2a89', # ⪉ LESS-THAN AND NOT APPROXIMATE
'lneq': u'\u2a87', # ⪇ LESS-THAN AND SINGLE-LINE NOT EQUAL TO
'lneqq': u'\u2268', # ≨ LESS-THAN BUT NOT EQUAL TO
'lnsim': u'\u22e6', # ⋦ LESS-THAN BUT NOT EQUIVALENT TO
'longleftarrow': u'\u27f5', # ⟵ LONG LEFTWARDS ARROW
'longleftrightarrow': u'\u27f7', # ⟷ LONG LEFT RIGHT ARROW
'longmapsfrom': u'\u27fb', # ⟻ LONG LEFTWARDS ARROW FROM BAR
'longmapsto': u'\u27fc', # ⟼ LONG RIGHTWARDS ARROW FROM BAR
'longrightarrow': u'\u27f6', # ⟶ LONG RIGHTWARDS ARROW
'looparrowleft': u'\u21ab', # ↫ LEFTWARDS ARROW WITH LOOP
'looparrowright': u'\u21ac', # ↬ RIGHTWARDS ARROW WITH LOOP
'mapsfrom': u'\u21a4', # ↤ LEFTWARDS ARROW FROM BAR
'mapsto': u'\u21a6', # ↦ RIGHTWARDS ARROW FROM BAR
'mid': u'\u2223', # ∣ DIVIDES
'models': u'\u22a7', # ⊧ MODELS
'multimap': u'\u22b8', # ⊸ MULTIMAP
'nLeftarrow': u'\u21cd', # ⇍ LEFTWARDS DOUBLE ARROW WITH STROKE
'nLeftrightarrow': u'\u21ce', # ⇎ LEFT RIGHT DOUBLE ARROW WITH STROKE
'nRightarrow': u'\u21cf', # ⇏ RIGHTWARDS DOUBLE ARROW WITH STROKE
'nVDash': u'\u22af', # ⊯ NEGATED DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'nVdash': u'\u22ae', # ⊮ DOES NOT FORCE
'ncong': u'\u2247', # ≇ NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO
'ne': u'\u2260', # ≠ NOT EQUAL TO
'nearrow': u'\u2197', # ↗ NORTH EAST ARROW
'neq': u'\u2260', # ≠ NOT EQUAL TO
'ngeq': u'\u2271', # ≱ NEITHER GREATER-THAN NOR EQUAL TO
'ngtr': u'\u226f', # ≯ NOT GREATER-THAN
'ni': u'\u220b', # ∋ CONTAINS AS MEMBER
'nleftarrow': u'\u219a', # ↚ LEFTWARDS ARROW WITH STROKE
'nleftrightarrow': u'\u21ae', # ↮ LEFT RIGHT ARROW WITH STROKE
'nleq': u'\u2270', # ≰ NEITHER LESS-THAN NOR EQUAL TO
'nless': u'\u226e', # ≮ NOT LESS-THAN
'nmid': u'\u2224', # ∤ DOES NOT DIVIDE
'notasymp': u'\u226d', # ≭ NOT EQUIVALENT TO
'notin': u'\u2209', # ∉ NOT AN ELEMENT OF
'notowner': u'\u220c', # ∌ DOES NOT CONTAIN AS MEMBER
'notslash': u'\u233f', # ⌿ APL FUNCTIONAL SYMBOL SLASH BAR
'nparallel': u'\u2226', # ∦ NOT PARALLEL TO
'nprec': u'\u2280', # ⊀ DOES NOT PRECEDE
'npreceq': u'\u22e0', # ⋠ DOES NOT PRECEDE OR EQUAL
'nrightarrow': u'\u219b', # ↛ RIGHTWARDS ARROW WITH STROKE
'nsim': u'\u2241', # ≁ NOT TILDE
'nsubseteq': u'\u2288', # ⊈ NEITHER A SUBSET OF NOR EQUAL TO
'nsucc': u'\u2281', # ⊁ DOES NOT SUCCEED
'nsucceq': u'\u22e1', # ⋡ DOES NOT SUCCEED OR EQUAL
'nsupseteq': u'\u2289', # ⊉ NEITHER A SUPERSET OF NOR EQUAL TO
'ntriangleleft': u'\u22ea', # ⋪ NOT NORMAL SUBGROUP OF
'ntrianglelefteq': u'\u22ec', # ⋬ NOT NORMAL SUBGROUP OF OR EQUAL TO
'ntriangleright': u'\u22eb', # ⋫ DOES NOT CONTAIN AS NORMAL SUBGROUP
'ntrianglerighteq': u'\u22ed', # ⋭ DOES NOT CONTAIN AS NORMAL SUBGROUP OR EQUAL
'nvDash': u'\u22ad', # ⊭ NOT TRUE
'nvdash': u'\u22ac', # ⊬ DOES NOT PROVE
'nwarrow': u'\u2196', # ↖ NORTH WEST ARROW
'owns': u'\u220b', # ∋ CONTAINS AS MEMBER
'parallel': u'\u2225', # ∥ PARALLEL TO
'perp': u'\u27c2', # ⟂ PERPENDICULAR
'pitchfork': u'\u22d4', # ⋔ PITCHFORK
'prec': u'\u227a', # ≺ PRECEDES
'precapprox': u'\u2ab7', # ⪷ PRECEDES ABOVE ALMOST EQUAL TO
'preccurlyeq': u'\u227c', # ≼ PRECEDES OR EQUAL TO
'preceq': u'\u2aaf', # ⪯ PRECEDES ABOVE SINGLE-LINE EQUALS SIGN
'precnapprox': u'\u2ab9', # ⪹ PRECEDES ABOVE NOT ALMOST EQUAL TO
'precnsim': u'\u22e8', # ⋨ PRECEDES BUT NOT EQUIVALENT TO
'precsim': u'\u227e', # ≾ PRECEDES OR EQUIVALENT TO
'propto': u'\u221d', # ∝ PROPORTIONAL TO
'restriction': u'\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'rightarrow': u'\u2192', # → RIGHTWARDS ARROW
'rightarrowtail': u'\u21a3', # ↣ RIGHTWARDS ARROW WITH TAIL
'rightarrowtriangle': u'\u21fe', # ⇾ RIGHTWARDS OPEN-HEADED ARROW
'rightbarharpoon': u'\u296c', # ⥬ RIGHTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'rightharpoondown': u'\u21c1', # ⇁ RIGHTWARDS HARPOON WITH BARB DOWNWARDS
'rightharpoonup': u'\u21c0', # ⇀ RIGHTWARDS HARPOON WITH BARB UPWARDS
'rightleftarrows': u'\u21c4', # ⇄ RIGHTWARDS ARROW OVER LEFTWARDS ARROW
'rightleftharpoon': u'\u294b', # ⥋ LEFT BARB DOWN RIGHT BARB UP HARPOON
'rightleftharpoons': u'\u21cc', # ⇌ RIGHTWARDS HARPOON OVER LEFTWARDS HARPOON
'rightrightarrows': u'\u21c9', # ⇉ RIGHTWARDS PAIRED ARROWS
'rightrightharpoons': u'\u2964', # ⥤ RIGHTWARDS HARPOON WITH BARB UP ABOVE RIGHTWARDS HARPOON WITH BARB DOWN
'rightslice': u'\u2aa7', # ⪧ GREATER-THAN CLOSED BY CURVE
'rightsquigarrow': u'\u21dd', # ⇝ RIGHTWARDS SQUIGGLE ARROW
'risingdotseq': u'\u2253', # ≓ IMAGE OF OR APPROXIMATELY EQUAL TO
'searrow': u'\u2198', # ↘ SOUTH EAST ARROW
'sim': u'\u223c', # ∼ TILDE OPERATOR
'simeq': u'\u2243', # ≃ ASYMPTOTICALLY EQUAL TO
'smallfrown': u'\u2322', # ⌢ FROWN
'smallsmile': u'\u2323', # ⌣ SMILE
'smile': u'\u2323', # ⌣ SMILE
'sqsubset': u'\u228f', # ⊏ SQUARE IMAGE OF
'sqsubseteq': u'\u2291', # ⊑ SQUARE IMAGE OF OR EQUAL TO
'sqsupset': u'\u2290', # ⊐ SQUARE ORIGINAL OF
'sqsupseteq': u'\u2292', # ⊒ SQUARE ORIGINAL OF OR EQUAL TO
'subset': u'\u2282', # ⊂ SUBSET OF
'subseteq': u'\u2286', # ⊆ SUBSET OF OR EQUAL TO
'subseteqq': u'\u2ac5', # ⫅ SUBSET OF ABOVE EQUALS SIGN
'subsetneq': u'\u228a', # ⊊ SUBSET OF WITH NOT EQUAL TO
'subsetneqq': u'\u2acb', # ⫋ SUBSET OF ABOVE NOT EQUAL TO
'succ': u'\u227b', # ≻ SUCCEEDS
'succapprox': u'\u2ab8', # ⪸ SUCCEEDS ABOVE ALMOST EQUAL TO
'succcurlyeq': u'\u227d', # ≽ SUCCEEDS OR EQUAL TO
'succeq': u'\u2ab0', # ⪰ SUCCEEDS ABOVE SINGLE-LINE EQUALS SIGN
'succnapprox': u'\u2aba', # ⪺ SUCCEEDS ABOVE NOT ALMOST EQUAL TO
'succnsim': u'\u22e9', # ⋩ SUCCEEDS BUT NOT EQUIVALENT TO
'succsim': u'\u227f', # ≿ SUCCEEDS OR EQUIVALENT TO
'supset': u'\u2283', # ⊃ SUPERSET OF
'supseteq': u'\u2287', # ⊇ SUPERSET OF OR EQUAL TO
'supseteqq': u'\u2ac6', # ⫆ SUPERSET OF ABOVE EQUALS SIGN
'supsetneq': u'\u228b', # ⊋ SUPERSET OF WITH NOT EQUAL TO
'supsetneqq': u'\u2acc', # ⫌ SUPERSET OF ABOVE NOT EQUAL TO
'swarrow': u'\u2199', # ↙ SOUTH WEST ARROW
'to': u'\u2192', # → RIGHTWARDS ARROW
'trianglelefteq': u'\u22b4', # ⊴ NORMAL SUBGROUP OF OR EQUAL TO
'triangleq': u'\u225c', # ≜ DELTA EQUAL TO
'trianglerighteq': u'\u22b5', # ⊵ CONTAINS AS NORMAL SUBGROUP OR EQUAL TO
'twoheadleftarrow': u'\u219e', # ↞ LEFTWARDS TWO HEADED ARROW
'twoheadrightarrow': u'\u21a0', # ↠ RIGHTWARDS TWO HEADED ARROW
'uparrow': u'\u2191', # ↑ UPWARDS ARROW
'updownarrow': u'\u2195', # ↕ UP DOWN ARROW
'updownarrows': u'\u21c5', # ⇅ UPWARDS ARROW LEFTWARDS OF DOWNWARDS ARROW
'updownharpoons': u'\u296e', # ⥮ UPWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'upharpoonleft': u'\u21bf', # ↿ UPWARDS HARPOON WITH BARB LEFTWARDS
'upharpoonright': u'\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'upuparrows': u'\u21c8', # ⇈ UPWARDS PAIRED ARROWS
'upupharpoons': u'\u2963', # ⥣ UPWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'vDash': u'\u22a8', # ⊨ TRUE
'varpropto': u'\u221d', # ∝ PROPORTIONAL TO
'vartriangleleft': u'\u22b2', # ⊲ NORMAL SUBGROUP OF
'vartriangleright': u'\u22b3', # ⊳ CONTAINS AS NORMAL SUBGROUP
'vdash': u'\u22a2', # ⊢ RIGHT TACK
'vdots': u'\u22ee', # ⋮ VERTICAL ELLIPSIS
}
mathunder = {
'underbrace': u'\u23df', # ⏟ BOTTOM CURLY BRACKET
}
space = {
':': u'\u205f', # MEDIUM MATHEMATICAL SPACE
'medspace': u'\u205f', # MEDIUM MATHEMATICAL SPACE
'quad': u'\u2001', # EM QUAD
}
| apache-2.0 | -4,293,114,052,776,101,000 | -8,423,690,309,385,758,000 | 50.285498 | 119 | 0.616182 | false |
pmaigutyak/mp-shop | delivery/models.py | 1 | 2389 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class DeliveryMethod(models.Model):
name = models.CharField(_('Name'), max_length=255)
code = models.CharField(_('Code'), max_length=255, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Delivery method')
verbose_name_plural = _('Delivery methods')
class DeliveryMethodField(models.ForeignKey):
def __init__(
self,
to=DeliveryMethod,
verbose_name=_('Delivery method'),
on_delete=models.CASCADE,
null=True,
*args, **kwargs):
super().__init__(
to,
verbose_name=verbose_name,
on_delete=on_delete,
null=null,
*args, **kwargs)
class Region(models.Model):
name = models.CharField(_('Name'), max_length=255)
reference = models.CharField(_('Reference'), max_length=255)
def __str__(self):
if self.reference == '71508128-9b87-11de-822f-000c2965ae0e':
return self.name
return '{} {}'.format(self.name, _('region'))
class Meta:
verbose_name = _('Region')
verbose_name_plural = _('Regions')
class City(models.Model):
region = models.ForeignKey(
Region,
verbose_name=_('Region'),
related_name='cities',
on_delete=models.CASCADE)
name = models.CharField(_('Name'), max_length=255)
reference = models.CharField(_('Reference'), max_length=255)
def __str__(self):
return '{} - {}'.format(self.name, self.region)
class Meta:
verbose_name = _('City')
verbose_name_plural = _('Cities')
class Warehouse(models.Model):
city = models.ForeignKey(
City,
verbose_name=_('City'),
related_name='warehouses',
on_delete=models.CASCADE)
delivery_method = models.ForeignKey(
DeliveryMethod,
verbose_name=_('Delivery method'),
on_delete=models.CASCADE)
name = models.CharField(_('Name'), max_length=255, db_index=True)
reference = models.CharField(_('Reference'), max_length=255)
def __str__(self):
return '{}, {}, {}'.format(self.delivery_method, self.city, self.name)
class Meta:
verbose_name = _('Warehouse')
verbose_name_plural = _('Warehouses')
| isc | -7,564,370,237,590,360,000 | 8,238,683,650,794,245,000 | 23.628866 | 78 | 0.580159 | false |
mabotech/mabo.task | py/report/docx_gen.py | 2 | 1230 | # -*- coding: utf-8 -*-
from docx import Document
from docx.shared import Inches
document = Document()
document.add_heading(u'FT汽车', 0)
p = document.add_paragraph(u'汽车工程研究院 ')
p.add_run(u'试验中心').bold = True
p.add_run(u'试验数据管理系统')
p.add_run(u'项目二期。').italic = True
document.add_heading(u'报告说明Heading, level 2', level=2)
document.add_paragraph('Intense quote', style='IntenseQuote')
document.add_paragraph(
u'数据项1 first item in unordered list', style='ListBullet'
)
document.add_paragraph(
u'有序数据项1 first item in ordered list', style='ListNumber'
)
document.add_picture('foton.png', width=Inches(1.25))
document.add_picture('report1.png', width=Inches(3.25))
document.add_heading(
u'数据项', level=2
)
table = document.add_table(rows=1, cols=3, style='TableGrid')
table.autofit = True
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Qty'
hdr_cells[1].text = 'Id'
hdr_cells[2].text = 'Desc'
for item in xrange(0,4):
row_cells = table.add_row().cells
row_cells[0].text = str(item)
row_cells[1].text = str(item)
row_cells[2].text = "item.desc"
document.add_page_break()
document.save('foton_test2.docx') | mit | 8,847,983,389,838,428,000 | 8,967,633,119,870,674,000 | 21.529412 | 61 | 0.691638 | false |
jcfr/mystic | examples/constraint1_example01.py | 1 | 1202 | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Example:
- Minimize Rosenbrock's Function with Powell's method.
Demonstrates:
- standard models
- minimal solver interface
- parameter constraints solver
- customized monitors
"""
# Powell's Directonal solver
from mystic.solvers import fmin_powell
# Rosenbrock function
from mystic.models import rosen
# tools
from mystic.monitors import VerboseMonitor
if __name__ == '__main__':
print "Powell's Method"
print "==============="
# initial guess
x0 = [0.8,1.2,0.7]
# define constraints function
def constraints(x):
# constrain the last x_i to be the same value as the first x_i
x[-1] = x[0]
return x
# configure monitor
stepmon = VerboseMonitor(1)
# use Powell's method to minimize the Rosenbrock function
solution = fmin_powell(rosen,x0,constraints=constraints,itermon=stepmon)
print solution
# end of file
| bsd-3-clause | -1,229,649,320,766,883,600 | -7,559,772,937,406,663,000 | 23.530612 | 78 | 0.686356 | false |
leiferikb/bitpop | src/tools/telemetry/telemetry/core/forwarders/cros_forwarder.py | 46 | 2295 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import subprocess
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.forwarders import do_nothing_forwarder
class CrOsForwarderFactory(forwarders.ForwarderFactory):
def __init__(self, cri):
super(CrOsForwarderFactory, self).__init__()
self._cri = cri
def Create(self, port_pairs, forwarding_flag='R'): # pylint: disable=W0221
if self._cri.local:
return do_nothing_forwarder.DoNothingForwarder(port_pairs)
return CrOsSshForwarder(self._cri, forwarding_flag, port_pairs)
class CrOsSshForwarder(forwarders.Forwarder):
def __init__(self, cri, forwarding_flag, port_pairs):
super(CrOsSshForwarder, self).__init__(port_pairs)
self._cri = cri
self._proc = None
self._forwarding_flag = forwarding_flag
if self._forwarding_flag == 'R':
command_line = ['-%s%i:%s:%i' % (self._forwarding_flag,
port_pair.remote_port,
self.host_ip,
port_pair.local_port)
for port_pair in port_pairs if port_pair]
else:
command_line = ['-%s%i:%s:%i' % (self._forwarding_flag,
port_pair.local_port,
self.host_ip,
port_pair.remote_port)
for port_pair in port_pairs if port_pair]
logging.debug('Forwarding to localhost:%d', port_pairs[0].local_port)
self._proc = subprocess.Popen(
self._cri.FormSSHCommandLine(['sleep', '999999999'], command_line),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=False)
util.WaitFor(
lambda: self._cri.IsHTTPServerRunningOnPort(self.host_port), 60)
logging.debug('Server started on %s:%d', self.host_ip, self.host_port)
@property
def host_port(self):
return self._port_pairs.http.remote_port
def Close(self):
if self._proc:
self._proc.kill()
self._proc = None
super(CrOsSshForwarder, self).Close()
| gpl-3.0 | -8,738,173,977,229,307,000 | -1,556,746,453,834,479,900 | 34.859375 | 77 | 0.610022 | false |
joachimmetz/plaso | tests/parsers/winreg_plugins/windows_version.py | 2 | 7444 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the WinVer Windows Registry plugin."""
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.lib import definitions
from plaso.parsers.winreg_plugins import windows_version
from tests import test_lib as shared_test_lib
from tests.parsers.winreg_plugins import test_lib
class WindowsRegistryInstallationEventDataTest(shared_test_lib.BaseTestCase):
"""Tests for the Windows installation event data attribute container."""
def testGetAttributeNames(self):
"""Tests the GetAttributeNames function."""
attribute_container = windows_version.WindowsRegistryInstallationEventData()
expected_attribute_names = [
'_event_data_stream_row_identifier', 'build_number', 'data_type',
'key_path', 'owner', 'parser', 'product_name', 'service_pack',
'version']
attribute_names = sorted(attribute_container.GetAttributeNames())
self.assertEqual(attribute_names, expected_attribute_names)
class WindowsVersionPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Windows version Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'CurrentVersion', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = 'Service Pack 1'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'CSDVersion', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=1892)
registry_key.AddValue(registry_value)
value_data = '5.1'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'CurrentVersion', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=1121)
registry_key.AddValue(registry_value)
value_data = b'\x13\x1aAP'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'InstallDate', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_LITTLE_ENDIAN, offset=1001)
registry_key.AddValue(registry_value)
value_data = 'MyTestOS'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'ProductName', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=123)
registry_key.AddValue(registry_value)
value_data = 'A Concerned Citizen'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'RegisteredOwner', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=612)
registry_key.AddValue(registry_value)
return registry_key
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = windows_version.WindowsVersionPlugin()
key_path = (
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
key_path = (
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
registry_key = self._CreateTestKey(key_path, '2012-08-31 20:09:55.123521')
plugin = windows_version.WindowsVersionPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
self.assertEqual(storage_writer.number_of_events, 2)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_values = (
'CSDVersion: [REG_SZ] Service Pack 1 '
'CurrentVersion: [REG_SZ] 5.1 '
'ProductName: [REG_SZ] MyTestOS '
'RegisteredOwner: [REG_SZ] A Concerned Citizen')
expected_event_values = {
'date_time': '2012-08-31 20:09:55.1235210',
'data_type': 'windows:registry:key_value',
'key_path': key_path,
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
'parser': plugin.NAME,
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN,
'values': expected_values}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
expected_event_values = {
'date_time': '2012-08-31 20:09:55',
'data_type': 'windows:registry:installation',
'key_path': key_path,
'owner': 'A Concerned Citizen',
'product_name': 'MyTestOS',
'service_pack': 'Service Pack 1',
'timestamp_desc': definitions.TIME_DESCRIPTION_INSTALLATION,
'version': '5.1'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testProcessFile(self):
"""Tests the Process function on a Windows Registry file."""
test_file_entry = self._GetTestFileEntry(['SOFTWARE-RunTests'])
key_path = (
'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = windows_version.WindowsVersionPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_events, 2)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_values = (
'BuildGUID: [REG_SZ] f4bf21b9-55fe-4ee8-a84b-0e91cbd5fe5d '
'BuildLab: [REG_SZ] 7601.win7sp1_gdr.111118-2330 '
'BuildLabEx: [REG_SZ] 7601.17727.amd64fre.win7sp1_gdr.111118-2330 '
'CSDBuildNumber: [REG_SZ] 1130 '
'CSDVersion: [REG_SZ] Service Pack 1 '
'CurrentBuild: [REG_SZ] 7601 '
'CurrentBuildNumber: [REG_SZ] 7601 '
'CurrentType: [REG_SZ] Multiprocessor Free '
'CurrentVersion: [REG_SZ] 6.1 '
'DigitalProductId: [REG_BINARY] (164 bytes) '
'DigitalProductId4: [REG_BINARY] (1272 bytes) '
'EditionID: [REG_SZ] Ultimate '
'InstallationType: [REG_SZ] Client '
'PathName: [REG_SZ] C:\\Windows '
'ProductId: [REG_SZ] 00426-065-0381817-86216 '
'ProductName: [REG_SZ] Windows 7 Ultimate '
'RegisteredOrganization: [REG_SZ] '
'RegisteredOwner: [REG_SZ] Windows User '
'SoftwareType: [REG_SZ] System '
'SystemRoot: [REG_SZ] C:\\Windows')
expected_event_values = {
'date_time': '2012-03-15 07:09:20.6718750',
'data_type': 'windows:registry:key_value',
'key_path': key_path,
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
'parser': plugin.NAME,
'values': expected_values}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,252,388,724,353,110,000 | -4,937,167,185,250,002,000 | 37.174359 | 80 | 0.682294 | false |
msiedlarek/grpc | src/python/grpcio/tests/unit/_links/_transmission_test.py | 9 | 10243 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests transmission of tickets across gRPC-on-the-wire."""
import unittest
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc._links import service
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.interfaces.links import links
from tests.unit import test_common
from tests.unit._links import _proto_scenarios
from tests.unit.framework.common import test_constants
from tests.unit.framework.interfaces.links import test_cases
from tests.unit.framework.interfaces.links import test_utilities
_IDENTITY = lambda x: x
class TransmissionTest(test_cases.TransmissionTest, unittest.TestCase):
  """Concrete transmission test run over real gRPC-on-the-wire links."""
  def create_transmitting_links(self):
    # Bring up a service-side link on an ephemeral port, then connect an
    # invocation-side link to it through a real channel.
    service_link = service.service_link(
        {self.group_and_method(): self.deserialize_request},
        {self.group_and_method(): self.serialize_response})
    port = service_link.add_port('[::]:0', None)
    service_link.start()
    channel = _intermediary_low.Channel('localhost:%d' % port, None)
    invocation_link = invocation.invocation_link(
        channel, 'localhost', None,
        {self.group_and_method(): self.serialize_request},
        {self.group_and_method(): self.deserialize_response})
    invocation_link.start()
    return invocation_link, service_link
  def destroy_transmitting_links(self, invocation_side_link, service_side_link):
    # The invocation side stops in one step; the service side in two phases.
    invocation_side_link.stop()
    service_side_link.begin_stop()
    service_side_link.end_stop()
  def create_invocation_initial_metadata(self):
    # The '-bin'-suffixed key carries binary metadata.
    return (
        ('first_invocation_initial_metadata_key', 'just a string value'),
        ('second_invocation_initial_metadata_key', '0123456789'),
        ('third_invocation_initial_metadata_key-bin', '\x00\x57' * 100),
    )
  def create_invocation_terminal_metadata(self):
    # The invocation side sends no terminal metadata.
    return None
  def create_service_initial_metadata(self):
    return (
        ('first_service_initial_metadata_key', 'just another string value'),
        ('second_service_initial_metadata_key', '9876543210'),
        ('third_service_initial_metadata_key-bin', '\x00\x59\x02' * 100),
    )
  def create_service_terminal_metadata(self):
    return (
        ('first_service_terminal_metadata_key', 'yet another string value'),
        ('second_service_terminal_metadata_key', 'abcdefghij'),
        ('third_service_terminal_metadata_key-bin', '\x00\x37' * 100),
    )
  def create_invocation_completion(self):
    # The invocation side supplies neither status code nor message.
    return None, None
  def create_service_completion(self):
    return (
        beta_interfaces.StatusCode.OK, b'An exuberant test "details" message!')
  def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
    # The transport may reorder/augment metadata; delegate the comparison.
    self.assertTrue(
        test_common.metadata_transmitted(
            original_metadata, transmitted_metadata),
        '%s erroneously transmitted as %s' % (
            original_metadata, transmitted_metadata))
class RoundTripTest(unittest.TestCase):
  """End-to-end ticket round trips between invocation and service links."""
  def testZeroMessageRoundTrip(self):
    # Drive a call with no request or response payloads to completion.
    test_operation_id = object()
    test_group = 'test package.Test Group'
    test_method = 'test method'
    identity_transformation = {(test_group, test_method): _IDENTITY}
    test_code = beta_interfaces.StatusCode.OK
    test_message = 'a test message'
    service_link = service.service_link(
        identity_transformation, identity_transformation)
    service_mate = test_utilities.RecordingLink()
    service_link.join_link(service_mate)
    port = service_link.add_port('[::]:0', None)
    service_link.start()
    channel = _intermediary_low.Channel('localhost:%d' % port, None)
    invocation_link = invocation.invocation_link(
        channel, None, None, identity_transformation, identity_transformation)
    invocation_mate = test_utilities.RecordingLink()
    invocation_link.join_link(invocation_mate)
    invocation_link.start()
    # Single ticket carrying both the call start and its COMPLETION.
    invocation_ticket = links.Ticket(
        test_operation_id, 0, test_group, test_method,
        links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
        None, None, None, None, links.Ticket.Termination.COMPLETION, None)
    invocation_link.accept_ticket(invocation_ticket)
    service_mate.block_until_tickets_satisfy(test_cases.terminated)
    service_ticket = links.Ticket(
        service_mate.tickets()[-1].operation_id, 0, None, None, None, None,
        None, None, None, None, test_code, test_message,
        links.Ticket.Termination.COMPLETION, None)
    service_link.accept_ticket(service_ticket)
    invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
    invocation_link.stop()
    service_link.begin_stop()
    service_link.end_stop()
    # Both sides must observe COMPLETION, and the status must round-trip.
    self.assertIs(
        service_mate.tickets()[-1].termination,
        links.Ticket.Termination.COMPLETION)
    self.assertIs(
        invocation_mate.tickets()[-1].termination,
        links.Ticket.Termination.COMPLETION)
    self.assertIs(invocation_mate.tickets()[-1].code, test_code)
    self.assertEqual(invocation_mate.tickets()[-1].message, test_message)
  def _perform_scenario_test(self, scenario):
    """Run one request/response scenario and verify observed payloads."""
    test_operation_id = object()
    test_group, test_method = scenario.group_and_method()
    test_code = beta_interfaces.StatusCode.OK
    test_message = 'a scenario test message'
    service_link = service.service_link(
        {(test_group, test_method): scenario.deserialize_request},
        {(test_group, test_method): scenario.serialize_response})
    service_mate = test_utilities.RecordingLink()
    service_link.join_link(service_mate)
    port = service_link.add_port('[::]:0', None)
    service_link.start()
    channel = _intermediary_low.Channel('localhost:%d' % port, None)
    invocation_link = invocation.invocation_link(
        channel, 'localhost', None,
        {(test_group, test_method): scenario.serialize_request},
        {(test_group, test_method): scenario.deserialize_response})
    invocation_mate = test_utilities.RecordingLink()
    invocation_link.join_link(invocation_mate)
    invocation_link.start()
    invocation_ticket = links.Ticket(
        test_operation_id, 0, test_group, test_method,
        links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
        None, None, None, None, None, None)
    invocation_link.accept_ticket(invocation_ticket)
    requests = scenario.requests()
    # Send each request, wait for it to land, echo the scenario's response,
    # and wait for that response to arrive back at the invocation side.
    for request_index, request in enumerate(requests):
      request_ticket = links.Ticket(
          test_operation_id, 1 + request_index, None, None, None, None, 1, None,
          request, None, None, None, None, None)
      invocation_link.accept_ticket(request_ticket)
      service_mate.block_until_tickets_satisfy(
          test_cases.at_least_n_payloads_received_predicate(1 + request_index))
      response_ticket = links.Ticket(
          service_mate.tickets()[0].operation_id, request_index, None, None,
          None, None, 1, None, scenario.response_for_request(request), None,
          None, None, None, None)
      service_link.accept_ticket(response_ticket)
      invocation_mate.block_until_tickets_satisfy(
          test_cases.at_least_n_payloads_received_predicate(1 + request_index))
    request_count = len(requests)
    invocation_completion_ticket = links.Ticket(
        test_operation_id, request_count + 1, None, None, None, None, None,
        None, None, None, None, None, links.Ticket.Termination.COMPLETION,
        None)
    invocation_link.accept_ticket(invocation_completion_ticket)
    service_mate.block_until_tickets_satisfy(test_cases.terminated)
    service_completion_ticket = links.Ticket(
        service_mate.tickets()[0].operation_id, request_count, None, None, None,
        None, None, None, None, None, test_code, test_message,
        links.Ticket.Termination.COMPLETION, None)
    service_link.accept_ticket(service_completion_ticket)
    invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
    invocation_link.stop()
    service_link.begin_stop()
    service_link.end_stop()
    # Collect only payload-bearing tickets on each side for verification.
    observed_requests = tuple(
        ticket.payload for ticket in service_mate.tickets()
        if ticket.payload is not None)
    observed_responses = tuple(
        ticket.payload for ticket in invocation_mate.tickets()
        if ticket.payload is not None)
    self.assertTrue(scenario.verify_requests(observed_requests))
    self.assertTrue(scenario.verify_responses(observed_responses))
  def testEmptyScenario(self):
    self._perform_scenario_test(_proto_scenarios.EmptyScenario())
  def testBidirectionallyUnaryScenario(self):
    self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
  def testBidirectionallyStreamingScenario(self):
    self._perform_scenario_test(
        _proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
  # Run every test in this module with per-test output.
  unittest.main(verbosity=2)
| bsd-3-clause | -8,928,144,277,437,858,000 | 8,662,435,273,029,835,000 | 41.857741 | 80 | 0.714342 | false |
sahpat229/POLLUTION | POLLUTION/settings.py | 1 | 3175 | """
Django settings for POLLUTION project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'egppn2gm^$=yub$1y*co6(#cb9=st%youf!=5@_p92%j^vxjbr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty list is accepted while DEBUG is True; real hostnames are
# required for production deployments.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# NOTE(review): Django 1.9-era setting name; from Django 1.10 onwards this
# is spelled MIDDLEWARE. Fine for the 1.9 target this file was generated for.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'POLLUTION.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'POLLUTION.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| mit | 4,842,021,011,867,934,000 | -8,915,552,791,744,638,000 | 25.239669 | 91 | 0.689764 | false |
joshblum/django-with-audit | django/contrib/gis/geometry/test_data.py | 364 | 2994 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import os
from django.contrib import gis
from django.utils import simplejson
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
    """Recursively convert every list/tuple nested in *seq* into a tuple."""
    if not isinstance(seq, (list, tuple)):
        # Non-sequence leaves pass through untouched.
        return seq
    return tuple(tuplize(item) for item in seq)
def strconvert(d):
    """Return a copy of dict *d* with every key coerced to ``str``.

    Uses ``items()`` rather than the Python-2-only ``iteritems()`` so the
    helper behaves identically under Python 2 and Python 3.
    """
    return dict((str(k), v) for k, v in d.items())
def get_ds_file(name, ext):
    """Return the path of the data-source file ``<name>/<name>.<ext>``
    inside the TEST_DATA directory."""
    filename = '%s.%s' % (name, ext)
    return os.path.join(TEST_DATA, name, filename)
class TestObj(object):
    """Base testing object: every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class TestDS(TestObj):
    """
    Object for testing GDAL data sources.
    """
    def __init__(self, name, **kwargs):
        # Shapefile is default extension, unless specified otherwise.
        ext = kwargs.pop('ext', 'shp')
        # Full path to the data source file: <TEST_DATA>/<name>/<name>.<ext>
        self.ds = get_ds_file(name, ext)
        # Remaining keyword args become attributes via TestObj.
        super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
    """
    Testing object used for wrapping reference geometry data
    in GEOS/GDAL tests.
    """
    def __init__(self, **kwargs):
        # Converting lists to tuples of certain keyword args
        # so coordinate test cases will match (JSON has no
        # concept of tuple).
        coords = kwargs.pop('coords', None)
        if coords:
            self.coords = tuplize(coords)
        centroid = kwargs.pop('centroid', None)
        if centroid:
            self.centroid = tuple(centroid)
        ext_ring_cs = kwargs.pop('ext_ring_cs', None)
        if ext_ring_cs:
            ext_ring_cs = tuplize(ext_ring_cs)
        # Unlike coords/centroid, this attribute is always set (None when
        # the keyword was absent or falsy).
        self.ext_ring_cs = ext_ring_cs
        super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
    """
    Each attribute of this object is a list of `TestGeom` instances.
    """
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            # strconvert() coerces JSON's unicode keys to str so they can be
            # expanded as keyword arguments (required on Python 2).
            setattr(self, key, [TestGeom(**strconvert(kw)) for kw in value])
class TestDataMixin(object):
    """
    Mixin used for GEOS/GDAL test cases that defines a `geometries`
    property, which returns and/or loads the reference geometry data.
    """
    @property
    def geometries(self):
        # The fixture is parsed once and cached in the module-level global,
        # so all test cases share the same TestGeomSet instance.
        global GEOMETRIES
        if GEOMETRIES is None:
            # Load up the test geometry data from fixture into global.
            gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
            geometries = simplejson.loads(gzf.read())
            GEOMETRIES = TestGeomSet(**strconvert(geometries))
        return GEOMETRIES
| bsd-3-clause | -4,334,164,896,184,533,000 | -6,711,493,452,480,084,000 | 27.514286 | 78 | 0.611556 | false |
typesupply/dialogKit | examples/GlyphViewDemo.py | 3 | 3938 | from FL import *
from dialogKit import *
class GlyphViewDemo(object):
    """Modal dialog demoing GlyphView: pick a glyph from a list and toggle
    its display options via checkboxes.

    The original version repeated the same compare-and-set stanza once per
    option (11 times); both the checkbox construction and the option sync
    are now driven from a single table, so adding an option is one line.
    """

    # One row per display option:
    # (window attribute, checkbox title, initial value, GlyphView accessor
    # suffix -- the view exposes 'getShow<suffix>' / 'setShow<suffix>').
    _OPTIONS = [
        ('fillCheckBox', 'Fill', True, 'Fill'),
        ('outlineCheckBox', 'Outline', False, 'Outline'),
        ('pointsCheckBox', 'Points', True, 'OnCurvePoints'),
        ('descenderCheckBox', 'Descender', True, 'Descender'),
        ('baselineCheckBox', 'Baseline', True, 'Baseline'),
        ('xHeightCheckBox', 'X Height', True, 'XHeight'),
        ('ascenderCheckBox', 'Ascender', True, 'Ascender'),
        ('capHeightCheckBox', 'Cap Height', True, 'CapHeight'),
        ('upmTopCheckBox', 'UPM Top', False, 'UPMTop'),
        ('leftCheckBox', 'Left', True, 'LeftSidebearing'),
        ('rightCheckBox', 'Right', True, 'RightSidebearing'),
    ]

    def __init__(self):
        self.font = fl.font
        # Map glyph name -> glyph object for quick lookup from the list.
        self.glyphs = {}
        for glyph in self.font.glyphs:
            self.glyphs[glyph.name] = glyph
        glyphNames = self.glyphs.keys()
        glyphNames.sort()
        #
        self.w = ModalDialog((700, 500), 'GlyphView Demo')
        self.w.glyphList = List((10, 10, 150, -60), glyphNames, callback=self.glyphListCallback)
        self.w.view = GlyphView((170, 10, 400, -60), None, None)
        # Build one checkbox per display option, stacked 25 pixels apart.
        for optionIndex, (attrName, title, default, _suffix) in enumerate(self._OPTIONS):
            checkBox = CheckBox((580, 10 + 25 * optionIndex, -10, 20), title,
                    value=default, callback=self.viewOptionsCallback)
            setattr(self.w, attrName, checkBox)
        #
        self.w.open()

    def glyphListCallback(self, sender):
        """Show the glyph now selected in the list (or clear the view)."""
        selection = sender.getSelection()
        if not selection:
            font = glyph = None
        else:
            glyphName = sender[selection[0]]
            glyph = self.glyphs[glyphName]
            font = self.font
        self.w.view.set(font, glyph)
        self.w.view.update()

    def viewOptionsCallback(self, sender):
        """Push any changed checkbox values into the view, then redraw."""
        view = self.w.view
        for attrName, _title, _default, suffix in self._OPTIONS:
            value = getattr(self.w, attrName).get()
            # Only call the setter when the value actually changed,
            # matching the original stanza-per-option behavior.
            if value != getattr(view, 'getShow' + suffix)():
                getattr(view, 'setShow' + suffix)(value)
        view.update()
GlyphViewDemo() | mit | 6,258,010,898,928,361,000 | -516,588,185,397,638,100 | 56.926471 | 125 | 0.652108 | false |
vaygr/ansible | lib/ansible/module_utils/facts/system/chroot.py | 40 | 1029 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.collector import BaseFactCollector
def is_chroot():
    """Return True if this process appears to be running inside a chroot.

    Detection order:
      1. Debian's ``debian_chroot`` environment variable wins outright.
      2. Compare device/inode of ``/`` against ``/proc/1/root/.``; a
         mismatch means our root differs from PID 1's root.
      3. If /proc is unavailable or unreadable, fall back to checking that
         ``/`` has inode number 2 (the conventional root inode).
    """
    is_chroot = None

    if os.environ.get('debian_chroot', False):
        is_chroot = True
    else:
        my_root = os.stat('/')
        try:
            # check if my file system is the root one
            proc_root = os.stat('/proc/1/root/.')
            is_chroot = my_root.st_ino != proc_root.st_ino or my_root.st_dev != proc_root.st_dev
        except OSError:
            # Not root, or no /proc. Catching only OSError (instead of the
            # previous bare 'except') lets KeyboardInterrupt/SystemExit
            # propagate instead of being silently swallowed.
            is_chroot = (my_root.st_ino != 2)

    return is_chroot
class ChrootFactCollector(BaseFactCollector):
    # Fact collector exposing a single boolean fact: 'is_chroot'.
    name = 'chroot'
    _fact_ids = set(['is_chroot'])
    def collect(self, module=None, collected_facts=None):
        # Parameters are accepted for interface compatibility but unused.
        return {'is_chroot': is_chroot()}
| gpl-3.0 | -6,847,404,865,438,422,000 | 1,790,266,428,855,481,000 | 28.4 | 96 | 0.623907 | false |
Stanford-Online/edx-platform | openedx/core/djangoapps/oauth_dispatch/dot_overrides/validators.py | 10 | 5245 | """
Classes that override default django-oauth-toolkit behavior
"""
from __future__ import unicode_literals
from datetime import datetime
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.backends import AllowAllUsersModelBackend as UserModelBackend
from django.db.models.signals import pre_save
from django.dispatch import receiver
from oauth2_provider.models import AccessToken
from oauth2_provider.oauth2_validators import OAuth2Validator
from oauth2_provider.scopes import get_scopes_backend
from pytz import utc
from ratelimitbackend.backends import RateLimitMixin
from ..models import RestrictedApplication
@receiver(pre_save, sender=AccessToken)
def on_access_token_presave(sender, instance, *args, **kwargs): # pylint: disable=unused-argument
    """
    Mark AccessTokens as expired for 'restricted applications' if required.
    """
    if RestrictedApplication.should_expire_access_token(instance.application):
        # Back-date the expiry to the Unix epoch so the token is always
        # already expired when issued.
        instance.expires = datetime(1970, 1, 1, tzinfo=utc)
# RateLimitMixin is listed first so its throttling wraps the model
# backend's authenticate() via the MRO.
class EdxRateLimitedAllowAllUsersModelBackend(RateLimitMixin, UserModelBackend):
    """
    Authentication backend needed to incorporate rate limiting of login attempts - but also
    enabling users with is_active of False in the Django auth_user model to still authenticate.
    This is necessary for mobile users using 3rd party auth who have not activated their accounts,
    Inactive users who use 1st party auth (username/password auth) will still fail login attempts,
    just at a higher layer, in the login_user view.
    See: https://openedx.atlassian.net/browse/TNL-4516
    """
    pass
class EdxOAuth2Validator(OAuth2Validator):
    """
    Validator class that implements edX-specific custom behavior:
        * It allows users to log in with their email or username.
        * It does not require users to be active before logging in.
    """
    def validate_user(self, username, password, client, request, *args, **kwargs):
        """
        Authenticate users, but allow inactive users (with u.is_active == False)
        to authenticate.
        """
        user = self._authenticate(username=username, password=password)
        if user is not None:
            request.user = user
            return True
        return False
    def _authenticate(self, username, password):
        """
        Authenticate the user, allowing the user to identify themselves either
        by username or email
        """
        # First try the supplied identifier as a username.
        authenticated_user = authenticate(username=username, password=password)
        if authenticated_user is None:
            # Fall back to treating the identifier as an email address and
            # retrying with the matching account's username.
            UserModel = get_user_model() # pylint: disable=invalid-name
            try:
                email_user = UserModel.objects.get(email=username)
            except UserModel.DoesNotExist:
                authenticated_user = None
            else:
                authenticated_user = authenticate(username=email_user.username, password=password)
        return authenticated_user
    def save_bearer_token(self, token, request, *args, **kwargs):
        """
        Ensure that access tokens issued via client credentials grant are
        associated with the owner of the ``Application``.
        Also, update the `expires_in` value in the token response for
        RestrictedApplications.
        """
        # Stash the original attributes so they can be restored at the end.
        grant_type = request.grant_type
        user = request.user
        if grant_type == 'client_credentials':
            # Temporarily remove the grant type to avoid triggering the super method's code that removes request.user.
            request.grant_type = None
            # Ensure the tokens get associated with the correct user since DOT does not normally
            # associate access tokens issued with the client_credentials grant to users.
            request.user = request.client.user
        super(EdxOAuth2Validator, self).save_bearer_token(token, request, *args, **kwargs)
        if RestrictedApplication.should_expire_access_token(request.client):
            # Since RestrictedApplications will override the DOT defined expiry, so that access_tokens
            # are always expired, we need to re-read the token from the database and then calculate the
            # expires_in (in seconds) from what we stored in the database. This value
            # should be negative, meaning that it is already expired.
            access_token = AccessToken.objects.get(token=token['access_token'])
            utc_now = datetime.utcnow().replace(tzinfo=utc)
            expires_in = (access_token.expires - utc_now).total_seconds()
            # assert that RestrictedApplications only issue expired tokens
            # blow up processing if we see otherwise
            assert expires_in < 0
            token['expires_in'] = expires_in
        # Restore the original request attributes
        request.grant_type = grant_type
        request.user = user
    def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
        """
        Ensure required scopes are permitted (as specified in the settings file)
        """
        available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)
        return set(scopes).issubset(set(available_scopes))
| agpl-3.0 | -3,458,667,064,096,680,000 | 3,589,447,211,401,428,000 | 40.96 | 118 | 0.689609 | false |
abligh/xen4.2-minideb | tools/python/xen/xend/server/BlktapController.py | 26 | 10719 | # Copyright (c) 2005, XenSource Ltd.
import string, re, os
from xen.xend.server.blkif import BlkifController
from xen.xend.XendLogging import log
from xen.util.xpopen import xPopen3
phantomDev = 0;
phantomId = 0;
blktap1_disk_types = [
'aio',
'sync',
'vmdk',
'ram',
'qcow',
'qcow2',
'ioemu',
]
blktap2_disk_types = [
'aio',
'ram',
'qcow',
'vhd',
'remus',
]
blktap_disk_types = blktap1_disk_types + blktap2_disk_types
def doexec(args, inputtext=None):
    """Run *args* as a child process and return (returncode, stdout, stderr).

    Note: stdout/stderr are the child's output file objects, not their
    contents; callers are responsible for reading and closing them.
    """
    child = xPopen3(args, True)
    if inputtext is not None:
        child.tochild.write(inputtext)
    out = child.fromchild
    err = child.childerr
    status = child.wait()
    return (status, out, err)
# blktap1 device controller
class BlktapController(BlkifController):
    """blktap1 device controller.

    A blkif controller whose backend is the (deprecated) blktap driver.
    For HVM guests it additionally creates a phantom loopback vbd in dom0
    so the device model can see the disk image.
    """
    def __init__(self, vm):
        BlkifController.__init__(self, vm)
    def frontendRoot(self):
        """@see DevController#frontendRoot"""
        return "%s/device/vbd" % self.vm.getDomainPath()
    def getDeviceDetails(self, config):
        """Build (devid, backend, frontend) details; for HVM guests first
        allocate an unused xvd* name and create a phantom dom0 vbd."""
        (devid, back, front) = BlkifController.getDeviceDetails(self, config)
        phantomDevid = 0
        wrapped = False
        try:
            imagetype = self.vm.info['image']['type']
        except (KeyError, TypeError):
            # No image information available; treat as non-HVM.
            # (Was a bare 'except'; narrowed to the errors dict access raises.)
            imagetype = ""
        if imagetype == 'hvm':
            # Candidate device letters; scanning xvd<letter><1..15> for a
            # /dev node that does not exist yet.
            index = ['c', 'd', 'e', 'f', 'g', 'h', 'i', \
                     'j', 'l', 'm', 'n', 'o', 'p']
            while True:
                global phantomDev
                global phantomId
                phantomId = phantomId + 1
                if phantomId == 16:
                    if index[phantomDev] == index[-1]:
                        if wrapped:
                            # NOTE(review): VmError is not imported in this
                            # module; confirm it is provided (e.g. from
                            # xen.xend.XendError) before relying on this path.
                            raise VmError(
                                "No loopback block devices are available.")
                        wrapped = True
                        phantomDev = 0
                    else:
                        phantomDev = phantomDev + 1
                    phantomId = 1
                devname = 'xvd%s%d' % (index[phantomDev], phantomId)
                try:
                    os.stat('/dev/%s' % devname)
                except OSError:
                    # stat failed => no such device node => name is free.
                    break
            vbd = { 'mode': 'w', 'device': devname }
            fn = 'tap:%s' % back['params']
            # recurse ... by creating the vbd, then fallthrough
            # and finish creating the original device
            from xen.xend import XendDomain
            dom0 = XendDomain.instance().privilegedDomain()
            phantomDevid = dom0.create_phantom_vbd_with_vdi(vbd, fn)
            # we need to wait for this device at a higher level
            # the vbd that gets created will have a link to us
            # and will let them do it there
        # add a hook to point to the phantom device,
        # root path is always the same (dom0 tap)
        if phantomDevid != 0:
            front['phantom_vbd'] = '/local/domain/0/backend/tap/0/%s' \
                                   % str(phantomDevid)
        return (devid, back, front)
class Blktap2Controller(BlktapController):
    """blktap2 device controller: fronts blktap2 devices with blkback,
    falling back to the deprecated blktap1 path when needed."""
    def __init__(self, vm):
        BlktapController.__init__(self, vm)
    def backendPath(self, backdom, devid):
        """Return the xenstore backend path; blktap2 devices live under the
        'vbd' backend class, blktap1 devices under 'tap'."""
        if self.deviceClass == 'tap2':
            deviceClass = 'vbd'
        else:
            deviceClass = 'tap'
        return "%s/backend/%s/%s/%d" % (backdom.getDomainPath(),
                                        deviceClass,
                                        self.vm.getDomid(), devid)
    def getDeviceDetails(self, config):
        (devid, back, front) = BlktapController.getDeviceDetails(self, config)
        if self.deviceClass == 'tap2':
            # since blktap2 uses blkback as a backend the 'params' field contains
            # the path to the blktap2 device (/dev/xen/blktap-2/tapdev*). As well,
            # we need to store the params used to create the blktap2 device
            # (tap:tapdisk:<driver>:/<image-path>)
            tapdisk_uname = config.get('tapdisk_uname', '')
            (_, tapdisk_params) = string.split(tapdisk_uname, ':', 1)
            back['tapdisk-params'] = tapdisk_params
        return (devid, back, front)
    def getDeviceConfiguration(self, devid, transaction = None):
        # this is a blktap2 device, so we need to overwrite the 'params' field
        # with the actual blktap2 parameters. (the vbd parameters are of little
        # use to us)
        config = BlktapController.getDeviceConfiguration(self, devid, transaction)
        if transaction is None:
            tapdisk_params = self.readBackend(devid, 'tapdisk-params')
        else:
            tapdisk_params = self.readBackendTxn(transaction, devid, 'tapdisk-params')
        if tapdisk_params:
            config['uname'] = 'tap:' + tapdisk_params
        return config
    def createDevice(self, config):
        """Create the device, delegating to the blktap1 controller when the
        disk type or a failing blktap2 module requires it."""
        uname = config.get('uname', '')
        try:
            (typ, subtyp, params, file) = string.split(uname, ':', 3)
            if subtyp not in ('tapdisk', 'ioemu'):
                raise ValueError('invalid subtype')
        except ValueError:
            # Fewer than four components (or a bad subtype): fall back to the
            # three-part "tap:<type>:<path>" form.  (Was a bare 'except';
            # only ValueError can be raised here.)
            (typ, params, file) = string.split(uname, ':', 2)
            subtyp = 'tapdisk'
        # Bug fix: "typ in ('tap')" tested substring membership in the
        # string 'tap' (so e.g. 'ta' matched); a one-element tuple is meant.
        if typ in ('tap',):
            if subtyp in ('tapdisk', 'ioemu'):
                if params not in blktap2_disk_types or \
                        TapdiskController.check():
                    # pass this device off to BlktapController
                    log.warn('WARNING: using deprecated blktap module')
                    self.deviceClass = 'tap'
                    devid = BlktapController.createDevice(self, config)
                    self.deviceClass = 'tap2'
                    return devid
        device = TapdiskController.create(params, file)
        # modify the configuration to create a blkback for the underlying
        # blktap2 device. Note: we need to preserve the original tapdisk uname
        # (it is used during save/restore and for managed domains).
        config.update({'tapdisk_uname' : uname})
        config.update({'uname' : 'phy:' + device.rstrip()})
        devid = BlkifController.createDevice(self, config)
        config.update({'uname' : uname})
        config.pop('tapdisk_uname')
        return devid
    # This function is called from a thread when the
    # domain is detached from the disk.
    def finishDeviceCleanup(self, backpath, path):
        """Perform any device specific cleanup
        @backpath backend xenstore path.
        @path frontend device path
        """
        # Figure out what we're going to wait on.
        self.waitForBackend_destroy(backpath)
        TapdiskController.destroy(path)
class TapdiskException(Exception):
    """Raised when a tap-ctl invocation exits with a non-zero status."""
class TapdiskController(object):
    '''class which encapsulates all tapdisk control operations'''
    # Name of the control binary and the device-node prefix it manages.
    TAP_CTL = 'tap-ctl'
    TAP_DEV = '/dev/xen/blktap-2/tapdev'
    class Tapdisk(object):
        # Value object describing one running tapdisk instance.
        def __init__(self, pid=None, minor=-1, state=None,
                     dtype='', image=None, device=None):
            self.pid = pid
            self.minor = minor
            self.state = state
            self.dtype = dtype
            self.image = image
            self.device = device
        def __str__(self):
            return 'image=%s pid=%s minor=%s state=%s type=%s device=%s' \
                % (self.image, self.pid, self.minor, self.state, self.dtype,
                   self.device)
    @staticmethod
    def exc(*args):
        # Run 'tap-ctl <args...>'; return its stripped stdout, raising
        # TapdiskException on a non-zero exit status.
        rc, stdout, stderr = doexec([TapdiskController.TAP_CTL] + list(args))
        out, err = stdout.read().strip(), stderr.read().strip()
        stdout.close()
        stderr.close()
        if rc:
            raise TapdiskException('%s failed (%s %s %s)' % \
                                   (args, rc, out, err))
        return out
    @staticmethod
    def check():
        # Return 0 when the blktap2 module is usable, -1 otherwise.
        # NOTE: Python-2-only 'except ..., e' syntax below.
        try:
            TapdiskController.exc('check')
            return 0
        except Exception, e:
            log.warn("tapdisk2 check failed: %s" % e)
            return -1
    @staticmethod
    def list():
        # Parse 'tap-ctl list' output into a list of Tapdisk objects.
        tapdisks = []
        _list = TapdiskController.exc('list')
        if not _list: return []
        for line in _list.splitlines():
            tapdisk = TapdiskController.Tapdisk()
            # Since 'tap-ctl list' does not escape blanks in the path, hard-code the current format using 4 pairs to prevent splitting the path
            for pair in line.split(None, 3):
                key, value = pair.split('=', 1)
                if key == 'pid':
                    tapdisk.pid = value
                elif key == 'minor':
                    tapdisk.minor = int(value)
                    if tapdisk.minor >= 0:
                        tapdisk.device = '%s%s' % \
                            (TapdiskController.TAP_DEV, tapdisk.minor)
                elif key == 'state':
                    tapdisk.state = value
                elif key == 'args' and value.find(':') != -1:
                    tapdisk.dtype, tapdisk.image = value.split(':', 1)
            tapdisks.append(tapdisk)
        return tapdisks
    @staticmethod
    def fromDevice(device):
        # Resolve a /dev/xen/blktap-2/tapdevN path to its Tapdisk, matching
        # on the device node's minor number; None if not found/ambiguous.
        if device.startswith(TapdiskController.TAP_DEV):
            minor = os.minor(os.stat(device).st_rdev)
            tapdisks = filter(lambda x: x.minor == minor,
                              TapdiskController.list())
            if len(tapdisks) == 1:
                return tapdisks[0]
        return None
    @staticmethod
    def create(dtype, image):
        # Returns the path of the newly created tapdev node (stdout of tap-ctl).
        return TapdiskController.exc('create', '-a%s:%s' % (dtype, image))
    @staticmethod
    def destroy(device):
        # Tear down the tapdisk behind *device*; 'free' handles the case of
        # a minor with no controlling process.
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk:
            if tapdisk.pid:
                TapdiskController.exc('destroy',
                                      '-p%s' % tapdisk.pid,
                                      '-m%s' % tapdisk.minor)
            else:
                TapdiskController.exc('free', '-m%s' % tapdisk.minor)
    @staticmethod
    def pause(device):
        # No-op when the device has no running tapdisk process.
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk and tapdisk.pid:
            TapdiskController.exc('pause',
                                  '-p%s' % tapdisk.pid,
                                  '-m%s' % tapdisk.minor)
    @staticmethod
    def unpause(device):
        # No-op when the device has no running tapdisk process.
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk and tapdisk.pid:
            TapdiskController.exc('unpause',
                                  '-p%s' % tapdisk.pid,
                                  '-m%s' % tapdisk.minor)
| gpl-2.0 | 8,933,012,367,277,276,000 | -3,064,451,287,297,457,000 | 33.246006 | 143 | 0.531673 | false |
zzliujianbo/shadowsocks | shadowsocks/utils.py | 1 | 11775 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str
VERBOSE_LEVEL = 5
def check_python():
    """Exit with status 1 unless running on Python 2.6+ or 3.3+."""
    major = sys.version_info[0]
    minor = sys.version_info[1]
    if major == 2:
        if minor < 6:
            print('Python 2.6+ required')
            sys.exit(1)
    elif major == 3:
        if minor < 3:
            print('Python 3.3+ required')
            sys.exit(1)
    else:
        print('Python version not supported')
        sys.exit(1)
def print_shadowsocks():
    """Print 'shadowsocks <version>'; the version is blank when it cannot
    be determined (e.g. not installed via setuptools)."""
    try:
        import pkg_resources
        version = pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        version = ''
    print('shadowsocks %s' % version)
def find_config():
    """Return the path of config.json, checking the working directory first
    and then the package's parent directory; None when neither exists."""
    candidates = (
        'config.json',
        os.path.join(os.path.dirname(__file__), '../', 'config.json'),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
def check_config(config):
    """Warn about unsafe or suspicious settings; abort on the default password.

    Values such as 'server' and 'method' are expected as bytes here (they
    are converted with to_bytes during argument parsing), hence the b''
    comparisons.
    """
    if config.get('local_address', '') in [b'0.0.0.0']:
        logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in [b'127.0.0.1', b'localhost']:
        logging.warn('warning: server set to listen on %s:%s, are you sure?' %
                     (to_str(config['server']), config['server_port']))
    if (config.get('method', '') or '').lower() == b'table':
        logging.warn('warning: table is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if (config.get('method', '') or '').lower() == b'rc4':
        logging.warn('warning: RC4 is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if config.get('timeout', 300) < 100:
        logging.warn('warning: your timeout %d seems too short' %
                     int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warn('warning: your timeout %d seems too long' %
                     int(config.get('timeout')))
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        # NOTE(review): uses the builtin exit(); sys.exit(1) would be the
        # conventional spelling -- confirm before changing.
        exit(1)
def get_config(is_local):
    """Build the runtime configuration dict for sslocal/ssserver.

    Settings are read from a JSON config file (located via find_config()
    or an explicit -c option) and then overridden by command line options.
    Defaults are filled in, logging verbosity is configured, and the
    result is validated via check_config().  Exits the process on any
    fatal problem (bad options, missing mandatory values).
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    # Local and server builds accept slightly different option sets.
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=']
    else:
        shortopts = 'hd:s:p:k:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=']
    try:
        config_path = find_config()
        # First pass over argv only to pick up an explicit -c config path.
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        for key, value in optlist:
            if key == '-c':
                config_path = value
        if config_path:
            logging.info('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = json.loads(f.read().decode('utf8'),
                                        object_hook=_decode_dict)
                except ValueError as e:
                    # NOTE(review): e.message is Python-2-only.
                    logging.error('found an error in config.json: %s',
                                  e.message)
                    sys.exit(1)
        else:
            config = {}
        # Second pass: command line options override file settings.
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_bytes(value)
            elif key == '-m':
                config['method'] = to_bytes(value)
            elif key == '-b':
                config['local_address'] = to_bytes(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key in ('-h', '--help'):
                if is_local:
                    print_local_help()
                else:
                    print_server_help()
                sys.exit(0)
            elif key == '-d':
                config['daemon'] = value
            elif key == '--pid-file':
                config['pid-file'] = value
            elif key == '--log-file':
                config['log-file'] = value
            elif key == '-q':
                # '-qq' suppresses even more output
                v_count -= 1
                config['verbose'] = v_count
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)
    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)
    # Fill in defaults for everything not supplied by file or CLI.
    config['password'] = config.get('password', '')
    config['method'] = config.get('method', 'aes-256-cfb')
    config['port_password'] = config.get('port_password', None)
    config['timeout'] = int(config.get('timeout', 300))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
    config['workers'] = config.get('workers', 1)
    config['verbose'] = config.get('verbose', False)
    config['local_address'] = config.get('local_address', '127.0.0.1')
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
    else:
        config['server'] = config.get('server', '0.0.0.0')
        config['server_port'] = config.get('server_port', 8388)
    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)
    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])
    # server_port may be a list when port_password maps several ports.
    if 'server_port' in config and type(config['server_port']) != list:
        config['server_port'] = int(config['server_port'])
    # Reset handlers installed by the early basicConfig call above, then
    # reconfigure logging according to the requested verbosity.
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    check_config(config)
    return config
def print_help(is_local):
    """Print the usage text for the running flavour (local or server)."""
    helper = print_local_help if is_local else print_server_help
    helper()
def print_local_help():
    """Print usage information for the sslocal command."""
    print('''usage: sslocal [-h] -s SERVER_ADDR [-p SERVER_PORT]
               [-b LOCAL_ADDR] [-l LOCAL_PORT] -k PASSWORD [-m METHOD]
               [-t TIMEOUT] [-c CONFIG] [--fast-open] [-v] -[d] [-q]
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -h, --help             show this help message and exit
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
General options:
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
    """Print usage information for the ssserver command."""
    print('''usage: ssserver [-h] [-s SERVER_ADDR] [-p SERVER_PORT] -k PASSWORD
                -m METHOD [-t TIMEOUT] [-c CONFIG] [--fast-open]
                [--workers WORKERS] [-v] [-d start] [-q]
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -h, --help             show this help message and exit
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address, default: 0.0.0.0
  -p SERVER_PORT         server port, default: 8388
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
  --workers WORKERS      number of workers, available on Unix/Linux
General options:
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
| mit | -7,396,115,899,374,232,000 | 3,042,319,968,869,234,700 | 35.342593 | 79 | 0.569682 | false |
mitchrule/Miscellaneous | Django_Project/django/Lib/site-packages/wheel/signatures/ed25519py.py | 565 | 1695 | # -*- coding: utf-8 -*-
import warnings
import os
from collections import namedtuple
from . import djbec
__all__ = ['crypto_sign', 'crypto_sign_open', 'crypto_sign_keypair', 'Keypair',
'PUBLICKEYBYTES', 'SECRETKEYBYTES', 'SIGNATUREBYTES']
# Byte lengths of the Ed25519 primitives (RFC 8032 sizes).
PUBLICKEYBYTES=32
SECRETKEYBYTES=64
SIGNATUREBYTES=64
Keypair = namedtuple('Keypair', ('vk', 'sk')) # verifying key, secret key
def crypto_sign_keypair(seed=None):
    """Return a Keypair(vk, sk) derived from *seed*, or from os.urandom(32).

    The secret key is the 32-byte seed followed by the 32-byte verifying
    key.
    """
    if seed is None:
        seed = os.urandom(PUBLICKEYBYTES)
    else:
        # A caller-chosen seed defeats the point of random key generation.
        warnings.warn("ed25519ll should choose random seed.",
                      RuntimeWarning)
    if len(seed) != 32:
        raise ValueError("seed must be 32 random bytes or None.")
    secret = seed
    verifying = djbec.publickey(secret)
    return Keypair(verifying, secret + verifying)
def crypto_sign(msg, sk):
    """Return signature+message given message and secret key.

    The signature occupies the first SIGNATUREBYTES bytes of the result;
    a copy of *msg* follows it.
    """
    if len(sk) != SECRETKEYBYTES:
        raise ValueError("Bad signing key length %d" % len(sk))
    verifying_key = sk[PUBLICKEYBYTES:]
    signing_key = sk[:PUBLICKEYBYTES]
    return djbec.signature(msg, signing_key, verifying_key) + msg
def crypto_sign_open(signed, vk):
    """Return the message given signature+message and the verifying key.

    Raises ValueError when the key length is wrong or the signature does
    not verify.
    """
    if len(vk) != PUBLICKEYBYTES:
        raise ValueError("Bad verifying key length %d" % len(vk))
    signature, message = signed[:SIGNATUREBYTES], signed[SIGNATUREBYTES:]
    ok = djbec.checkvalid(signature, message, vk)
    if not ok:
        raise ValueError("rc != True", ok)
    return message
| mit | -8,815,436,075,176,877,000 | 7,820,111,080,037,172,000 | 31.596154 | 81 | 0.666667 | false |
philipbl/home-assistant | homeassistant/components/sensor/serial_pm.py | 17 | 2799 | """
Support for particulate matter sensors connected to a serial port.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.serial_pm/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
REQUIREMENTS = ['pmsensor==0.3']
_LOGGER = logging.getLogger(__name__)
CONF_SERIAL_DEVICE = 'serial_device'
CONF_BRAND = 'brand'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_BRAND): cv.string,
vol.Required(CONF_SERIAL_DEVICE): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the available PM sensors.

    Opens the configured serial device and creates one sensor entity per
    particulate-matter value the collector supports.  Logs an error and
    returns without adding devices when the brand is unknown or the
    serial port cannot be opened.
    """
    from pmsensor import serial_pm as pm

    try:
        coll = pm.PMDataCollector(
            config.get(CONF_SERIAL_DEVICE),
            pm.SUPPORTED_SENSORS[config.get(CONF_BRAND)]
        )
    except KeyError:
        _LOGGER.error("Brand %s not supported\n supported brands: %s",
                      config.get(CONF_BRAND), pm.SUPPORTED_SENSORS.keys())
        return
    except OSError as err:
        _LOGGER.error("Could not open serial connection to %s (%s)",
                      config.get(CONF_SERIAL_DEVICE), err)
        return

    dev = []
    for pmname in coll.supported_values():
        # BUG FIX: the original condition was `is None`, which ignored a
        # configured name and rendered "None PM10" when no name was set.
        if config.get(CONF_NAME) is not None:
            name = '{} PM{}'.format(config.get(CONF_NAME), pmname)
        else:
            name = 'PM{}'.format(pmname)
        dev.append(ParticulateMatterSensor(coll, name, pmname))

    add_devices(dev)
class ParticulateMatterSensor(Entity):
    """Representation of a particulate matter sensor."""
    def __init__(self, pmDataCollector, name, pmname):
        """Initialize a new PM sensor.

        Args:
            pmDataCollector: Collector shared by all PM entities of one
                serial device.
            name: Display name of this entity.
            pmname: Key of the PM value (e.g. "10", "2.5") inside the
                collector's read_data() result.
        """
        self._name = name
        self._pmname = pmname
        # Last measured value; None until the first successful update.
        self._state = None
        self._collector = pmDataCollector
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return "µg/m³"
    def update(self):
        """Read from sensor and update the state."""
        _LOGGER.debug("Reading data from PM sensor")
        try:
            self._state = self._collector.read_data()[self._pmname]
        except KeyError:
            # Keep the previous state when this PM value is missing.
            _LOGGER.error("Could not read PM%s value", self._pmname)
    def should_poll(self):
        """Sensor needs polling."""
        return True
| mit | 6,524,745,483,087,645,000 | -5,545,026,538,993,450,000 | 28.442105 | 74 | 0.636754 | false |
TheTimmy/spack | lib/spack/external/_pytest/tmpdir.py | 12 | 4124 | """ support for providing temporary directories to test functions. """
import re
import pytest
import py
from _pytest.monkeypatch import MonkeyPatch
class TempdirFactory:
    """Factory for temporary directories under the common base temp directory.
    The base directory can be configured using the ``--basetemp`` option.
    """
    def __init__(self, config):
        self.config = config
        self.trace = config.trace.get("tmpdir")
    def ensuretemp(self, string, dir=1):
        """ (deprecated) return temporary directory path with
            the given string as the trailing part. It is usually
            better to use the 'tmpdir' function argument which
            provides an empty unique-per-test-invocation directory
            and is guaranteed to be empty.
        """
        #py.log._apiwarn(">1.1", "use tmpdir function argument")
        return self.getbasetemp().ensure(string, dir=dir)
    def mktemp(self, basename, numbered=True):
        """Create a subdirectory of the base temporary directory and return it.
        If ``numbered``, ensure the directory is unique by adding a number
        prefix greater than any existing one.
        """
        basetemp = self.getbasetemp()
        if not numbered:
            p = basetemp.mkdir(basename)
        else:
            # make_numbered_dir also prunes stale numbered dirs (keep=0).
            p = py.path.local.make_numbered_dir(prefix=basename,
                keep=0, rootdir=basetemp, lock_timeout=None)
        self.trace("mktemp", p)
        return p
    def getbasetemp(self):
        """ Return the base temporary directory, creating it on first use.

        The directory is cached in ``self._basetemp``; an explicit
        ``--basetemp`` wins over the per-user numbered directory scheme.
        """
        try:
            return self._basetemp
        except AttributeError:
            basetemp = self.config.option.basetemp
            if basetemp:
                basetemp = py.path.local(basetemp)
                # A user-given basetemp is wiped and recreated each run.
                if basetemp.check():
                    basetemp.remove()
                basetemp.mkdir()
            else:
                temproot = py.path.local.get_temproot()
                user = get_user()
                if user:
                    # use a sub-directory in the temproot to speed-up
                    # make_numbered_dir() call
                    rootdir = temproot.join('pytest-of-%s' % user)
                else:
                    rootdir = temproot
                rootdir.ensure(dir=1)
                basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
                                                           rootdir=rootdir)
            self._basetemp = t = basetemp.realpath()
            self.trace("new basetemp", t)
            return t
    def finish(self):
        # Registered as a config cleanup callback in pytest_configure.
        self.trace("finish")
def get_user():
    """Return the current user name, or None when it cannot be determined.

    getpass.getuser() may fail in unusual environments (see pytest issue
    #1010), in which case None is returned instead of propagating the
    error.
    """
    import getpass
    try:
        user = getpass.getuser()
    except (ImportError, KeyError):
        return None
    return user
# backward compatibility: old name kept so third-party plugins that still
# reference TempdirHandler continue to work.
TempdirHandler = TempdirFactory
def pytest_configure(config):
    """Create a TempdirFactory and attach it to the config object.

    This is to comply with existing plugins which expect the handler to be
    available at pytest_configure time, but ideally should be moved entirely
    to the tmpdir_factory session fixture.
    """
    monkeypatch = MonkeyPatch()
    factory = TempdirFactory(config)
    # Undo the patches and flush the factory trace when the session ends.
    config._cleanup.extend([monkeypatch.undo, factory.finish])
    monkeypatch.setattr(config, '_tmpdirhandler', factory, raising=False)
    monkeypatch.setattr(pytest, 'ensuretemp', factory.ensuretemp, raising=False)
@pytest.fixture(scope='session')
def tmpdir_factory(request):
    """Return a TempdirFactory instance for the test session.
    """
    # The factory is created once in pytest_configure and stored on config.
    return request.config._tmpdirhandler
@pytest.fixture
def tmpdir(request, tmpdir_factory):
    """Return a temporary directory path object
    which is unique to each test function invocation,
    created as a sub directory of the base temporary
    directory. The returned object is a `py.path.local`_
    path object.
    """
    # Sanitize the test name so it is usable as a directory name, and cap
    # its length to keep paths short; use a raw string for the regex to
    # avoid the deprecated "\W" escape in a plain string literal.
    name = re.sub(r"[\W]", "_", request.node.name)[:30]
    return tmpdir_factory.mktemp(name, numbered=True)
| lgpl-2.1 | -1,997,554,897,483,443,700 | 8,343,535,850,554,234,000 | 32.258065 | 79 | 0.607177 | false |
cklb/PyMoskito | pymoskito/simulation_modules.py | 1 | 14724 | import logging
from copy import copy
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from PyQt5.QtCore import QObject
pyqtWrapperType = type(QObject)
__all__ = ["SimulationModule", "SimulationException",
"Trajectory", "Feedforward", "Controller", "Limiter",
"ModelMixer", "Model", "ModelException",
"Solver", "Disturbance", "Sensor", "ObserverMixer", "Observer"]
class SimulationModuleMeta(ABCMeta, pyqtWrapperType):
    """Metaclass combining ABCMeta with the Qt wrapper metaclass so that
    SimulationModule can subclass QObject and still declare abstract
    methods."""
    pass
class SimulationException(Exception):
    """Base exception for all errors raised by simulation modules."""
    pass
class SimulationModule(QObject, metaclass=SimulationModuleMeta):
    """
    Smallest unit of the simulation framework.
    This class provides necessary functions like output calculation and holds
    all settings that can be accessed by the user.
    The :py:attr:`public_settings` are read by the
    :py:class:`.SimulationInterface` and then rendered by the GUI. All entries
    stated in this dictionary will be available as changeable settings for the
    module.
    On initialization, a possibly modified (in terms of its values) version of
    this dict will be passed back to this class and is thenceforward available
    via the :py:attr:`settings` property.
    The most important method is :py:func:`calc_output` which is called by the
    :py:class:`Simulator` to retrieve this module's output.
    Args:
        settings(OrderedDict): Settings for this simulation module.
            These entries will be shown in the properties view and can be
            changed by the user. The important entries for this base class are:
            `output info`:
                Dict holding an information dictionaries with keys `Name` and
                `Unit` for each element in the output data.
                If available, these information are used to display reasonable names
                in the result view and to display the corresponding units for the
                result plots.
    Warn:
        Do NOT use '.' in the `output_info` name field.
    TODO:
        Get rid of the point restriction
    """
    def __init__(self, settings):
        QObject.__init__(self, None)
        # One logger per concrete module class for traceable output.
        self._logger = logging.getLogger(self.__class__.__name__)
        assert isinstance(settings, dict)
        # Work on a copy so the caller's settings dict is not mutated.
        self._settings = copy(settings)
        self._settings["tick divider"] = settings.get("tick divider", 1)
        # Filled in later by the simulator via the step_width setter.
        self._settings["step width"] = None
        # References to other modules must not linger in this module's settings.
        self._settings.pop("modules", None)
    @property
    @abstractmethod
    def public_settings(self):
        """OrderedDict: settings exposed to (and editable by) the user."""
        pass
    @property
    def settings(self):
        """dict: the (possibly user-modified) settings of this module."""
        return self._settings
    @property
    def tick_divider(self):
        """int: this module is only evaluated every n-th simulation tick."""
        return self._settings["tick divider"]
    @property
    def step_width(self):
        """Solver step width; None until set by the simulator."""
        return self._settings["step width"]
    @step_width.setter
    def step_width(self, value):
        self._settings["step width"] = value
    @abstractmethod
    def calc_output(self, input_vector):
        """Calculate this module's output for the given input vector."""
        pass
class ModelException(SimulationException):
    """
    Exception to be raised if the current system state violates modelling
    assumptions.
    """
    pass
class Model(SimulationModule):
    """
    Base class for all user defined system models in state-space form.
    Args:
        settings (dict): Dictionary holding the config options for this module.
            It must contain the following keys:
            :input_count:
                The length of the input vector for this model.
            :state_count:
                The length of the state vector for this model.
            :initial state:
                The initial state vector for this model.
    """
    def __init__(self, settings):
        SimulationModule.__init__(self, settings)
        assert ("state_count" in settings)
        assert ("input_count" in settings)
        assert ("initial state" in settings)
        # The initial state must match the declared state dimension.
        assert len(settings["initial state"]) == settings["state_count"]
    @property
    def initial_state(self):
        """ Return the initial state of the system. """
        return self._settings["initial state"]
    @abstractmethod
    def state_function(self, t, x, args):
        """
        Calculate the state derivatives of a system with state x at time t.
        Args:
            x(Array-like): System state.
            t(float): System time.
            args: Additional arguments (e.g. the current system input).
        Returns:
            Temporal derivative of the system state at time t.
        """
        pass
    def root_function(self, x):
        """
        Check whether a reinitialisation of the integrator should be performed.
        This can be the case if there are discontinuities in the system dynamics
        such as switching.
        Args:
            x(array-like): Current system state.
        Returns:
            tuple:
                * bool: `True` if reset is advised.
                * array-like: State to continue with.
        """
        return False, x
    def check_consistency(self, x):
        """
        Check whether the assumptions, made in the modelling process are
        violated.
        Args:
            x: Current system state
        Raises:
            :py:class:`ModelException` : If a violation is detected. This will
                stop the simulation process.
        """
        pass
class SolverException(SimulationException):
    """Raised when numerical integration of a time step fails."""
    pass
class Solver(SimulationModule):
    """
    Base Class for solver implementations.
    Wraps the :py:class:`Model` instance and advances its state over time.
    """
    def __init__(self, settings):
        assert isinstance(settings["modules"]["Model"], Model)
        self._model = settings["modules"]["Model"]
        # Output of the *next* step; the integration runs one step ahead.
        self.next_output = None
        SimulationModule.__init__(self, settings)
    def calc_output(self, input_vector):
        """Apply the system input and return the previously integrated state.

        The state returned now was computed during the previous call; the
        state for the next call is integrated (and consistency-checked)
        here.
        """
        self.set_input(input_vector["system_input"])
        output = self.next_output
        self.next_output = self.integrate(input_vector["time"])
        try:
            self._model.check_consistency(self.next_output)
        except ModelException as e:
            raise SolverException("Timestep Integration failed! "
                                  "Model raised: {0}".format(e))
        return output
    @abstractmethod
    def set_input(self, *args):
        """Set the system input used for the next integration step."""
        pass
    @abstractmethod
    def integrate(self, t):
        """Integrate the model state up to time t and return the new state."""
        pass
    @property
    @abstractmethod
    def t(self):
        """Current solver time."""
        pass
    @property
    @abstractmethod
    def successful(self):
        """Whether the last integration step succeeded."""
        pass
class ControllerException(SimulationException):
    """Raised when control output calculation fails."""
    pass
class Controller(SimulationModule):
    """
    Base class for controllers.
    Args:
        settings (dict): Dictionary holding the config options for this module.
            It must contain the following keys:
            :input_order:
                The order of required derivatives from the trajectory generator.
            :input_type:
                Source for the feedback calculation and one of the following:
                `system_state` , `system_output` , `Observer` or `Sensor` .
    """
    # selectable input sources for controller
    input_sources = ["system_state", "system_output", "Observer", "Sensor"]
    def __init__(self, settings):
        SimulationModule.__init__(self, settings)
        assert ("input_order" in settings)
        assert ("input_type" in settings)
        assert (settings["input_type"] in self.input_sources)
    @property
    def input_order(self):
        """Order of trajectory derivatives this controller requires."""
        return self._settings["input_order"]
    def calc_output(self, input_vector):
        """Select the configured feedback source and delegate to _control."""
        # Pick the entry of input_vector matching the configured source.
        input_values = next((input_vector[src] for src in self.input_sources
                             if src == self._settings["input_type"]), None)
        if input_values is None:
            raise ControllerException("Selected Input not available")
        trajectory_values = input_vector.get("Trajectory", None)
        feedforward_values = input_vector.get("Feedforward", None)
        return self._control(input_vector["time"], trajectory_values,
                             feedforward_values, input_values)
    @abstractmethod
    def _control(self, time, trajectory_values=None, feedforward_values=None,
                 input_values=None, **kwargs):
        """
        Placeholder for control law calculations.
        For more sophisticated implementations overload :py:func:`calc_output` .
        Args:
            time (float): Current time.
            trajectory_values (array-like): Desired values from the trajectory
                generator.
            feedforward_values (array-like): Output of feedforward block.
            input_values (array-like): The input values selected by
                ``input_type`` .
            **kwargs: Placeholder for custom parameters.
        Returns:
            Array: Control output.
        """
        pass
class Observer(SimulationModule):
    """
    Base class for observers.
    """
    def __init__(self, settings):
        SimulationModule.__init__(self, settings)
    def calc_output(self, input_vector):
        """Select the measurement source and delegate to _observe."""
        system_input = input_vector.get("system_input", None)
        # Prefer a mixed/preprocessed measurement over the raw system output.
        if "ObserverMixer" in input_vector:
            system_output = input_vector["ObserverMixer"]
        elif "system_output" in input_vector:
            system_output = input_vector["system_output"]
        else:
            raise SimulationException("No Observer input specified")
        return self._observe(input_vector["time"], system_input, system_output)
    @abstractmethod
    def _observe(self, time, system_input, system_output):
        """
        Placeholder for observer law.
        Args:
            time: Current time.
            system_input: Current system input.
            system_output: Current system output.
        Returns:
            Estimated system state
        """
        pass
class Feedforward(SimulationModule):
    """
    Base class for all feedforward implementations.
    """
    def __init__(self, settings):
        # Keep a reference to the model for model-based feedforward laws.
        self._model = settings["modules"]["Model"]
        SimulationModule.__init__(self, settings)
        assert ("input_order" in settings)
    @property
    def input_order(self):
        """Order of trajectory derivatives this feedforward requires."""
        return self._settings["input_order"]
    def calc_output(self, input_dict):
        return self._feedforward(input_dict["time"], input_dict["Trajectory"])
    @abstractmethod
    def _feedforward(self, time, trajectory_values):
        """
        Placeholder for feedforward calculations.
        Args:
            time (float): Current time.
            trajectory_values(array-like): Desired values from the trajectory
                generator.
        Returns:
            Array: Feedforward output. This signal can be added to the
                controllers output via the :py:class:`.ModelMixer` and is also
                directly passed to the controller.
        """
        pass
class TrajectoryException(SimulationException):
    """Raised when trajectory generation fails."""
    pass
class Trajectory(SimulationModule):
    """
    Base class for all trajectory generators.
    """
    def __init__(self, settings):
        # The generator must provide derivatives up to the highest order
        # required by either the controller or the feedforward block.
        control_order = 0
        feedforward_order = 0
        if "Controller" in settings["modules"].keys():
            control_order = settings["modules"]["Controller"].input_order
        if "Feedforward" in settings["modules"].keys():
            feedforward_order = settings["modules"]["Feedforward"].input_order
        settings.update(differential_order=max([control_order,
                                                feedforward_order]))
        SimulationModule.__init__(self, settings)
    def calc_output(self, input_vector):
        desired = self._desired_values(input_vector["time"])
        return desired
    @abstractmethod
    def _desired_values(self, t):
        """
        Placeholder for calculations of desired values.
        Args:
            t (float): Time.
        Returns:
            Array: Trajectory output. This should always be a two-dimensional
                array holding the components in the 0th and their derivatives in
                the 1st axis.
        """
        pass
class MixerException(Exception):
    """Raised when signal mixing fails."""
    pass
class SignalMixer(SimulationModule):
    """
    Base class for all signal mixing modules.

    Collects the configured input signals from the input vector and
    delegates the actual combination to the subclass hook ``_mix``.
    """

    def __init__(self, settings):
        assert "input signals" in settings
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_vector):
        wanted = self._settings["input signals"]
        signals = []
        for name, value in input_vector.items():
            if name in wanted:
                signals.append(value)
        return self._mix(signals)
class ModelMixer(SignalMixer):
    """Signal mixer whose output forms the system (model) input."""
    pass
class ObserverMixer(SignalMixer):
    """Signal mixer whose output is fed to the observer."""
    pass
class Limiter(SimulationModule):
    """
    Base class for all limiter variants.
    """

    def __init__(self, settings):
        assert "input_signal" in settings
        SimulationModule.__init__(self, settings)

    def calc_output(self, input_dict):
        signal = input_dict[self._settings["input_signal"]]
        return self._limit(signal)

    def _limit(self, values):
        """Default limiter: pass values through unchanged.

        Args:
            values(array-like): Values to limit.

        Returns:
            Array: Limited output.
        """
        return values
class Sensor(SimulationModule):
    """
    Base class for all sensor variants.
    """
    def __init__(self, settings):
        assert "input signal" in settings
        SimulationModule.__init__(self, settings)
    def calc_output(self, input_dict):
        return self._measure(input_dict[self._settings["input signal"]])
    def _measure(self, value):
        """
        Placeholder for measurement calculations.
        One may reorder or remove state elements or introduce measurement delays
        here.
        Args:
            value (array-like float): Values from the source selected by the
                ``input_signal`` property.
        Returns:
            array-like float: 'Measured' values.
        """
        return value
class Disturbance(SimulationModule):
    """
    Base class for all disturbance variants.
    """
    def __init__(self, settings):
        assert "input signal" in settings
        SimulationModule.__init__(self, settings)
    def calc_output(self, input_dict):
        return self._disturb(input_dict[self._settings["input signal"]])
    @abstractmethod
    def _disturb(self, value):
        """
        Placeholder for disturbance calculations.
        If the noise is to be dependent on the measured signal use its `value`
        to create the noise.
        Args:
            value (array-like float): Values from the source selected by the
                ``input_signal`` property.
        Returns:
            array-like float: Noise that will be mixed with a signal later on.
        """
        pass
| bsd-3-clause | 2,345,098,433,497,255,400 | 1,180,590,384,369,973,000 | 27.927308 | 84 | 0.609006 | false |
mcedit/mcedit | albow/menu_bar.py | 1 | 1799 | #
# Albow - Menu bar
#
from pygame import Rect
from widget import Widget, overridable_property
class MenuBar(Widget):
    """Horizontal bar of pull-down menus drawn along the top of a widget."""

    menus = overridable_property('menus', "List of Menu instances")

    def __init__(self, menus=None, width=0, **kwds):
        """Create a menu bar; its height is one line of the current font."""
        font = self.predict_font(kwds)
        height = font.get_linesize()
        Widget.__init__(self, Rect(0, 0, width, height), **kwds)
        self.menus = menus or []
        self._hilited_menu = None

    def get_menus(self):
        return self._menus

    def set_menus(self, x):
        self._menus = x

    def draw(self, surf):
        """Render the menu titles left to right; the open menu is inverted."""
        fg = self.fg_color
        bg = self.bg_color
        font = self.font
        hilited = self._hilited_menu
        x = 0
        for menu in self._menus:
            text = " %s " % menu.title
            if menu is hilited:
                # The currently open menu is drawn with swapped colors.
                buf = font.render(text, True, bg, fg)
            else:
                buf = font.render(text, True, fg, bg)
            surf.blit(buf, (x, 0))
            # BUG FIX: advance by the rendered title's width, not by the
            # width of the whole target surface, so titles are laid out
            # side by side instead of pushed off-screen.
            x += buf.get_width()

    def mouse_down(self, e):
        """Open the menu whose title span contains the click position."""
        mx = e.local[0]
        font = self.font
        x = 0
        for menu in self._menus:
            text = " %s " % menu.title
            w = font.size(text)[0]
            if x <= mx < x + w:
                self.show_menu(menu, x)
                return
            # BUG FIX: advance past this title's span; previously x was
            # never incremented, so hit-testing only worked relative to
            # the bar's left edge and could match multiple menus.
            x += w

    def show_menu(self, menu, x):
        """Present *menu* below the bar, tracking it as highlighted meanwhile."""
        self._hilited_menu = menu
        try:
            i = menu.present(self, (x, self.height))
        finally:
            self._hilited_menu = None
        menu.invoke_item(i)

    def handle_command_key(self, e):
        """Dispatch a command key to the last menu defining it.

        Returns True when the key was handled.
        """
        menus = self.menus
        for m in xrange(len(menus) - 1, -1, -1):
            menu = menus[m]
            i = menu.find_item_for_key(e)
            if i >= 0:
                menu.invoke_item(i)
                return True
        return False
| isc | -4,023,508,795,556,966,000 | 882,007,613,751,011,100 | 25.850746 | 67 | 0.496943 | false |
justinhayes/cm_api | python/src/cm_api/endpoints/roles.py | 1 | 8270 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cm_api.endpoints.types import *
__docformat__ = "epytext"
# API path templates: per-cluster service roles vs. roles of the
# Cloudera Manager service itself (which lives outside any cluster).
ROLES_PATH = "/clusters/%s/services/%s/roles"
CM_ROLES_PATH = "/cm/service/roles"
def _get_roles_path(cluster_name, service_name):
    """Return the API path of a service's role collection.

    Falls back to the Cloudera Manager service path when no cluster name
    is given.
    """
    if not cluster_name:
        return CM_ROLES_PATH
    return ROLES_PATH % (cluster_name, service_name)
def _get_role_path(cluster_name, service_name, role_name):
    """Return the API path of a single role."""
    return "%s/%s" % (_get_roles_path(cluster_name, service_name), role_name)
def create_role(resource_root,
                service_name,
                role_type,
                role_name,
                host_id,
                cluster_name="default"):
  """
  Create a role.
  @param resource_root: The root Resource object.
  @param service_name: Service name
  @param role_type: Role type
  @param role_name: Role name
  @param host_id: ID of the host the role is placed on
  @param cluster_name: Cluster name
  @return: An ApiRole object
  """
  role = ApiRole(resource_root, role_name, role_type,
                 ApiHostRef(resource_root, host_id))
  path = _get_roles_path(cluster_name, service_name)
  return call(resource_root.post, path, ApiRole, True, data=[role])[0]
def get_role(resource_root, service_name, name, cluster_name="default"):
  """
  Look up a role by name.
  @param resource_root: The root Resource object.
  @param service_name: Service name
  @param name: Role name
  @param cluster_name: Cluster name
  @return: An ApiRole object
  """
  path = _get_role_path(cluster_name, service_name, name)
  return _get_role(resource_root, path)
def _get_role(resource_root, path):
  """Fetch a single ApiRole from the given API path."""
  return call(resource_root.get, path, ApiRole)
def get_all_roles(resource_root, service_name, cluster_name="default", view=None):
  """
  Get all roles in a service.
  @param resource_root: The root Resource object.
  @param service_name: Service name
  @param cluster_name: Cluster name
  @param view: View to materialize ('full' or 'summary')
  @return: A list of ApiRole objects.
  """
  params = dict(view=view) if view else None
  return call(resource_root.get,
              _get_roles_path(cluster_name, service_name),
              ApiRole, True, params=params)
def get_roles_by_type(resource_root, service_name, role_type,
                      cluster_name="default", view=None):
  """
  Get all roles of a certain type in a service.
  @param resource_root: The root Resource object.
  @param service_name: Service name
  @param role_type: Role type
  @param cluster_name: Cluster name
  @param view: View to materialize ('full' or 'summary')
  @return: A list of ApiRole objects.
  """
  all_roles = get_all_roles(resource_root, service_name, cluster_name, view)
  return [role for role in all_roles if role.type == role_type]
def delete_role(resource_root, service_name, name, cluster_name="default"):
  """
  Delete a role by name.
  @param resource_root: The root Resource object.
  @param service_name: Service name
  @param name: Role name
  @param cluster_name: Cluster name
  @return: The deleted ApiRole object
  """
  path = _get_role_path(cluster_name, service_name, name)
  return call(resource_root.delete, path, ApiRole)
class ApiRole(BaseApiResource):
    """Model for a single role instance (one daemon of a service on a host).

    Read-only attributes are populated by the server and never written back.
    """

    # Serialization map consumed by the BaseApiObject machinery: keys are the
    # JSON attribute names; Attr(...) gives a value a concrete API type and
    # ROAttr(...) marks server-managed, read-only fields.
    _ATTRIBUTES = {
        'name' : None,
        'type' : None,
        'hostRef' : Attr(ApiHostRef),
        'roleState' : ROAttr(),
        'healthSummary' : ROAttr(),
        'healthChecks' : ROAttr(),
        'serviceRef' : ROAttr(ApiServiceRef),
        'configStale' : ROAttr(),
        'configStalenessStatus' : ROAttr(),
        'haStatus' : ROAttr(),
        'roleUrl' : ROAttr(),
        'commissionState' : ROAttr(),
        'maintenanceMode' : ROAttr(),
        'maintenanceOwners' : ROAttr(),
        'roleConfigGroupRef' : ROAttr(ApiRoleConfigGroupRef),
        'zooKeeperServerMode' : ROAttr(),
    }

    def __init__(self, resource_root, name=None, type=None, hostRef=None):
        # BaseApiObject.init() reads the constructor arguments out of
        # locals(), so the parameter names here must match _ATTRIBUTES keys.
        BaseApiObject.init(self, resource_root, locals())

    def __str__(self):
        # Human-readable identification including owning cluster and service.
        return "<ApiRole>: %s (cluster: %s; service: %s)" % (
            self.name, self.serviceRef.clusterName, self.serviceRef.serviceName)

    def _path(self):
        # REST path of this role, derived from its service/cluster references.
        return _get_role_path(self.serviceRef.clusterName,
                              self.serviceRef.serviceName,
                              self.name)

    def _get_log(self, log):
        # Fetch one of the role's log resources; `log` is one of
        # 'full', 'stdout' or 'stderr' (see the wrappers below).
        path = "%s/logs/%s" % (self._path(), log)
        return self._get_resource_root().get(path)

    def get_commands(self, view=None):
        """
        Retrieve a list of running commands for this role.
        @param view: View to materialize ('full' or 'summary')
        @return: A list of running commands.
        """
        # Only send the `view` query parameter when one was requested.
        return self._get("commands", ApiCommand, True,
            params = view and dict(view=view) or None)

    def get_config(self, view = None):
        """
        Retrieve the role's configuration.

        The 'summary' view contains strings as the dictionary values. The full
        view contains ApiConfig instances as the values.

        @param view: View to materialize ('full' or 'summary')
        @return: Dictionary with configuration data.
        """
        return self._get_config("config", view)

    def update_config(self, config):
        """
        Update the role's configuration.

        @param config: Dictionary with configuration to update.
        @return: Dictionary with updated configuration.
        """
        return self._update_config("config", config)

    def get_full_log(self):
        """
        Retrieve the contents of the role's log file.
        @return: Contents of log file.
        """
        return self._get_log('full')

    def get_stdout(self):
        """
        Retrieve the contents of the role's standard output.
        @return: Contents of stdout.
        """
        return self._get_log('stdout')

    def get_stderr(self):
        """
        Retrieve the contents of the role's standard error.
        @return: Contents of stderr.
        """
        return self._get_log('stderr')

    def get_metrics(self, from_time=None, to_time=None, metrics=None, view=None):
        """
        This endpoint is not supported as of v6. Use the timeseries API
        instead. To get all metrics for a role with the timeseries API use
        the query:
        'select * where roleName = $ROLE_NAME'.

        To get specific metrics for a role use a comma-separated list of
        the metric names as follows:
        'select $METRIC_NAME1, $METRIC_NAME2 where roleName = $ROLE_NAME'.

        For more information see http://tiny.cloudera.com/tsquery_doc
        @param from_time: A datetime; start of the period to query (optional).
        @param to_time: A datetime; end of the period to query (default = now).
        @param metrics: List of metrics to query (default = all).
        @param view: View to materialize ('full' or 'summary')
        @return: List of metrics and their readings.
        """
        return self._get_resource_root().get_metrics(self._path() + '/metrics',
            from_time, to_time, metrics, view)

    def enter_maintenance_mode(self):
        """
        Put the role in maintenance mode.

        @return: Reference to the completed command.
        @since: API v2
        """
        cmd = self._cmd('enterMaintenanceMode')
        # On success, refresh our local state from the server so that the
        # maintenanceMode/maintenanceOwners attributes are current.
        if cmd.success:
            self._update(_get_role(self._get_resource_root(), self._path()))
        return cmd

    def exit_maintenance_mode(self):
        """
        Take the role out of maintenance mode.

        @return: Reference to the completed command.
        @since: API v2
        """
        cmd = self._cmd('exitMaintenanceMode')
        # Same refresh-on-success behavior as enter_maintenance_mode().
        if cmd.success:
            self._update(_get_role(self._get_resource_root(), self._path()))
        return cmd

    def list_commands_by_name(self):
        """
        Lists all the commands that can be executed by name
        on the provided role.

        @return: A list of command metadata objects
        @since: API v6
        """
        return self._get("commandsByName", ApiCommandMetadata, True, api_version=6)
| apache-2.0 | -109,038,361,115,153,490 | -6,548,095,807,397,393,000 | 31.431373 | 83 | 0.655381 | false |
rememberlenny/google-course-builder | modules/oeditor/oeditor.py | 9 | 10589 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic object editor view that uses REST services."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import urllib
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
import jinja2
from models import custom_modules
from models import transforms
import webapp2
# a set of YUI and inputex modules required by the editor; these are always
# loaded regardless of which field types a particular form uses
COMMON_REQUIRED_MODULES = [
    'inputex-group', 'inputex-form', 'inputex-jsonschema']

# superset of optional inputex/YUI field modules; used as the default when a
# caller of ObjectEditor.get_html_for() does not pass required_modules
ALL_MODULES = [
    'querystring-stringify-simple', 'inputex-select', 'inputex-string',
    'inputex-radio', 'inputex-date', 'inputex-datepicker', 'inputex-checkbox',
    'inputex-list', 'inputex-color', 'gcb-rte', 'inputex-textarea',
    'inputex-url', 'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
    'inputex-file', 'io-upload-iframe']
class ObjectEditor(object):
    """Generic object editor powered by jsonschema."""

    @classmethod
    def get_html_for(
        cls, handler, schema_json, annotations, object_key,
        rest_url, exit_url,
        extra_args=None,
        save_method='put',
        delete_url=None, delete_message=None, delete_method='post',
        auto_return=False, read_only=False,
        required_modules=None,
        extra_js_files=None,
        delete_button_caption='Delete',
        save_button_caption='Save',
        exit_button_caption='Close'):
        """Creates an HTML code needed to embed and operate this form.

        This method creates an HTML, JS and CSS required to embed JSON
        schema-based object editor into a view.

        Args:
            handler: a BaseHandler class, which will host this HTML, JS and CSS
            schema_json: a text of JSON schema for the object being edited
            annotations: schema annotations dictionary
            object_key: a key of an object being edited
            rest_url: a REST endpoint for object GET/PUT operation
            exit_url: a URL to go to after the editor form is dismissed
            extra_args: extra request params passed back in GET and POST
            save_method: how the data should be saved to the server (put|upload)
            delete_url: optional URL for delete operation
            delete_message: string. Optional custom delete confirmation message
            delete_method: optional HTTP method for delete operation
            auto_return: whether to return to the exit_url on successful save
            read_only: optional flag; if set, removes Save and Delete operations
            required_modules: list of inputex modules required for this editor
            extra_js_files: list of extra JS files to be included
            delete_button_caption: string. A caption for the 'Delete' button
            save_button_caption: a caption for the 'Save' button
            exit_button_caption: a caption for the 'Close' button

        Returns:
            The HTML, JS and CSS text that will instantiate an object editor.
        """
        required_modules = required_modules or ALL_MODULES

        # Derive a default delete-confirmation message from the schema's
        # 'description' field when the caller did not supply one.
        if not delete_message:
            kind = transforms.loads(schema_json).get('description')
            if not kind:
                kind = 'Generic Object'
            delete_message = 'Are you sure you want to delete this %s?' % kind

        # construct parameters
        get_url = rest_url
        get_args = {'key': object_key}
        post_url = rest_url
        post_args = {'key': object_key}

        if extra_args:
            get_args.update(extra_args)
            post_args.update(extra_args)

        # Read-only editors get no save endpoint, which disables Save/Delete
        # in the rendered form.
        if read_only:
            post_url = ''
            post_args = ''

        # Collect toolbar icons for every registered custom rich-text tag.
        custom_rte_tag_icons = []
        for tag, tag_class in tags.get_tag_bindings().items():
            custom_rte_tag_icons.append({
                'name': tag,
                'iconUrl': tag_class().get_icon_url()})

        # Values consumed by the oeditor.html Jinja template; key names must
        # stay in sync with that template.
        template_values = {
            'enabled': custom_module.enabled,
            'schema': schema_json,
            'get_url': '%s?%s' % (get_url, urllib.urlencode(get_args, True)),
            'save_url': post_url,
            'save_args': transforms.dumps(post_args),
            'exit_button_caption': exit_button_caption,
            'exit_url': exit_url,
            'required_modules': COMMON_REQUIRED_MODULES + required_modules,
            'extra_js_files': extra_js_files or [],
            'schema_annotations': [
                (item[0], transforms.dumps(item[1])) for item in annotations],
            'save_method': save_method,
            'auto_return': auto_return,
            'delete_button_caption': delete_button_caption,
            'save_button_caption': save_button_caption,
            'custom_rte_tag_icons': transforms.dumps(custom_rte_tag_icons),
            'delete_message': delete_message,
        }

        if delete_url and not read_only:
            template_values['delete_url'] = delete_url
        if delete_method:
            template_values['delete_method'] = delete_method

        if appengine_config.BUNDLE_LIB_FILES:
            template_values['bundle_lib_files'] = True

        # Markup() prevents the rendered HTML from being re-escaped by the
        # embedding template.
        return jinja2.utils.Markup(handler.get_template(
            'oeditor.html', [os.path.dirname(__file__)]
        ).render(template_values))
class PopupHandler(webapp2.RequestHandler, utils.ReflectiveRequestHandler):
    """A handler to serve the content of the popup subeditor."""

    # ReflectiveRequestHandler dispatches to get_<action>/post_<action>
    # based on these declarations.
    default_action = 'custom_tag'
    get_actions = ['edit_custom_tag', 'add_custom_tag']
    post_actions = []

    def get_template(self, template_name, dirs):
        """Sets up an environment and Gets jinja template."""
        # Always include this module's directory so popup.html is found.
        return jinja_utils.get_template(
            template_name, dirs + [os.path.dirname(__file__)])

    def get_edit_custom_tag(self):
        """Return the the page used to edit a custom HTML tag in a popup."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()
        tag_class = tag_bindings[tag_name]
        schema = tag_class().get_schema(self)
        # Nested sub-registries are not renderable in this popup editor.
        if schema.has_subregistries():
            raise NotImplementedError()
        template_values = {}
        # Embed a read/write form for the tag; no REST endpoints are wired up
        # here because the popup communicates with its opener via JS.
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None)
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))

    def get_add_custom_tag(self):
        """Return the page for the popup used to add a custom HTML tag."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()

        # Build the <select> options: one entry per registered tag, labeled
        # "vendor: name" and sorted by label.
        select_data = []
        for name in tag_bindings.keys():
            clazz = tag_bindings[name]
            select_data.append((name, '%s: %s' % (
                clazz.vendor(), clazz.name())))
        select_data = sorted(select_data, key=lambda pair: pair[1])

        # Preselect the requested tag, falling back to the first option.
        if tag_name:
            tag_class = tag_bindings[tag_name]
        else:
            tag_class = tag_bindings[select_data[0][0]]
        tag_schema = tag_class().get_schema(self)

        # Compose a two-part schema: the tag-type selector plus the selected
        # tag's own attribute schema.
        schema = schema_fields.FieldRegistry('Add a Component')
        type_select = schema.add_sub_registry('type', 'Component Type')
        type_select.add_property(schema_fields.SchemaField(
            'tag', 'Name', 'string', select_data=select_data))
        schema.add_sub_registry('attributes', registry=tag_schema)

        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None, required_modules=tag_class.required_modules(),
            extra_js_files=['add_custom_tag.js'])
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))
def create_bool_select_annotation(
        keys_list, label, true_label, false_label, class_name=None,
        description=None):
    """Build an inputex annotation that renders a bool as a <select>.

    Args:
        keys_list: schema key path the annotation applies to.
        label: label displayed next to the select control.
        true_label: display label for the True choice.
        false_label: display label for the False choice.
        class_name: optional CSS class name for the control.
        description: optional help text shown with the control.

    Returns:
        A (keys_list, annotation_dict) pair understood by the object editor.
    """
    properties = {
        'label': label,
        'choices': [
            {'value': True, 'label': true_label},
            {'value': False, 'label': false_label}],
    }
    # Only attach the optional presentation fields when they were provided.
    for key, value in (('className', class_name),
                       ('description', description)):
        if value:
            properties[key] = value
    return (keys_list, {'type': 'select', '_inputex': properties})
# Set to the custom_modules.Module instance by register_module(); remains
# None until the module is registered.
custom_module = None
def register_module():
    """Registers this module in the registry.

    Builds the URL handlers that serve the bundled YUI/inputex static assets
    and the popup editor, wraps them in a Module object, stores it in the
    module-level `custom_module` global, and returns it.
    """

    from controllers import sites  # pylint: disable-msg=g-import-not-at-top

    # Serve the YUI/inputex libraries straight out of their zip archives.
    yui_handlers = [
        ('/static/inputex-3.1.0/(.*)', sites.make_zip_handler(
            os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'))),
        ('/static/yui_3.6.0/(.*)', sites.make_zip_handler(
            os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'))),
        ('/static/2in3/(.*)', sites.make_zip_handler(
            os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip')))]

    # When serving bundled library files, also expose combined-CSS endpoints.
    if appengine_config.BUNDLE_LIB_FILES:
        yui_handlers += [
            ('/static/combo/inputex', sites.make_css_combo_zip_handler(
                os.path.join(
                    appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'),
                '/static/inputex-3.1.0/')),
            ('/static/combo/yui', sites.make_css_combo_zip_handler(
                os.path.join(appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'),
                '/yui/')),
            ('/static/combo/2in3', sites.make_css_combo_zip_handler(
                os.path.join(
                    appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip'),
                '/static/2in3/'))]

    oeditor_handlers = [('/oeditorpopup', PopupHandler)]

    global custom_module
    custom_module = custom_modules.Module(
        'Object Editor',
        'A visual editor for editing various types of objects.',
        yui_handlers, oeditor_handlers)
    return custom_module
| apache-2.0 | -2,058,325,479,341,147,400 | 6,881,963,970,771,572,000 | 39.109848 | 80 | 0.614317 | false |
WSDC-NITWarangal/django | tests/utils_tests/test_checksums.py | 205 | 1267 | import unittest
from django.test import ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
class TestUtilsChecksums(unittest.TestCase):

    def check_output(self, function, value, output=None):
        """
        Assert that function(value) equals output; when output is None,
        assert that function(value) equals value instead.
        """
        expected = value if output is None else output
        self.assertEqual(function(value), expected)

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_luhn(self):
        from django.utils import checksums
        luhn = checksums.luhn
        valid = (
            4111111111111111, '4111111111111111', 4222222222222,
            378734493671000, 5424000000000015, 5555555555554444,
            1008, '0000001008', '000000001008', 4012888888881881,
            1234567890123456789012345678909,
        )
        invalid = (
            4111111111211111, 42222222222224, 100, '100', '0000100',
            'abc', None, object(),
        )
        for value in valid:
            self.check_output(luhn, value, True)
        for value in invalid:
            self.check_output(luhn, value, False)
| bsd-3-clause | 514,048,274,058,705,500 | -2,071,550,314,016,221,000 | 37.393939 | 78 | 0.622731 | false |
nerdvegas/rez | src/rez/vendor/amqp/utils.py | 36 | 2685 | from __future__ import absolute_import
import sys
try:
import fcntl
except ImportError:
fcntl = None # noqa
class promise(object):
    """Lightweight promise wrapping a callable with pre-bound arguments.

    Calling the promise invokes the wrapped callable (merging any call-time
    arguments with the pre-bound ones), records the result in ``value``, and
    fires the ``on_success`` or ``on_error`` callback as appropriate.
    """

    # __slots__ saves per-instance memory on CPython; skipped on PyPy.
    if not hasattr(sys, 'pypy_version_info'):
        __slots__ = tuple(
            'fun args kwargs value ready failed '
            ' on_success on_error calls'.split()
        )

    def __init__(self, fun, args=(), kwargs=(),
                 on_success=None, on_error=None):
        # Wrapped callable and its pre-bound positional/keyword arguments.
        self.fun = fun
        self.args = args
        self.kwargs = kwargs
        # ready: set once __call__ has run (whether it succeeded or failed).
        self.ready = False
        self.failed = False
        self.on_success = on_success
        self.on_error = on_error
        self.value = None
        # Number of completed invocations.
        self.calls = 0

    def __repr__(self):
        return '<$: {0.fun.__name__}(*{0.args!r}, **{0.kwargs!r})'.format(
            self,
        )

    def __call__(self, *args, **kwargs):
        try:
            # Call-time positional args are appended to the pre-bound ones;
            # call-time keyword args override pre-bound keys of the same name.
            self.value = self.fun(
                *self.args + args if self.args else args,
                **dict(self.kwargs, **kwargs) if self.kwargs else kwargs
            )
        except Exception as exc:
            # May re-raise if no on_error callback is set.
            self.set_error_state(exc)
        else:
            if self.on_success:
                self.on_success(self.value)
        finally:
            # Runs even when set_error_state re-raises the exception.
            self.ready = True
            self.calls += 1

    def then(self, callback=None, on_error=None):
        # Attach (replace) the success/error callbacks; returns the success
        # callback so `then` can be used as a decorator.
        self.on_success = callback
        self.on_error = on_error
        return callback

    def set_error_state(self, exc):
        self.failed = True
        if self.on_error is None:
            # Bare `raise` re-raises the exception currently being handled;
            # this method is always invoked from within an except block.
            raise
        self.on_error(exc)

    def throw(self, exc):
        # Raise and immediately catch `exc` so that set_error_state runs
        # inside an active except block (required for its bare `raise`).
        try:
            raise exc
        except exc.__class__ as with_cause:
            self.set_error_state(with_cause)
def noop():
    """Return a promise whose callable ignores all arguments and returns None."""
    return promise(lambda *a, **k: None)
try:
    # Prefer the native implementation when the platform's os module
    # provides one.
    from os import set_cloexec  # Python 3.4?
except ImportError:
    def set_cloexec(fd, cloexec):  # noqa
        """Set or clear the close-on-exec flag on *fd* using fcntl.

        Raises NotImplementedError when the platform's fcntl module does
        not expose FD_CLOEXEC.
        """
        try:
            FD_CLOEXEC = fcntl.FD_CLOEXEC
        except AttributeError:
            raise NotImplementedError(
                'close-on-exec flag not supported on this platform',
            )
        # Read-modify-write the fd's flags, toggling only FD_CLOEXEC.
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        if cloexec:
            flags |= FD_CLOEXEC
        else:
            flags &= ~FD_CLOEXEC
        return fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def get_errno(exc):
    """Return the errno associated with *exc*.

    :exc:`socket.error` and :exc:`IOError` only gained the ``.errno``
    attribute in Python 2.7, so fall back to the ``(errno, reason)``
    args-tuple convention, and finally to 0 when nothing matches.
    """
    try:
        # Whatever the attribute holds is authoritative, even None.
        return exc.errno
    except AttributeError:
        pass
    args = getattr(exc, 'args', None)
    if isinstance(args, tuple) and len(args) == 2:
        # Old-style (errno, reason) exception arguments.
        return args[0]
    return 0
| lgpl-3.0 | 1,631,655,935,349,294,300 | -6,059,971,495,286,446,000 | 25.323529 | 74 | 0.528119 | false |
sadmansk/servo | tests/wpt/web-platform-tests/webdriver/tests/release_actions/conftest.py | 41 | 1038 | import pytest
@pytest.fixture
def key_chain(session):
    """Key (keyboard) action sequence bound to the current session."""
    return session.actions.sequence("key", "keyboard_id")


@pytest.fixture
def mouse_chain(session):
    """Pointer action sequence configured as a mouse device."""
    return session.actions.sequence(
        "pointer",
        "pointer_id",
        {"pointerType": "mouse"})


@pytest.fixture
def none_chain(session):
    """'none' (pause-only) action sequence for the current session."""
    return session.actions.sequence("none", "none_id")


@pytest.fixture(autouse=True)
def release_actions(session, request):
    # release all actions after each test
    # equivalent to a teardown_function, but with access to session fixture
    request.addfinalizer(session.actions.release)


@pytest.fixture
def key_reporter(session, test_actions_page, request):
    """Represents focused input element from `test_keys_page` fixture."""
    input_el = session.find.css("#keys", all=False)
    input_el.click()
    # Clear any events recorded by the click above so tests start clean.
    session.execute_script("resetEvents();")
    return input_el


@pytest.fixture
def test_actions_page(session, url):
    """Navigate the session to the actions test page before the test runs."""
    session.url = url("/webdriver/tests/release_actions/support/test_actions_wdspec.html")
| mpl-2.0 | 1,854,236,528,294,080,800 | -5,699,691,461,079,627,000 | 24.95 | 90 | 0.710019 | false |
aptrishu/coala-bears | bears/c_languages/ClangBear.py | 16 | 3060 | from clang.cindex import Index, LibclangError
from coalib.bears.LocalBear import LocalBear
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.SourceRange import SourceRange
from coalib.settings.Setting import typed_list
def clang_available(cls):
    """
    Checks if Clang is available and ready to use.

    :return: True if Clang is available, a description of the error else.
    """
    try:
        # Creating an index is enough to force libclang to load.
        Index.create()
    except LibclangError as error:  # pragma: no cover
        return str(error)
    return True
class ClangBear(LocalBear):
    """Checks C-family code with libclang and yields coala Results."""

    LANGUAGES = {'C', 'C++', 'Objective-C', 'Objective-C++', 'OpenMP',
                 'OpenCL', 'CUDA'}
    # Depends on libclang-py3, which is a dependency of coala
    REQUIREMENTS = set()
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_FIX = {'Variable Misuse', 'Syntax'}

    # Bear is only usable when libclang can actually be loaded.
    check_prerequisites = classmethod(clang_available)

    def run(self, filename, file, clang_cli_options: typed_list(str)=None):
        """
        Check code for syntactical or semantical problems using Clang.

        This bear supports automatic fixes.

        :param clang_cli_options: Any options that will be passed through to
                                  Clang.
        """
        index = Index.create()
        # Parse the in-memory buffer (unsaved_files) rather than the on-disk
        # file, so unsaved editor contents are checked.
        diagnostics = index.parse(
            filename,
            args=clang_cli_options,
            unsaved_files=[(filename, ''.join(file))]).diagnostics
        for diag in diagnostics:
            # Map libclang's numeric diagnostic severities (0..4) onto
            # coala's; unknown values map to None via .get().
            severity = {0: RESULT_SEVERITY.INFO,
                        1: RESULT_SEVERITY.INFO,
                        2: RESULT_SEVERITY.NORMAL,
                        3: RESULT_SEVERITY.MAJOR,
                        4: RESULT_SEVERITY.MAJOR}.get(diag.severity)
            affected_code = tuple(SourceRange.from_clang_range(range)
                                  for range in diag.ranges)

            diffs = None
            fixits = list(diag.fixits)
            if len(fixits) > 0:
                # FIXME: coala doesn't support choice of diffs, for now
                # append first one only, often there's only one anyway
                diffs = {filename: Diff.from_clang_fixit(fixits[0], file)}

                # No affected code yet? Let's derive it from the fix!
                if len(affected_code) == 0:
                    affected_code = diffs[filename].affected_code(filename)

            # Still no affected code? Position is the best we can get...
            if len(affected_code) == 0 and diag.location.file is not None:
                affected_code = (SourceRange.from_values(
                    diag.location.file.name,
                    diag.location.line,
                    diag.location.column),)

            yield Result(
                self,
                diag.spelling,
                severity=severity,
                affected_code=affected_code,
                diffs=diffs)
| agpl-3.0 | 1,416,571,862,405,026,300 | -9,201,813,985,865,626,000 | 36.317073 | 76 | 0.579739 | false |