repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀)
---|---|---|---|---|
syci/OCB | refs/heads/9.0 | addons/pos_mercury/models/pos_mercury.py | 19 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import sets
from openerp import models, fields, api
_logger = logging.getLogger(__name__)
class barcode_rule(models.Model):
_inherit = 'barcode.rule'
def _get_type_selection(self):
types = sets.Set(super(barcode_rule, self)._get_type_selection())
types.update([
('credit', 'Credit Card')
])
return list(types)
class pos_mercury_payment_data(models.Model):
_name = 'pos_mercury.configuration'
# FIELDS #
name = fields.Char(required=True, help='Name of this Mercury configuration')
merchant_id = fields.Char(string='Merchant ID', required=True, help='ID used to authenticate the merchant on the payment provider server')
merchant_pwd = fields.Char(string='Merchant Password', required=True, help='Password used to authenticate the merchant on the payment provider server')
class account_bank_statement_line(models.Model):
_inherit = "account.bank.statement.line"
mercury_card_number = fields.Char(string='Card Number', help='The last 4 numbers of the card used to pay')
mercury_prefixed_card_number = fields.Char(string='Card Number', compute='_compute_prefixed_card_number', help='The card number used for the payment.')
mercury_card_brand = fields.Char(string='Card Brand', help='The brand of the payment card (e.g. Visa, AMEX, ...)')
mercury_card_owner_name = fields.Char(string='Card Owner Name', help='The name of the card owner')
mercury_ref_no = fields.Char(string='Mercury reference number', help='Payment reference number from Mercury Pay')
mercury_record_no = fields.Char(string='Mercury record number', help='Payment record number from Mercury Pay')
mercury_invoice_no = fields.Integer(string='Mercury invoice number', help='Invoice number from Mercury Pay')
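    # e.g. a stored last-4 value of '1234' is displayed as '********1234'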
@api.one
def _compute_prefixed_card_number(self):
if self.mercury_card_number:
self.mercury_prefixed_card_number = "********" + self.mercury_card_number
else:
self.mercury_prefixed_card_number = ""
class account_journal(models.Model):
_inherit = 'account.journal'
pos_mercury_config_id = fields.Many2one('pos_mercury.configuration', string='Mercury configuration', help='The configuration of Mercury used for this journal')
class pos_order_card(models.Model):
_inherit = "pos.order"
@api.model
def _payment_fields(self, ui_paymentline):
fields = super(pos_order_card, self)._payment_fields(ui_paymentline)
fields.update({
'card_number': ui_paymentline.get('mercury_card_number'),
'card_brand': ui_paymentline.get('mercury_card_brand'),
'card_owner_name': ui_paymentline.get('mercury_card_owner_name'),
'ref_no': ui_paymentline.get('mercury_ref_no'),
'record_no': ui_paymentline.get('mercury_record_no'),
'invoice_no': ui_paymentline.get('mercury_invoice_no')
})
return fields
@api.model
def add_payment(self, order_id, data):
statement_id = super(pos_order_card, self).add_payment(order_id, data)
statement_lines = self.env['account.bank.statement.line'].search([('statement_id', '=', statement_id),
('pos_statement_id', '=', order_id),
('journal_id', '=', data['journal']),
('amount', '=', data['amount'])])
# we can get multiple statement_lines when there are >1 credit
# card payments with the same amount. In that case it doesn't
# matter which statement line we pick, just pick one that
# isn't already used.
for line in statement_lines:
if not line.mercury_card_brand:
line.mercury_card_brand = data.get('card_brand')
line.mercury_card_number = data.get('card_number')
line.mercury_card_owner_name = data.get('card_owner_name')
line.mercury_ref_no = data.get('ref_no')
line.mercury_record_no = data.get('record_no')
line.mercury_invoice_no = data.get('invoice_no')
break
return statement_id
|
seaotterman/tensorflow | refs/heads/master | tensorflow/contrib/slim/python/slim/data/data_decoder.py | 146 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions and classes necessary for decoding data.
While data providers read data from disk, sstables or other formats, data
decoders decode the data (if necessary). A data decoder is provided with a
serialized or encoded piece of data as well as a list of items and
returns a set of tensors, each of which corresponds to the requested list of
items extracted from the data:
def Decode(self, data, items):
...
For example, if data is a compressed map, the implementation might be:
def Decode(self, data, items):
decompressed_map = _Decompress(data)
outputs = []
for item in items:
outputs.append(decompressed_map[item])
return outputs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class DataDecoder(object):
"""An abstract class which is used to decode data for a provider."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def decode(self, data, items):
"""Decodes the data to returns the tensors specified by the list of items.
Args:
data: A possibly encoded data format.
items: A list of strings, each of which indicate a particular data type.
Returns:
A list of `Tensors`, whose length matches the length of `items`, where
each `Tensor` corresponds to each item.
Raises:
ValueError: If any of the items cannot be satisfied.
"""
pass
@abc.abstractmethod
def list_items(self):
"""Lists the names of the items that the decoder can decode.
Returns:
A list of string names.
"""
pass
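# A minimal concrete decoder, sketched against the interface above. The
# dict-backed storage is purely illustrative and not part of the slim API.
class _ExampleDictDecoder(DataDecoder):
  """Toy decoder that looks requested items up in a prebuilt dictionary."""

  def __init__(self, tensor_dict):
    self._tensor_dict = tensor_dict

  def decode(self, data, items):
    # `data` is assumed to be already decoded in this toy example.
    del data
    return [self._tensor_dict[item] for item in items]

  def list_items(self):
    return list(self._tensor_dict.keys())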
|
TheTypoMaster/chromium-crosswalk | refs/heads/master | chrome/test/data/extensions/api_test/activity_log_private/PRESUBMIT.py | 40 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run the Chrome WebUI presubmit scripts on our test javascript.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
def GetPathsToPrepend(input_api):
web_dev_style_path = input_api.os_path.join(
input_api.change.RepositoryRoot(),
'chrome',
'browser',
'resources')
return [input_api.PresubmitLocalPath(), web_dev_style_path]
def RunWithPrependedPath(prepended_path, fn, *args):
import sys
old_path = sys.path
try:
sys.path = prepended_path + old_path
return fn(*args)
finally:
sys.path = old_path
def CheckChangeOnUpload(input_api, output_api):
def go():
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def CheckChangeOnCommit(input_api, output_api):
def go():
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def _CommonChecks(input_api, output_api):
resources = input_api.PresubmitLocalPath()
def _html_css_js_resource(p):
return p.endswith('.js') and p.startswith(resources)
def is_resource(maybe_resource):
return _html_css_js_resource(maybe_resource.AbsoluteLocalPath())
from web_dev_style import js_checker
results = []
results.extend(js_checker.JSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
return results
|
jagguli/intellij-community | refs/heads/master | python/lib/Lib/string.py | 92 | """A collection of string operations (most are no longer used).
Warning: most of the code you see here isn't normally used nowadays.
Beginning with Python 1.6, many of these functions are implemented as
methods on the standard string object. They used to be implemented by
a built-in module called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace
# Case conversion helpers
# Use str to convert Unicode literal in case of -U
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s, [sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. Note that this replaces runs of whitespace characters by
a single space.
"""
return (sep or ' ').join([x.capitalize() for x in s.split(sep)])
# Construct a translation string
_idmapL = None
def maketrans(fromstr, tostr):
"""maketrans(frm, to) -> string
Return a translation table (a string 256 bytes long)
suitable for use in string.translate. The strings frm and to
must be of the same length.
"""
if len(fromstr) != len(tostr):
raise ValueError, "maketrans arguments must have same length"
global _idmapL
if not _idmapL:
_idmapL = map(None, _idmap)
L = _idmapL[:]
fromstr = map(ord, fromstr)
for i in range(len(fromstr)):
L[fromstr[i]] = tostr[i]
return ''.join(L)
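# Example (illustrative): maketrans('abc', 'xyz') returns a 256-byte table
# mapping a->x, b->y, c->z, so translate('abcabc', table) -> 'xyzxyz'.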
####################################################################
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
"""A string class for supporting $-substitutions."""
__metaclass__ = _TemplateMetaclass
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return self.delimiter + named
braced = mo.group('braced')
if braced is not None:
try:
return '%s' % (mapping[braced],)
except KeyError:
return self.delimiter + '{' + braced + '}'
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return self.delimiter
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
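# Example (illustrative):
#   Template('$who likes $what').substitute(who='tim', what='kung pao')
#   -> 'tim likes kung pao'
# safe_substitute() differs only in leaving unknown placeholders intact
# instead of raising KeyError/ValueError.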
####################################################################
# NOTE: Everything below here is deprecated. Use string methods instead.
# This stuff will go away in Python 3.0.
# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
"""lower(s) -> string
Return a copy of the string s converted to lowercase.
"""
return s.lower()
# Convert lower case letters to UPPER CASE
def upper(s):
"""upper(s) -> string
Return a copy of the string s converted to uppercase.
"""
return s.upper()
# Swap lower case letters and UPPER CASE
def swapcase(s):
"""swapcase(s) -> string
Return a copy of the string s with upper case characters
converted to lowercase and vice versa.
"""
return s.swapcase()
# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
"""strip(s [,chars]) -> string
Return a copy of the string s with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping.
"""
return s.strip(chars)
# Strip leading tabs and spaces
def lstrip(s, chars=None):
"""lstrip(s [,chars]) -> string
Return a copy of the string s with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return s.lstrip(chars)
# Strip trailing tabs and spaces
def rstrip(s, chars=None):
"""rstrip(s [,chars]) -> string
Return a copy of the string s with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return s.rstrip(chars)
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
"""split(s [,sep [,maxsplit]]) -> list of strings
Return a list of the words in the string s, using sep as the
delimiter string. If maxsplit is given, splits at no more than
maxsplit places (resulting in at most maxsplit+1 words). If sep
is not specified or is None, any whitespace string is a separator.
(split and splitfields are synonymous)
"""
return s.split(sep, maxsplit)
splitfields = split
# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
"""rsplit(s [,sep [,maxsplit]]) -> list of strings
Return a list of the words in the string s, using sep as the
delimiter string, starting at the end of the string and working
to the front. If maxsplit is given, at most maxsplit splits are
done. If sep is not specified or is None, any whitespace string
is a separator.
"""
return s.rsplit(sep, maxsplit)
# Join fields with optional separator
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words)
joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
"""index(s, sub [,start [,end]]) -> int
Like find but raises ValueError when the substring is not found.
"""
return s.index(*args)
# Find last substring, raise exception if not found
def rindex(s, *args):
"""rindex(s, sub [,start [,end]]) -> int
Like rfind but raises ValueError when the substring is not found.
"""
return s.rindex(*args)
# Count non-overlapping occurrences of substring
def count(s, *args):
"""count(s, sub[, start[,end]]) -> int
Return the number of occurrences of substring sub in string
s[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return s.count(*args)
# Find substring, return -1 if not found
def find(s, *args):
"""find(s, sub [,start [,end]]) -> in
Return the lowest index in s where substring sub is found,
such that sub is contained within s[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return s.find(*args)
# Find last substring, return -1 if not found
def rfind(s, *args):
"""rfind(s, sub [,start [,end]]) -> int
Return the highest index in s where substring sub is found,
such that sub is contained within s[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return s.rfind(*args)
# for a bit of speed
_float = float
_int = int
_long = long
# Convert string to float
def atof(s):
"""atof(s) -> float
Return the floating point number represented by the string s.
"""
return _float(s)
# Convert string to integer
def atoi(s , base=10):
"""atoi(s [,base]) -> int
Return the integer represented by the string s in the given
base, which defaults to 10. The string s must consist of one
or more digits, possibly preceded by a sign. If base is 0, it
is chosen from the leading characters of s, 0 for octal, 0x or
0X for hexadecimal. If base is 16, a preceding 0x or 0X is
accepted.
"""
return _int(s, base)
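# Example (illustrative): atoi('42') -> 42; atoi('0x1f', 16) -> 31;
# with base 0 the base is inferred from the prefix, e.g. atoi('010', 0) -> 8.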
# Convert string to long integer
def atol(s, base=10):
"""atol(s [,base]) -> long
Return the long integer represented by the string s in the
given base, which defaults to 10. The string s must consist
of one or more digits, possibly preceded by a sign. If base
is 0, it is chosen from the leading characters of s, 0 for
octal, 0x or 0X for hexadecimal. If base is 16, a preceding
0x or 0X is accepted. A trailing L or l is not accepted,
unless base is 0.
"""
return _long(s, base)
# Left-justify a string
def ljust(s, width, *args):
"""ljust(s, width[, fillchar]) -> string
Return a left-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated. If specified the fillchar is used instead of spaces.
"""
return s.ljust(width, *args)
# Right-justify a string
def rjust(s, width, *args):
"""rjust(s, width[, fillchar]) -> string
Return a right-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated. If specified the fillchar is used instead of spaces.
"""
return s.rjust(width, *args)
# Center a string
def center(s, width, *args):
"""center(s, width[, fillchar]) -> string
Return a centered version of s, in a field of the specified
width, padded with spaces as needed. The string is never
truncated. If specified the fillchar is used instead of spaces.
"""
return s.center(width, *args)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
"""zfill(x, width) -> string
Pad a numeric string x with zeros on the left, to fill a field
of the specified width. The string x is never truncated.
"""
if not isinstance(x, basestring):
x = repr(x)
return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
"""expandtabs(s [,tabsize]) -> string
Return a copy of the string s with all tab characters replaced
by the appropriate number of spaces, depending on the current
column, and the tabsize (default 8).
"""
return s.expandtabs(tabsize)
# Character translation through look-up table.
def translate(s, table, deletions=""):
"""translate(s,table [,deletions]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletions are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256. The
deletions argument is not allowed for Unicode strings.
"""
if deletions:
return s.translate(table, deletions)
else:
# Add s[:0] so that if s is Unicode and table is an 8-bit string,
# table is converted to Unicode. This means that table *cannot*
# be a dictionary -- for that feature, use u.translate() directly.
return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
"""capitalize(s) -> string
Return a copy of the string s with only its first character
capitalized.
"""
return s.capitalize()
# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
"""replace (str, old, new[, maxsplit]) -> string
Return a copy of string str with all occurrences of substring
old replaced by new. If the optional argument maxsplit is
given, only the first maxsplit occurrences are replaced.
"""
return s.replace(old, new, maxsplit)
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
from strop import maketrans, lowercase, uppercase, whitespace
letters = lowercase + uppercase
except ImportError:
pass # Use the original versions
|
FATruden/boto | refs/heads/master | tests/unit/ec2/test_volume.py | 3 | import mock
from tests.unit import unittest
from boto.ec2.snapshot import Snapshot
from boto.ec2.tag import Tag, TagSet
from boto.ec2.volume import Volume, AttachmentSet, VolumeAttribute
class VolumeTests(unittest.TestCase):
def setUp(self):
self.attach_data = AttachmentSet()
self.attach_data.id = 1
self.attach_data.instance_id = 2
self.attach_data.status = "some status"
self.attach_data.attach_time = 5
self.attach_data.device = "/dev/null"
self.volume_one = Volume()
self.volume_one.id = 1
self.volume_one.create_time = 5
self.volume_one.status = "one_status"
self.volume_one.size = "one_size"
self.volume_one.snapshot_id = 1
self.volume_one.attach_data = self.attach_data
self.volume_one.zone = "one_zone"
self.volume_two = Volume()
self.volume_two.connection = mock.Mock()
self.volume_two.id = 1
self.volume_two.create_time = 6
self.volume_two.status = "two_status"
self.volume_two.size = "two_size"
self.volume_two.snapshot_id = 2
self.volume_two.attach_data = None
self.volume_two.zone = "two_zone"
@mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
def test_startElement_calls_TaggedEC2Object_startElement_with_correct_args(self, startElement):
volume = Volume()
volume.startElement("some name", "some attrs", None)
startElement.assert_called_with(volume, "some name", "some attrs", None)
@mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
def test_startElement_retval_not_None_returns_correct_thing(self, startElement):
tag_set = mock.Mock(TagSet)
startElement.return_value = tag_set
volume = Volume()
retval = volume.startElement(None, None, None)
self.assertEqual(retval, tag_set)
@mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
@mock.patch("boto.resultset.ResultSet")
def test_startElement_with_name_tagSet_calls_ResultSet(self, ResultSet, startElement):
startElement.return_value = None
result_set = mock.Mock(ResultSet([("item", Tag)]))
volume = Volume()
volume.tags = result_set
retval = volume.startElement("tagSet", None, None)
self.assertEqual(retval, volume.tags)
@mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
def test_startElement_with_name_attachmentSet_returns_AttachmentSet(self, startElement):
startElement.return_value = None
attach_data = AttachmentSet()
volume = Volume()
volume.attach_data = attach_data
retval = volume.startElement("attachmentSet", None, None)
self.assertEqual(retval, volume.attach_data)
@mock.patch("boto.ec2.volume.TaggedEC2Object.startElement")
def test_startElement_else_returns_None(self, startElement):
startElement.return_value = None
volume = Volume()
retval = volume.startElement("not tagSet or attachmentSet", None, None)
self.assertEqual(retval, None)
def check_that_attribute_has_been_set(self, name, value, attribute):
volume = Volume()
volume.endElement(name, value, None)
self.assertEqual(getattr(volume, attribute), value)
def test_endElement_sets_correct_attributes_with_values(self):
for arguments in [("volumeId", "some value", "id"),
("createTime", "some time", "create_time"),
("status", "some status", "status"),
("size", 5, "size"),
("snapshotId", 1, "snapshot_id"),
("availabilityZone", "some zone", "zone"),
("someName", "some value", "someName")]:
self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])
def test_endElement_with_name_status_and_empty_string_value_doesnt_set_status(self):
volume = Volume()
volume.endElement("status", "", None)
self.assertNotEqual(volume.status, "")
def test_update_with_result_set_greater_than_0_updates_dict(self):
self.volume_two.connection.get_all_volumes.return_value = [self.volume_one]
self.volume_two.update()
assert all([self.volume_two.create_time == 5,
self.volume_two.status == "one_status",
self.volume_two.size == "one_size",
self.volume_two.snapshot_id == 1,
self.volume_two.attach_data == self.attach_data,
self.volume_two.zone == "one_zone"])
def test_update_with_validate_true_raises_value_error(self):
self.volume_one.connection = mock.Mock()
self.volume_one.connection.get_all_volumes.return_value = []
with self.assertRaisesRegexp(ValueError, "^1 is not a valid Volume ID$"):
self.volume_one.update(True)
def test_update_returns_status(self):
self.volume_one.connection = mock.Mock()
self.volume_one.connection.get_all_volumes.return_value = [self.volume_two]
retval = self.volume_one.update()
self.assertEqual(retval, "two_status")
def test_delete_calls_delete_volume(self):
self.volume_one.connection = mock.Mock()
self.volume_one.delete()
self.volume_one.connection.delete_volume.assert_called_with(1)
def test_attach_calls_attach_volume(self):
self.volume_one.connection = mock.Mock()
self.volume_one.attach("instance_id", "/dev/null")
self.volume_one.connection.attach_volume.assert_called_with(1, "instance_id", "/dev/null")
def test_detach_calls_detach_volume(self):
self.volume_one.connection = mock.Mock()
self.volume_one.detach()
self.volume_one.connection.detach_volume.assert_called_with(
1, 2, "/dev/null", False)
def test_detach_with_no_attach_data(self):
self.volume_two.connection = mock.Mock()
self.volume_two.detach()
self.volume_two.connection.detach_volume.assert_called_with(
1, None, None, False)
def test_detach_with_force_calls_detach_volume_with_force(self):
self.volume_one.connection = mock.Mock()
self.volume_one.detach(True)
self.volume_one.connection.detach_volume.assert_called_with(
1, 2, "/dev/null", True)
def test_create_snapshot_calls_connection_create_snapshot(self):
self.volume_one.connection = mock.Mock()
self.volume_one.create_snapshot()
self.volume_one.connection.create_snapshot.assert_called_with(
1, None)
def test_create_snapshot_with_description(self):
self.volume_one.connection = mock.Mock()
self.volume_one.create_snapshot("some description")
self.volume_one.connection.create_snapshot.assert_called_with(
1, "some description")
def test_volume_state_returns_status(self):
retval = self.volume_one.volume_state()
self.assertEqual(retval, "one_status")
def test_attachment_state_returns_state(self):
retval = self.volume_one.attachment_state()
self.assertEqual(retval, "some status")
def test_attachment_state_no_attach_data_returns_None(self):
retval = self.volume_two.attachment_state()
self.assertEqual(retval, None)
def test_snapshots_returns_snapshots(self):
snapshot_one = Snapshot()
snapshot_one.volume_id = 1
snapshot_two = Snapshot()
snapshot_two.volume_id = 2
self.volume_one.connection = mock.Mock()
self.volume_one.connection.get_all_snapshots.return_value = [snapshot_one, snapshot_two]
retval = self.volume_one.snapshots()
self.assertEqual(retval, [snapshot_one])
def test_snapshots__with_owner_and_restorable_by(self):
self.volume_one.connection = mock.Mock()
self.volume_one.connection.get_all_snapshots.return_value = []
self.volume_one.snapshots("owner", "restorable_by")
self.volume_one.connection.get_all_snapshots.assert_called_with(
owner="owner", restorable_by="restorable_by")
class AttachmentSetTests(unittest.TestCase):
def check_that_attribute_has_been_set(self, name, value, attribute):
attachment_set = AttachmentSet()
attachment_set.endElement(name, value, None)
self.assertEqual(getattr(attachment_set, attribute), value)
def test_endElement_with_name_volumeId_sets_id(self):
return self.check_that_attribute_has_been_set("volumeId", "some value", "id")
def test_endElement_with_name_instanceId_sets_instance_id(self):
return self.check_that_attribute_has_been_set("instanceId", 1, "instance_id")
def test_endElement_with_name_status_sets_status(self):
return self.check_that_attribute_has_been_set("status", "some value", "status")
def test_endElement_with_name_attachTime_sets_attach_time(self):
return self.check_that_attribute_has_been_set("attachTime", 5, "attach_time")
def test_endElement_with_name_device_sets_device(self):
return self.check_that_attribute_has_been_set("device", "/dev/null", "device")
def test_endElement_with_other_name_sets_other_name_attribute(self):
return self.check_that_attribute_has_been_set("someName", "some value", "someName")
class VolumeAttributeTests(unittest.TestCase):
def setUp(self):
self.volume_attribute = VolumeAttribute()
self.volume_attribute._key_name = "key_name"
self.volume_attribute.attrs = {"key_name": False}
def test_startElement_with_name_autoEnableIO_sets_key_name(self):
self.volume_attribute.startElement("autoEnableIO", None, None)
self.assertEqual(self.volume_attribute._key_name, "autoEnableIO")
def test_startElement_without_name_autoEnableIO_returns_None(self):
retval = self.volume_attribute.startElement("some name", None, None)
self.assertEqual(retval, None)
def test_endElement_with_name_value_and_value_true_sets_attrs_key_name_True(self):
self.volume_attribute.endElement("value", "true", None)
self.assertEqual(self.volume_attribute.attrs['key_name'], True)
def test_endElement_with_name_value_and_value_false_sets_attrs_key_name_False(self):
self.volume_attribute._key_name = "other_key_name"
self.volume_attribute.endElement("value", "false", None)
self.assertEqual(self.volume_attribute.attrs['other_key_name'], False)
def test_endElement_with_name_value_and_value_is_not_bool(self):
for attr in ("tierName", "tierType"):
self.volume_attribute._key_name = attr
self.volume_attribute.endElement("value", "tier-XXXXXX", None)
self.assertEqual(self.volume_attribute.attrs[attr], "tier-XXXXXX")
def test_endElement_with_name_value_and_value_is_bool(self):
for attr in ("autoEnableIO", "replication"):
self.volume_attribute._key_name = attr
self.volume_attribute.endElement("value", "True", None)
self.assertEqual(self.volume_attribute.attrs[attr], True)
def test_endElement_with_name_volumeId_sets_id(self):
self.volume_attribute.endElement("volumeId", "some_value", None)
self.assertEqual(self.volume_attribute.id, "some_value")
def test_endElement_with_other_name_sets_other_name_attribute(self):
self.volume_attribute.endElement("someName", "some value", None)
self.assertEqual(self.volume_attribute.someName, "some value")
if __name__ == "__main__":
unittest.main()
|
e-koch/pyspeckit | refs/heads/master | examples/fit_nh3_cube.py | 5 | """
Fit NH3 Cube
============
Example script to fit all pixels in an NH3 data cube.
This is a bit of a mess, and fairly complicated (intrinsically),
but if you have matched 1-1 + 2-2 + ... NH3 cubes, you should be
able to modify this example and get something useful out.
.. WARNING:: Cube fitting, particularly with a complicated line profile
like ammonia's, can take a long time. Test this on a small cube first!
.. TODO:: Turn this example script into a function. But customizing
the fit parameters will still require digging into the data manually
(e.g., excluding bad velocities, or excluding the hyperfine lines from
the initial guess)
"""
import pyspeckit
import astropy
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import numpy as np
import os
from astropy.convolution import convolve_fft,Gaussian2DKernel
# set up CASA-like shortcuts
F=False; T=True
# Some optional parameters for the script
# (if False, it will try to load an already-stored version
# of the file)
fitcube = True
# Mask out low S/N pixels (to speed things up)
mask = pyfits.getdata('hotclump_11_mask.fits')
mask = np.isfinite(mask) * (mask > 0)
# Load the data using a mask
# Then calibrate the data (the data we're loading in this case are in Janskys,
# but we want surface brightness in Kelvin for the fitting process)
cube11 = pyspeckit.Cube('hotclump_11.cube_r0.5.image.fits', maskmap=mask)
cube11.cube *= (13.6 * (300.0 /
(pyspeckit.spectrum.models.ammonia.freq_dict['oneone']/1e9))**2 *
1./cube11.header.get('BMAJ')/3600. * 1./cube11.header.get('BMIN')/3600. )
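# (The factor above is the standard Jy/beam -> K Rayleigh-Jeans conversion,
# T_B ~ 13.6 * (300 / nu_GHz)**2 / (bmaj_arcsec * bmin_arcsec); BMAJ/BMIN are
# read from the header in degrees, hence the 3600 factors to arcseconds.)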
cube11.unit = "K"
cube22 = pyspeckit.Cube('hotclump_22.cube_r0.5_contsub.image.fits', maskmap=mask)
cube22.cube *= (13.6 * (300.0 /
(pyspeckit.spectrum.models.ammonia.freq_dict['twotwo']/1e9))**2 *
1./cube22.header.get('BMAJ')/3600. * 1./cube22.header.get('BMIN')/3600. )
cube22.unit = "K"
cube44 = pyspeckit.Cube('hotclump_44.cube_r0.5_contsub.image.fits', maskmap=mask)
cube44.cube *= (13.6 * (300.0 /
(pyspeckit.spectrum.models.ammonia.freq_dict['fourfour']/1e9))**2 *
1./cube44.header.get('BMAJ')/3600. * 1./cube44.header.get('BMIN')/3600. )
cube44.unit = "K"
# Compute an error map. We use the 1-1 errors for all 3 because they're
# essentially the same, but you could use a different error map for each
# frequency
oneonemomentfn = 'hotclump_11.cube_r0.5_rerun.image.moment_linefree.fits'
errmap11 = (pyfits.getdata(oneonemomentfn).squeeze() * 13.6 *
(300.0 /
(pyspeckit.spectrum.models.ammonia.freq_dict['oneone']/1e9))**2
* 1./cube11.header.get('BMAJ')/3600. *
1./cube11.header.get('BMIN')/3600.)
# Interpolate errors across NaN pixels
errmap11[errmap11 != errmap11] = convolve_fft(errmap11,
Gaussian2DKernel(3),
interpolate_nan=True)[errmap11 != errmap11]
# Stack the cubes into one big cube. The X-axis is no longer linear: there
# will be jumps from 1-1 to 2-2 to 4-4.
cubes = pyspeckit.CubeStack([cube11,cube22,cube44], maskmap=mask)
cubes.unit = "K"
# Make a "moment map" to contain the initial guesses
# If you've already fit the cube, just re-load the saved version
# otherwise, re-fit it
if os.path.exists('hot_momentcube.fits'):
momentcubefile = pyfits.open('hot_momentcube.fits')
momentcube = momentcubefile[0].data
else:
cube11.mapplot()
# compute the moment at each pixel
cube11.momenteach()
momentcube = cube11.momentcube
momentcubefile = pyfits.PrimaryHDU(data=momentcube, header=cube11.header)
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
momentcubefile.writeto('hot_momentcube.fits',overwrite=True)
else:
momentcubefile.writeto('hot_momentcube.fits',clobber=True)
# Create a "guess cube". Because we're fitting physical parameters in this
# case, we want to make the initial guesses somewhat reasonable
# As above, we'll just reload the saved version if it exists
guessfn = 'hot_guesscube.fits'
if os.path.exists(guessfn):
guesscube = pyfits.open(guessfn)
guesses = guesscube[0].data
else:
guesses = np.zeros((6,)+cubes.cube.shape[1:])
guesses[0,:,:] = 20 # Kinetic temperature
guesses[1,:,:] = 5 # Excitation Temp
guesses[2,:,:] = 14.5 # log(column)
guesses[3,:,:] = momentcube[3,:,:] / 5 # Line width / 5 (the NH3 moment overestimates linewidth)
guesses[4,:,:] = momentcube[2,:,:] # Line centroid
guesses[5,:,:] = 0.5 # F(ortho) - ortho NH3 fraction (fixed)
guesscube = pyfits.PrimaryHDU(data=guesses, header=cube11.header)
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
guesscube.writeto(guessfn, overwrite=True)
else:
guesscube.writeto(guessfn, clobber=True)
# This bit doesn't need to be in an if statement
if fitcube:
# excise guesses that fall out of the "good" range
guesses[4,:,:][guesses[4,:,:] > 100] = 100.0
guesses[4,:,:][guesses[4,:,:] < 91] = 95
# do the fits
# signal_cut means ignore any pixel with peak S/N less than this number
# In this fit, many of the parameters are limited
# start_from_point selects the pixel coordinates to start from
# use_nearest_as_guess says that, at each pixel, the input guesses will be
# set by the fitted parameters from the nearest pixel with a good fit
# HOWEVER, because this fitting is done in parallel (multicore=12 means
# 12 parallel fitting processes will run), this actually means that EACH
# core will have its own sub-set of the cube that it will search for good
# fits. So if you REALLY want consistency, you need to do the fit in serial.
cubes.fiteach(fittype='ammonia', multifit=None, guesses=guesses,
integral=False, verbose_level=3, fixed=[F,F,F,F,F,T], signal_cut=3,
limitedmax=[F,F,F,F,T,T],
maxpars=[0,0,0,0,101,1],
limitedmin=[T,T,F,F,T,T],
minpars=[2.73,2.73,0,0,91,0],
use_nearest_as_guess=True, start_from_point=(94,250),
multicore=12,
errmap=errmap11)
# Save the fitted parameters in a data cube
fitcubefile = pyfits.PrimaryHDU(data=np.concatenate([cubes.parcube,cubes.errcube]), header=cubes.header)
fitcubefile.header['PLANE1'] = 'TKIN'
fitcubefile.header['PLANE2'] = 'TEX'
fitcubefile.header['PLANE3'] = 'COLUMN'
fitcubefile.header['PLANE4'] = 'SIGMA'
fitcubefile.header['PLANE5'] = 'VELOCITY'
fitcubefile.header['PLANE6'] = 'FORTHO'
fitcubefile.header['PLANE7'] = 'eTKIN'
fitcubefile.header['PLANE8'] = 'eTEX'
fitcubefile.header['PLANE9'] = 'eCOLUMN'
fitcubefile.header['PLANE10'] = 'eSIGMA'
fitcubefile.header['PLANE11'] = 'eVELOCITY'
fitcubefile.header['PLANE12'] = 'eFORTHO'
fitcubefile.header['CDELT3'] = 1
fitcubefile.header['CTYPE3'] = 'FITPAR'
fitcubefile.header['CRVAL3'] = 0
fitcubefile.header['CRPIX3'] = 1
fitcubefile.writeto("hot_fitcube_try6.fits")
else: # you can read in a fit you've already done!
cubes.load_model_fit('hot_fitcube_try6.fits', 6, 'ammonia', _temp_fit_loc=(94,250))
cubes.specfit.parinfo[5]['fixed'] = True
# Now do some plotting things
import pylab as pl
# Set the map-to-plot to be the line centroid
cubes.mapplot.plane = cubes.parcube[4,:,:]
cubes.mapplot(estimator=None,vmin=91,vmax=101)
# Set the reference frequency to be the 1-1 line frequency
cubes.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['oneone']
cubes.xarr.refX_unit='Hz'
# If you wanted to view the spectra in velocity units, use this:
#cubes.xarr.convert_to_unit('km/s')
#cubes.plotter.xmin=55
#cubes.plotter.xmax=135
# Now replace the cube's plotter with a "special" plotter
# The "special" plotter puts the 1-1, 2-2, and 4-4 lines in their own separate
# windows
cubes.plot_special = pyspeckit.wrappers.fitnh3.plotter_override
cubes.plot_special_kwargs = {'fignum':3, 'vrange':[55,135]}
cubes.plot_spectrum(160,99)
# make interactive
pl.ion()
pl.show()
# At this point, you can click on any pixel in the image and see the spectrum
# with the best-fit ammonia profile overlaid.
|
aehlke/manabi | refs/heads/master | manabi/apps/flashcards/models/next_cards_for_review.py | 1 | import pytz
from datetime import datetime
from manabi.apps.flashcards.models import (
Card,
)
from manabi.apps.flashcards.models.new_cards_limit import (
NewCardsLimit,
)
class ReviewInterstitial:
def __init__(
self,
user,
deck=None,
new_cards_per_day_limit_override=None,
early_review_began_at=None,
excluded_card_ids=set(),
time_zone=None,
new_cards_limit=None,
buffered_cards_count=None,
buffered_new_cards_count=None,
is_for_manabi_reader=False,
jmdict_ids=None,
words_without_jmdict_ids=None,
):
'''
`new_cards_limit` is an instance of `NewCardsLimit`.
'''
from manabi.apps.flashcards.models.review_availabilities import (
ReviewAvailabilities,
)
self.review_availabilities = ReviewAvailabilities(
user,
deck=deck,
excluded_card_ids=excluded_card_ids,
new_cards_per_day_limit_override=new_cards_per_day_limit_override,
early_review_began_at=early_review_began_at,
time_zone=time_zone,
new_cards_limit=new_cards_limit,
buffered_cards_count=buffered_cards_count,
buffered_new_cards_count=buffered_new_cards_count,
is_for_manabi_reader=is_for_manabi_reader,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
class NextCardsForReview:
def __init__(
self,
user,
count,
deck=None,
early_review=False,
early_review_began_at=None,
include_new_buried_siblings=False,
new_cards_per_day_limit_override=None,
excluded_card_ids=set(),
is_for_manabi_reader=False,
jmdict_ids=None,
words_without_jmdict_ids=None,
time_zone=None,
):
new_cards_limit = NewCardsLimit(
user,
new_cards_per_day_limit_override=new_cards_per_day_limit_override,
)
next_cards = Card.objects.next_cards(
user,
count,
excluded_ids=excluded_card_ids,
deck=deck,
early_review=early_review,
early_review_began_at=early_review_began_at,
include_new_buried_siblings=include_new_buried_siblings,
new_cards_limit=new_cards_limit.next_new_cards_limit,
is_for_manabi_reader=is_for_manabi_reader,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
card_ids = [card.id for card in next_cards]
# FIXME don't need 2 queries here...
self.cards = (
Card.objects
.filter(pk__in=card_ids)
.select_related('fact')
)
excluded_card_ids.update(card_ids)
buffered_new_cards_count = len([
card for card in self.cards if card.is_new
])
self.interstitial = ReviewInterstitial(
user,
deck=deck,
time_zone=time_zone,
excluded_card_ids=excluded_card_ids,
buffered_cards_count=len(self.cards),
buffered_new_cards_count=buffered_new_cards_count,
new_cards_per_day_limit_override=new_cards_per_day_limit_override,
new_cards_limit=new_cards_limit,
early_review_began_at=early_review_began_at,
is_for_manabi_reader=is_for_manabi_reader,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
self.server_datetime = datetime.now(pytz.utc)
|
eeshangarg/oh-mainline | refs/heads/master | vendor/packages/gdata/src/gdata/tlslite/__init__.py | 409 | """
TLS Lite is a free python library that implements SSL v3, TLS v1, and
TLS v1.1. TLS Lite supports non-traditional authentication methods
such as SRP, shared keys, and cryptoIDs, in addition to X.509
certificates. TLS Lite is pure python, however it can access OpenSSL,
cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite
integrates with httplib, xmlrpclib, poplib, imaplib, smtplib,
SocketServer, asyncore, and Twisted.
To use, do::
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket,
or use one of the integration classes in L{tlslite.integration}.
@version: 0.3.8
"""
__version__ = "0.3.8"
__all__ = ["api",
"BaseDB",
"Checker",
"constants",
"errors",
"FileObject",
"HandshakeSettings",
"mathtls",
"messages",
"Session",
"SessionCache",
"SharedKeyDB",
"TLSConnection",
"TLSRecordLayer",
"VerifierDB",
"X509",
"X509CertChain",
"integration",
"utils"]
|
xforce/diorama-native-modding | refs/heads/master | tools/gyp/test/msvs/multiple_actions_error_handling/action_fail.py | 124 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
sys.exit(1)
|
whs/django | refs/heads/master | tests/admin_views/admin.py | 17 | import os
import tempfile
from io import StringIO
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Answer2, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
Recommendation, Recommender, ReferencedByGenRel, ReferencedByInline,
ReferencedByParent, RelatedPrepopulated, RelatedWithUUIDPKModel, Report,
Reservation, Restaurant, RowLevelChangePermissionModel, Section,
ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section', lambda obj: obj.title,
)
list_editable = ('section',)
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'from@example.com',
['to@example.com']
).send()
return super().delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'from@example.com',
['to@example.com']
).send()
return super().save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
popup_response_template = 'custom_admin/popup_response.html'
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super().get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super().get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
action_form = MediaActionForm
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'from@example.com',
['to@example.com']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'from@example.com',
['to@example.com']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action',
status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
save_as = True
list_display = ('id', 'name',)
list_display_links = ('id',)
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super().save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super().get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=the_recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo",
'readonly_content',
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.order_by('pk').filter(pk=9999) # Doesn't exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ['-id']
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ['-id']
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super().get_search_results(request, queryset, search_term)
try:
search_term_as_int = int(search_term)
except ValueError:
pass
else:
queryset |= self.model.objects.filter(age=search_term_as_int)
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
list_display_links = ['id']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super().change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super().get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super().clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
fields = ['name']
def add_view(self, request, *args, **kwargs):
request.is_add_view = True
return super().add_view(request, *args, **kwargs)
def change_view(self, request, *args, **kwargs):
request.is_add_view = False
return super().change_view(request, *args, **kwargs)
def get_formsets_with_inlines(self, request, obj=None):
if request.is_add_view and obj is not None:
raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
if not request.is_add_view and obj is None:
raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
return super().get_formsets_with_inlines(request, obj)
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as to exercise all four paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer, date_hierarchy='question__posted')
site.register(Answer2, date_hierarchy='question__expires')
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
RelatedWithUUIDPKModel,
list_display=['pk', 'parent'],
list_editable=['parent'],
raw_id_fields=['parent'],
)
site2.register(Person, save_as_continue=False)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
site7.register(Section)
|
demon-ru/iml-crm | refs/heads/master | addons/l10n_fr_rib/__openerp__.py | 425 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'French RIB Bank Details',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module lets users enter the banking details of Partners in the RIB format (French standard for bank accounts details).
===========================================================================================================================
RIB Bank Accounts can be entered in the "Accounting" tab of the Partner form by specifying the account type "RIB".
The four standard RIB fields will then become mandatory:
--------------------------------------------------------
- Bank Code
- Office Code
- Account number
- RIB key
As a safety measure, OpenERP will check the RIB key whenever a RIB is saved, and
will refuse to record the data if the key is incorrect. Please bear in mind that
this can only happen when the user presses the 'save' button, for example on the
Partner Form. Since each bank account may relate to a Bank, users may enter the
RIB Bank Code in the Bank form - it will then pre-fill the Bank Code on the RIB
when they select the Bank. To make this easier, this module will also let users
find Banks using their RIB code.
The module base_iban can be a useful addition to this module, because French banks
are now progressively adopting the international IBAN format instead of the RIB format.
The RIB and IBAN codes for a single account can be entered by recording two Bank
Accounts in OpenERP: the first with the type 'RIB', the second with the type 'IBAN'.
""",
'author' : u'Numérigraphe SARL',
'depends': ['account', 'base_iban'],
'data': ['bank_data.xml', 'bank_view.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
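# Illustrative note (not part of this manifest): the RIB key check described
# above is the standard French "cle RIB" checksum. Kept entirely in comments
# so the manifest remains a single literal for OpenERP's module loader.
# A minimal sketch, assuming purely numeric bank/office/account strings
# (letters in real account numbers are first mapped to digits):
#
#     def rib_key(bank, office, account):
#         return 97 - ((89 * int(bank) + 15 * int(office)
#                       + 3 * int(account)) % 97)
#
# A RIB is valid when rib_key(...) equals the recorded two-digit key.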
|
daineseh/kodi-plugin.video.ted-talks-chinese | refs/heads/master | youtube_dl/extractor/escapist.py | 44 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
clean_html,
int_or_none,
float_or_none,
sanitized_Request,
)
def _decrypt_config(key, string):
    # The vidconfig payload is a hex string XOR-ed with the repeated key:
    # rebuild the key stream, hex-decode the payload, then XOR the two.
    a = ''
    i = ''
    r = ''
    # repeat the key until it covers the decoded payload length
    while len(a) < (len(string) / 2):
        a += key
    a = a[0:int(len(string) / 2)]
    # hex-decode the payload two characters at a time
    t = 0
    while t < len(string):
        i += chr(int(string[t] + string[t + 1], 16))
        t += 2
    # XOR each decoded byte with the corresponding key-stream byte
    icko = [s for s in i]
    for t, c in enumerate(a):
        r += chr(ord(c) ^ ord(icko[t]))
    return r
class EscapistIE(InfoExtractor):
_VALID_URL = r'https?://?(?:www\.)?escapistmagazine\.com/videos/view/[^/?#]+/(?P<id>[0-9]+)-[^/?#]*(?:$|[?#])'
_TESTS = [{
'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
'info_dict': {
'id': '6618',
'ext': 'mp4',
'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
'title': "Breaking Down Baldur's Gate",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 264,
'uploader': 'The Escapist',
}
}, {
'url': 'http://www.escapistmagazine.com/videos/view/zero-punctuation/10044-Evolve-One-vs-Multiplayer',
'md5': '9e8c437b0dbb0387d3bd3255ca77f6bf',
'info_dict': {
'id': '10044',
'ext': 'mp4',
'description': 'This week, Zero Punctuation reviews Evolve.',
'title': 'Evolve - One vs Multiplayer',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 304,
'uploader': 'The Escapist',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
ims_video = self._parse_json(
self._search_regex(
r'imsVideo\.play\(({.+?})\);', webpage, 'imsVideo'),
video_id)
video_id = ims_video['videoID']
key = ims_video['hash']
config_req = sanitized_Request(
'http://www.escapistmagazine.com/videos/'
'vidconfig.php?videoID=%s&hash=%s' % (video_id, key))
config_req.add_header('Referer', url)
config = self._download_webpage(config_req, video_id, 'Downloading video config')
data = json.loads(_decrypt_config(key, config))
video_data = data['videoData']
title = clean_html(video_data['title'])
duration = float_or_none(video_data.get('duration'), 1000)
uploader = video_data.get('publisher')
formats = [{
'url': video['src'],
'format_id': '%s-%sp' % (determine_ext(video['src']), video['res']),
'height': int_or_none(video.get('res')),
} for video in data['files']['videos']]
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'duration': duration,
'uploader': uploader,
}
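if __name__ == '__main__':
    # Illustrative round-trip for _decrypt_config (not part of the
    # extractor): XOR is its own inverse, so hex-encoding a payload XOR-ed
    # with the key and then decrypting it returns the original text.
    plaintext = '{"videoData": {}}'
    key = 'secret'
    stream = (key * len(plaintext))[:len(plaintext)]
    ciphertext = ''.join('%02x' % (ord(c) ^ ord(k))
                         for c, k in zip(plaintext, stream))
    assert _decrypt_config(key, ciphertext) == plaintext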
|
pratikmallya/hue | refs/heads/master | desktop/core/ext-py/ndg_httpsclient-0.4.0/ndg/httpsclient/ssl_peer_verification.py | 71 | """ndg_httpsclient - module containing SSL peer verification class.
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "09/12/11"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import re
import logging
log = logging.getLogger(__name__)
try:
from ndg.httpsclient.subj_alt_name import SubjectAltName
from pyasn1.codec.der import decoder as der_decoder
SUBJ_ALT_NAME_SUPPORT = True
except ImportError as e:
SUBJ_ALT_NAME_SUPPORT = False
SUBJ_ALT_NAME_SUPPORT_MSG = (
'SubjectAltName support is disabled - check pyasn1 package '
'installation to enable'
)
import warnings
warnings.warn(SUBJ_ALT_NAME_SUPPORT_MSG)
class ServerSSLCertVerification(object):
"""Check server identity. If hostname doesn't match, allow match of
host's Distinguished Name against server DN setting"""
DN_LUT = {
'commonName': 'CN',
'organisationalUnitName': 'OU',
'organisation': 'O',
'countryName': 'C',
'emailAddress': 'EMAILADDRESS',
'localityName': 'L',
'stateOrProvinceName': 'ST',
'streetAddress': 'STREET',
'domainComponent': 'DC',
'userid': 'UID'
}
SUBJ_ALT_NAME_EXT_NAME = 'subjectAltName'
PARSER_RE_STR = '/(%s)=' % '|'.join(list(DN_LUT.keys()) + list(DN_LUT.values()))
PARSER_RE = re.compile(PARSER_RE_STR)
__slots__ = ('__hostname', '__certDN', '__subj_alt_name_match')
def __init__(self, certDN=None, hostname=None, subj_alt_name_match=True):
"""Override parent class __init__ to enable setting of certDN
setting
@type certDN: string
@param certDN: Set the expected Distinguished Name of the
server to avoid errors matching hostnames. This is useful
where the hostname is not fully qualified
@type hostname: string
@param hostname: hostname to match against peer certificate
subjectAltNames or subject common name
@type subj_alt_name_match: bool
@param subj_alt_name_match: flag to enable/disable matching of hostname
against peer certificate subjectAltNames. Nb. A setting of True will
be ignored if the pyasn1 package is not installed
"""
self.__certDN = None
self.__hostname = None
if certDN is not None:
self.certDN = certDN
if hostname is not None:
self.hostname = hostname
if subj_alt_name_match:
if not SUBJ_ALT_NAME_SUPPORT:
log.warning('Overriding "subj_alt_name_match" keyword setting: '
'peer verification with subjectAltNames is disabled')
self.__subj_alt_name_match = False
else:
self.__subj_alt_name_match = True
else:
log.debug('Disabling peer verification with subject '
'subjectAltNames!')
self.__subj_alt_name_match = False
def __call__(self, connection, peerCert, errorStatus, errorDepth,
preverifyOK):
"""Verify server certificate
@type connection: OpenSSL.SSL.Connection
@param connection: SSL connection object
@type peerCert: basestring
@param peerCert: server host certificate as OpenSSL.crypto.X509
instance
@type errorStatus: int
@param errorStatus: error status passed from caller. This is the value
returned by the OpenSSL C function X509_STORE_CTX_get_error(). Look-up
x509_vfy.h in the OpenSSL source to get the meanings of the different
codes. PyOpenSSL doesn't help you!
@type errorDepth: int
@param errorDepth: a non-negative integer representing where in the
        certificate chain the error occurred. If it is zero it occurred in the
end entity certificate, one if it is the certificate which signed the
end entity certificate and so on.
@type preverifyOK: int
@param preverifyOK: the error status - 0 = Error, 1 = OK of the current
SSL context irrespective of any verification checks done here. If this
function yields an OK status, it should enforce the preverifyOK value
so that any error set upstream overrides and is honoured.
@rtype: int
@return: status code - 0/False = Error, 1/True = OK
"""
if peerCert.has_expired():
# Any expired certificate in the chain should result in an error
log.error('Certificate %r in peer certificate chain has expired',
peerCert.get_subject())
return False
elif errorDepth == 0:
# Only interested in DN of last certificate in the chain - this must
# match the expected Server DN setting
peerCertSubj = peerCert.get_subject()
peerCertDN = peerCertSubj.get_components()
peerCertDN.sort()
if self.certDN is None:
# Check hostname against peer certificate CN field instead:
if self.hostname is None:
log.error('No "hostname" or "certDN" set to check peer '
'certificate against')
return False
# Check for subject alternative names
if self.__subj_alt_name_match:
dns_names = self._get_subj_alt_name(peerCert)
if self.hostname in dns_names:
return preverifyOK
# If no subjectAltNames, default to check of subject Common Name
if peerCertSubj.commonName == self.hostname:
return preverifyOK
else:
log.error('Peer certificate CN %r doesn\'t match the '
'expected CN %r', peerCertSubj.commonName,
self.hostname)
return False
else:
if peerCertDN == self.certDN:
return preverifyOK
else:
log.error('Peer certificate DN %r doesn\'t match the '
'expected DN %r', peerCertDN, self.certDN)
return False
else:
return preverifyOK
def get_verify_server_cert_func(self):
def verify_server_cert(connection, peerCert, errorStatus, errorDepth,
preverifyOK):
return self.__call__(connection, peerCert, errorStatus,
errorDepth, preverifyOK)
return verify_server_cert
@classmethod
def _get_subj_alt_name(cls, peer_cert):
'''Extract subjectAltName DNS name settings from certificate extensions
@param peer_cert: peer certificate in SSL connection. subjectAltName
settings if any will be extracted from this
@type peer_cert: OpenSSL.crypto.X509
'''
# Search through extensions
dns_name = []
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name == cls.SUBJ_ALT_NAME_EXT_NAME:
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if isinstance(name, SubjectAltName):
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
dns_name.append(str(component.getComponent()))
return dns_name
def _getCertDN(self):
return self.__certDN
    def _setCertDN(self, val):
        if isinstance(val, str):
            # Allow for quoted DN
            certDN = val.strip('"')
            dnFields = self.__class__.PARSER_RE.split(certDN)
            if len(dnFields) < 2:
                raise TypeError('Error parsing DN string: "%s"' % certDN)
            self.__certDN = list(zip(dnFields[1::2], dnFields[2::2]))
            self.__certDN.sort()
        elif isinstance(val, list):
            # Validate that each item is a (DN field, value) pair; the
            # original branch was inverted and rejected genuine lists
            for i in val:
                if not len(i) == 2:
                    raise TypeError('Expecting list of two element DN field, '
                                    'DN field value pairs for "certDN" '
                                    'attribute')
            self.__certDN = val
        else:
            raise TypeError('Expecting list or string type for "certDN" '
                            'attribute')
certDN = property(fget=_getCertDN,
fset=_setCertDN,
doc="Distinguished Name for Server Certificate")
# Get/Set Property methods
def _getHostname(self):
return self.__hostname
def _setHostname(self, val):
if not isinstance(val, str):
raise TypeError("Expecting string type for hostname "
"attribute")
self.__hostname = val
hostname = property(fget=_getHostname,
fset=_setHostname,
doc="hostname of server")
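if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # wire the verification callback into a PyOpenSSL context. The hostname
    # below is an assumption for demonstration purposes only.
    from OpenSSL import SSL
    verification = ServerSSLCertVerification(hostname='www.example.org')
    ssl_context = SSL.Context(SSL.TLSv1_2_METHOD)
    ssl_context.set_verify(SSL.VERIFY_PEER,
                           verification.get_verify_server_cert_func())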
|
rohitwaghchaure/digitales_erpnext | refs/heads/develop | erpnext/setup/doctype/global_defaults/global_defaults.py | 34 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
"""Global Defaults"""
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.core.doctype.property_setter.property_setter import make_property_setter
keydict = {
# "key in defaults": "key in Global Defaults"
"print_style": "print_style",
"fiscal_year": "current_fiscal_year",
'company': 'default_company',
'currency': 'default_currency',
"country": "country",
'hide_currency_symbol':'hide_currency_symbol',
'account_url':'account_url',
'disable_rounded_total': 'disable_rounded_total',
}
from frappe.model.document import Document
class GlobalDefaults(Document):
def on_update(self):
"""update defaults"""
for key in keydict:
frappe.db.set_default(key, self.get(keydict[key], ''))
# update year start date and year end date from fiscal_year
year_start_end_date = frappe.db.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", self.current_fiscal_year)
if year_start_end_date:
ysd = year_start_end_date[0][0] or ''
yed = year_start_end_date[0][1] or ''
if ysd and yed:
frappe.db.set_default('year_start_date', ysd.strftime('%Y-%m-%d'))
frappe.db.set_default('year_end_date', yed.strftime('%Y-%m-%d'))
# enable default currency
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
self.toggle_rounded_total()
# clear cache
frappe.clear_cache()
def get_defaults(self):
return frappe.defaults.get_defaults()
def toggle_rounded_total(self):
self.disable_rounded_total = cint(self.disable_rounded_total)
# Make property setters to hide rounded total fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note"):
make_property_setter(doctype, "rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total", "print_hide", 1, "Check")
make_property_setter(doctype, "rounded_total_export", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total_export", "print_hide", self.disable_rounded_total, "Check")
|
ychen820/microblog | refs/heads/master | y/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/gcloud/sdktools/components/update.py | 11 | # Copyright 2013 Google Inc. All Rights Reserved.
"""The command to install/update gcloud components."""
import argparse
import textwrap
from googlecloudsdk.calliope import base
class Update(base.Command):
"""Update or install one or more Cloud SDK components or packages.
Ensure that the latest version of each specified component, and the latest
version of all components upon which the specified components directly or
indirectly depend, is installed on the local workstation. If the command
includes one or more names of components or packages, the specified components
are the named components and the components contained in the named packages;
if the command does not name any components or packages, the specified
components are all installed components.
"""
detailed_help = {
'DESCRIPTION': textwrap.dedent("""\
{description}
The items may be individual components or preconfigured packages. If a
downloaded component was not previously installed, the downloaded
version is installed. If an earlier version of the component was
previously installed, that version is replaced by the downloaded
version.
If, for example, the component ``UNICORN-FACTORY'' depends on the
component ``HORN-FACTORY'', installing the latest version of
``UNICORN-FACTORY'' will cause the version of ``HORN-FACTORY'' upon
which it depends to be installed as well, if it is not already
installed. The command lists all components it is about to install,
and asks for confirmation before proceeding.
"""),
'EXAMPLES': textwrap.dedent("""\
The following command ensures that the latest version is installed for
``COMPONENT-1'', ``COMPONENT-2'', and all components that depend,
directly or indirectly, on either ``COMPONENT-1'' or ``COMPONENT-2'':
$ gcloud components update COMPONENT-1 COMPONENT-2
"""),
}
@staticmethod
def Args(parser):
parser.add_argument(
'component_ids',
metavar='COMPONENT-IDS',
nargs='*',
help='The IDs of the components to be updated or installed.')
parser.add_argument(
'--allow-no-backup',
required=False,
action='store_true',
help=argparse.SUPPRESS)
def Run(self, args):
"""Runs the list command."""
self.group.update_manager.Update(
args.component_ids, allow_no_backup=args.allow_no_backup)
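# Illustrative sketch (not part of the real update flow, which delegates to
# self.group.update_manager.Update): how the transitive dependency closure
# described in the help text above can be computed. The 'dependencies'
# mapping is an assumed {component_id: [dependency ids]} structure.
def _DependencyClosure(component_ids, dependencies):
  """Returns the given component ids plus all ids they depend on."""
  closure = set()
  pending = list(component_ids)
  while pending:
    component = pending.pop()
    if component not in closure:
      closure.add(component)
      # Unknown components simply contribute no further dependencies.
      pending.extend(dependencies.get(component, ()))
  return closure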
|
CS-SI/QGIS | refs/heads/master | python/plugins/processing/algs/qgis/HypsometricCurves.py | 5 | # -*- coding: utf-8 -*-
"""
***************************************************************************
HypsometricCurves.py
---------------------
Date : November 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import numpy
import csv
from osgeo import gdal, ogr, osr
from qgis.core import (QgsRectangle,
QgsGeometry,
QgsFeatureRequest,
QgsProcessing,
QgsProcessingParameterBoolean,
QgsProcessingParameterNumber,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFolderDestination)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import raster
class HypsometricCurves(QgisAlgorithm):
INPUT_DEM = 'INPUT_DEM'
BOUNDARY_LAYER = 'BOUNDARY_LAYER'
STEP = 'STEP'
USE_PERCENTAGE = 'USE_PERCENTAGE'
OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
def group(self):
return self.tr('Raster terrain analysis')
def groupId(self):
return 'rasterterrainanalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT_DEM,
self.tr('DEM to analyze')))
self.addParameter(QgsProcessingParameterFeatureSource(self.BOUNDARY_LAYER,
self.tr('Boundary layer'), [QgsProcessing.TypeVectorPolygon]))
self.addParameter(QgsProcessingParameterNumber(self.STEP,
self.tr('Step'), type=QgsProcessingParameterNumber.Double, minValue=0.0, maxValue=999999999.999999, defaultValue=100.0))
self.addParameter(QgsProcessingParameterBoolean(self.USE_PERCENTAGE,
self.tr('Use % of area instead of absolute value'), defaultValue=False))
self.addParameter(QgsProcessingParameterFolderDestination(self.OUTPUT_DIRECTORY,
self.tr('Hypsometric curves')))
def name(self):
return 'hypsometriccurves'
def displayName(self):
return self.tr('Hypsometric curves')
def processAlgorithm(self, parameters, context, feedback):
raster_layer = self.parameterAsRasterLayer(parameters, self.INPUT_DEM, context)
target_crs = raster_layer.crs()
rasterPath = raster_layer.source()
source = self.parameterAsSource(parameters, self.BOUNDARY_LAYER, context)
step = self.parameterAsDouble(parameters, self.STEP, context)
percentage = self.parameterAsBool(parameters, self.USE_PERCENTAGE, context)
outputPath = self.parameterAsString(parameters, self.OUTPUT_DIRECTORY, context)
rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
geoTransform = rasterDS.GetGeoTransform()
rasterBand = rasterDS.GetRasterBand(1)
noData = rasterBand.GetNoDataValue()
cellXSize = abs(geoTransform[1])
cellYSize = abs(geoTransform[5])
rasterXSize = rasterDS.RasterXSize
rasterYSize = rasterDS.RasterYSize
rasterBBox = QgsRectangle(geoTransform[0],
geoTransform[3] - cellYSize * rasterYSize,
geoTransform[0] + cellXSize * rasterXSize,
geoTransform[3])
rasterGeom = QgsGeometry.fromRect(rasterBBox)
crs = osr.SpatialReference()
crs.ImportFromProj4(str(target_crs.toProj4()))
memVectorDriver = ogr.GetDriverByName('Memory')
memRasterDriver = gdal.GetDriverByName('MEM')
features = source.getFeatures(QgsFeatureRequest().setDestinationCrs(target_crs, context.transformContext()))
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, f in enumerate(features):
if not f.hasGeometry():
continue
if feedback.isCanceled():
break
geom = f.geometry()
intersectedGeom = rasterGeom.intersection(geom)
if intersectedGeom.isEmpty():
feedback.pushInfo(
self.tr('Feature {0} does not intersect raster or '
'entirely located in NODATA area').format(f.id()))
continue
fName = os.path.join(
                outputPath, 'histogram_%s_%s.csv' % (source.sourceName(), f.id()))
ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.asWkt())
bbox = intersectedGeom.boundingBox()
xMin = bbox.xMinimum()
xMax = bbox.xMaximum()
yMin = bbox.yMinimum()
yMax = bbox.yMaximum()
(startColumn, startRow) = raster.mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = raster.mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
            srcOffset = (startColumn, startRow, width, height)
            # Guard against zero-sized windows before asking GDAL to read
            if srcOffset[2] == 0 or srcOffset[3] == 0:
                feedback.pushInfo(
                    self.tr('Feature {0} is smaller than raster '
                            'cell size').format(f.id()))
                continue
            srcArray = rasterBand.ReadAsArray(*srcOffset)
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5]
)
memVDS = memVectorDriver.CreateDataSource('out')
memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)
ft = ogr.Feature(memLayer.GetLayerDefn())
ft.SetGeometry(ogrGeom)
memLayer.CreateFeature(ft)
ft.Destroy()
rasterizedDS = memRasterDriver.Create('', srcOffset[2],
srcOffset[3], 1, gdal.GDT_Byte)
rasterizedDS.SetGeoTransform(newGeoTransform)
gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
rasterizedArray = rasterizedDS.ReadAsArray()
srcArray = numpy.nan_to_num(srcArray)
masked = numpy.ma.MaskedArray(srcArray,
mask=numpy.logical_or(srcArray == noData,
numpy.logical_not(rasterizedArray)))
self.calculateHypsometry(f.id(), fName, feedback, masked,
cellXSize, cellYSize, percentage, step)
memVDS = None
rasterizedDS = None
feedback.setProgress(int(current * total))
rasterDS = None
return {self.OUTPUT_DIRECTORY: outputPath}
def calculateHypsometry(self, fid, fName, feedback, data, pX, pY,
percentage, step):
out = dict()
d = data.compressed()
if d.size == 0:
feedback.pushInfo(
self.tr('Feature {0} does not intersect raster or '
'entirely located in NODATA area').format(fid))
return
minValue = d.min()
maxValue = d.max()
startValue = minValue
tmpValue = minValue + step
while startValue < maxValue:
out[tmpValue] = ((startValue <= d) & (d < tmpValue)).sum()
startValue = tmpValue
tmpValue += step
if percentage:
multiplier = 100.0 / len(d.flat)
else:
multiplier = pX * pY
for k, v in list(out.items()):
out[k] = v * multiplier
prev = None
for i in sorted(out.items()):
if prev is None:
out[i[0]] = i[1]
else:
out[i[0]] = i[1] + out[prev]
prev = i[0]
with open(fName, 'w', newline='', encoding='utf-8') as out_file:
writer = csv.writer(out_file)
writer.writerow([self.tr('Area'), self.tr('Elevation')])
for i in sorted(out.items()):
writer.writerow([i[1], i[0]])
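if __name__ == '__main__':
    # Illustrative only: the per-step binning and accumulation performed by
    # calculateHypsometry, on a toy elevation array with step=10 and a unit
    # cell size (so cumulative cell counts equal areas).
    toy = numpy.array([5.0, 7.0, 12.0, 18.0, 24.0])
    step, out = 10.0, {}
    start, upper = toy.min(), toy.min() + step
    while start < toy.max():
        out[upper] = ((start <= toy) & (toy < upper)).sum()
        start, upper = upper, upper + step
    total = 0
    for bound in sorted(out):
        total += out[bound]
        out[bound] = total
    print(out)  # {15.0: 3, 25.0: 5}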
|
asfaltboy/GitSavvy | refs/heads/master | core/commands/pull.py | 1 | import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
class GsPullCommand(WindowCommand, GitCommand):
"""
Through a series of panels, allow the user to pull from a remote branch.
"""
def run(self):
sublime.set_timeout_async(self.run_async)
def run_async(self):
"""
Display a panel of all remotes defined for the repo, then proceed to
`on_select_remote`. If no remotes are defined, notify the user and
proceed no further.
"""
self.remotes = list(self.get_remotes().keys())
pre_selected_idx = (self.remotes.index(self.last_remote_used)
if self.last_remote_used in self.remotes
else 0)
if not self.remotes:
self.window.show_quick_panel(["There are no remotes available."], None)
else:
self.window.show_quick_panel(
self.remotes,
self.on_select_remote,
flags=sublime.MONOSPACE_FONT,
selected_index=pre_selected_idx
)
def on_select_remote(self, remote_index):
"""
After the user selects a remote, display a panel of branches that are
present on that remote, then proceed to `on_select_branch`.
"""
# If the user pressed `esc` or otherwise cancelled.
if remote_index == -1:
return
self.selected_remote = self.remotes[remote_index]
# Save the selected remote for automatic selection on next palette command.
self.last_remote_used = self.selected_remote
self.branches_on_selected_remote = self.list_remote_branches(self.selected_remote)
current_local_branch = self.get_current_branch_name()
try:
pre_selected_idx = self.branches_on_selected_remote.index(
self.selected_remote + "/" + current_local_branch)
except ValueError:
pre_selected_idx = 0
def deferred_panel():
self.window.show_quick_panel(
self.branches_on_selected_remote,
self.on_select_branch,
flags=sublime.MONOSPACE_FONT,
selected_index=pre_selected_idx
)
sublime.set_timeout(deferred_panel)
def on_select_branch(self, branch_index):
"""
Determine the actual branch name of the user's selection, and proceed
to `do_pull`.
"""
# If the user pressed `esc` or otherwise cancelled.
if branch_index == -1:
return
selected_branch = self.branches_on_selected_remote[branch_index].split("/", 1)[1]
sublime.set_timeout_async(lambda: self.do_pull(self.selected_remote, selected_branch))
def do_pull(self, remote, branch):
"""
Perform `git pull remote branch`.
"""
sublime.status_message("Starting pull...")
self.pull(remote, branch)
sublime.status_message("Pull complete.")
util.view.refresh_gitsavvy(self.window.active_view())
|
arbrandes/edx-platform | refs/heads/master | lms/djangoapps/mobile_api/course_info/urls.py | 5 | """
URLs for course_info API
"""
from django.conf import settings
from django.conf.urls import url
from .views import CourseHandoutsList, CourseUpdatesList
urlpatterns = [
url(
fr'^{settings.COURSE_ID_PATTERN}/handouts$',
CourseHandoutsList.as_view(),
name='course-handouts-list'
),
url(
fr'^{settings.COURSE_ID_PATTERN}/updates$',
CourseUpdatesList.as_view(),
name='course-updates-list'
),
]
|
garnaat/boto | refs/heads/develop | tests/integration/cognito/sync/__init__.py | 473 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
moijes12/oh-mainline | refs/heads/master | vendor/packages/Django/tests/regressiontests/comment_tests/tests/comment_utils_moderators_tests.py | 92 | from __future__ import absolute_import
from django.contrib.comments.models import Comment
from django.contrib.comments.moderation import (moderator, CommentModerator,
AlreadyModerated)
from django.core import mail
from . import CommentTestCase
from ..models import Entry
class EntryModerator1(CommentModerator):
email_notification = True
class EntryModerator2(CommentModerator):
enable_field = 'enable_comments'
class EntryModerator3(CommentModerator):
auto_close_field = 'pub_date'
close_after = 7
class EntryModerator4(CommentModerator):
auto_moderate_field = 'pub_date'
moderate_after = 7
class EntryModerator5(CommentModerator):
auto_moderate_field = 'pub_date'
moderate_after = 0
class EntryModerator6(CommentModerator):
auto_close_field = 'pub_date'
close_after = 0
class CommentUtilsModeratorTests(CommentTestCase):
fixtures = ["comment_utils.xml"]
def createSomeComments(self):
# Tests for the moderation signals must actually post data
# through the comment views, because only the comment views
# emit the custom signals moderation listens for.
e = Entry.objects.get(pk=1)
data = self.getValidData(e)
self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
# We explicitly do a try/except to get the comment we've just
# posted because moderation may have disallowed it, in which
# case we can just return it as None.
try:
c1 = Comment.objects.all()[0]
except IndexError:
c1 = None
self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
try:
c2 = Comment.objects.all()[0]
except IndexError:
c2 = None
return c1, c2
def tearDown(self):
moderator.unregister(Entry)
def testRegisterExistingModel(self):
moderator.register(Entry, EntryModerator1)
self.assertRaises(AlreadyModerated, moderator.register, Entry, EntryModerator1)
def testEmailNotification(self):
moderator.register(Entry, EntryModerator1)
self.createSomeComments()
self.assertEqual(len(mail.outbox), 2)
def testCommentsEnabled(self):
moderator.register(Entry, EntryModerator2)
self.createSomeComments()
self.assertEqual(Comment.objects.all().count(), 1)
def testAutoCloseField(self):
moderator.register(Entry, EntryModerator3)
self.createSomeComments()
self.assertEqual(Comment.objects.all().count(), 0)
def testAutoModerateField(self):
moderator.register(Entry, EntryModerator4)
c1, c2 = self.createSomeComments()
self.assertEqual(c2.is_public, False)
def testAutoModerateFieldImmediate(self):
moderator.register(Entry, EntryModerator5)
c1, c2 = self.createSomeComments()
self.assertEqual(c2.is_public, False)
def testAutoCloseFieldImmediate(self):
moderator.register(Entry, EntryModerator6)
c1, c2 = self.createSomeComments()
self.assertEqual(Comment.objects.all().count(), 0) |
Omegaphora/external_chromium_org_tools_gyp | refs/heads/lp5.1 | test/ninja/use-console/gyptest-use-console.py | 120 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure 'ninja_use_console' is supported in actions and rules.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('use-console.gyp')
no_pool = open(test.built_file_path('obj/no_pool.ninja')).read()
if 'pool =' in no_pool:
test.fail_test()
action_pool = open(test.built_file_path('obj/action_pool.ninja')).read()
if 'pool = console' not in action_pool:
test.fail_test()
rule_pool = open(test.built_file_path('obj/rule_pool.ninja')).read()
if 'pool = console' not in rule_pool:
test.fail_test()
test.pass_test()
|
cmorgan/pybrain | refs/heads/master | pybrain/tools/benchmark.py | 31 | __author__ = 'Justin Bayer, bayerj@in.tum.de'
from pybrain.datasets.dataset import DataSet
class BenchmarkDataSet(DataSet):
def __init__(self):
super(BenchmarkDataSet, self).__init__()
self.addField('Average Reward', 1)
self.addField('Episode Length', 1)
self.linkFields(['Average Reward', 'Episode Length'])
def _initialValues(self):
return tuple(), dict()
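if __name__ == '__main__':
    # Minimal usage sketch (illustrative): appendLinked() fills the two
    # linked fields one row at a time, one row per benchmarked episode.
    ds = BenchmarkDataSet()
    ds.appendLinked([0.75], [120])
    ds.appendLinked([0.80], [95])
    print(len(ds))  # -> 2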
|
nitzmahone/ansible-modules-extras | refs/heads/devel | monitoring/pagerduty.py | 132 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- This module will let you create PagerDuty maintenance windows
version_added: "1.2"
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns"
- "Bruce Pennypacker"
requirements:
- PagerDuty API access
options:
state:
description:
- Create a maintenance window or get a list of ongoing windows.
required: true
default: null
choices: [ "running", "started", "ongoing", "absent" ]
aliases: []
name:
description:
- PagerDuty unique subdomain.
required: true
default: null
choices: []
aliases: []
user:
description:
- PagerDuty user ID.
required: true
default: null
choices: []
aliases: []
passwd:
description:
- PagerDuty user password.
required: true
default: null
choices: []
aliases: []
token:
description:
- A pagerduty token, generated on the pagerduty site. Can be used instead of
user/passwd combination.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
requester_id:
description:
- ID of user making the request. Only needed when using a token and creating a maintenance_window.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
service:
description:
- A comma separated list of PagerDuty service IDs.
required: false
default: null
choices: []
aliases: [ services ]
hours:
description:
- Length of maintenance window in hours.
required: false
default: 1
choices: []
aliases: []
minutes:
description:
- Maintenance window in minutes (this is added to the hours).
required: false
default: 0
choices: []
aliases: []
version_added: '1.8'
desc:
description:
- Short description of maintenance window.
required: false
default: Created by Ansible
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES='''
# List ongoing maintenance windows using a user/passwd
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
# List ongoing maintenance windows using a token
- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
# Create a 1 hour maintenance window for service FOO123, using a user/passwd
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
# Create a 5 minute maintenance window for service FOO123, using a token
- pagerduty: name=companyabc
token=xxxxxxxxxxxxxx
hours=0
minutes=5
state=running
service=FOO123
# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
hours=4
desc=deployment
register: pd_window
# Delete the previous maintenance window
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=absent
service={{ pd_window.result.maintenance_window.id }}
'''
import datetime
import base64
def auth_header(user, passwd, token):
if token:
return "Token token=%s" % token
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
return "Basic %s" % auth
def ongoing(module, name, user, passwd, token):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
headers = {"Authorization": auth_header(user, passwd, token)}
response, info = fetch_url(module, url, headers=headers)
if info['status'] != 200:
module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
try:
json_out = json.loads(response.read())
except:
json_out = ""
return False, json_out, False
def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
now = datetime.datetime.utcnow()
later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
headers = {
'Authorization': auth_header(user, passwd, token),
'Content-Type' : 'application/json',
}
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
if requester_id:
request_data['requester_id'] = requester_id
else:
if token:
module.fail_json(msg="requester_id is required when using a token")
data = json.dumps(request_data)
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
if info['status'] != 200:
module.fail_json(msg="failed to create the window: %s" % info['msg'])
try:
json_out = json.loads(response.read())
except:
json_out = ""
return False, json_out, True
def absent(module, name, user, passwd, token, requester_id, service):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
headers = {
'Authorization': auth_header(user, passwd, token),
'Content-Type' : 'application/json',
}
request_data = {}
if requester_id:
request_data['requester_id'] = requester_id
else:
if token:
module.fail_json(msg="requester_id is required when using a token")
data = json.dumps(request_data)
response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
if info['status'] != 200:
module.fail_json(msg="failed to delete the window: %s" % info['msg'])
try:
json_out = json.loads(response.read())
except:
json_out = ""
return False, json_out, True
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
name=dict(required=True),
user=dict(required=False),
passwd=dict(required=False),
token=dict(required=False),
service=dict(required=False, type='list', aliases=["services"]),
requester_id=dict(required=False),
hours=dict(default='1', required=False),
minutes=dict(default='0', required=False),
desc=dict(default='Created by Ansible', required=False),
validate_certs = dict(default='yes', type='bool'),
)
)
state = module.params['state']
name = module.params['name']
user = module.params['user']
passwd = module.params['passwd']
token = module.params['token']
service = module.params['service']
hours = module.params['hours']
minutes = module.params['minutes']
desc = module.params['desc']
requester_id = module.params['requester_id']
if not token and not (user or passwd):
module.fail_json(msg="neither user and passwd nor token specified")
if state == "running" or state == "started":
if not service:
module.fail_json(msg="service not specified")
(rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
if rc == 0:
changed=True
if state == "ongoing":
(rc, out, changed) = ongoing(module, name, user, passwd, token)
if state == "absent":
(rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service)
if rc != 0:
module.fail_json(msg="failed", result=out)
module.exit_json(msg="success", result=out, changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
TheTypoMaster/evennia | refs/heads/master | evennia/server/portal/rss.py | 4 | """
RSS parser for Evennia
This connects an RSS feed to an in-game Evennia channel, sending messages
to the channel whenever the feed updates.
"""
from twisted.internet import task, threads
from django.conf import settings
from evennia.server.session import Session
from evennia.utils import logger
RSS_ENABLED = settings.RSS_ENABLED
#RETAG = re.compile(r'<[^>]*?>')
if RSS_ENABLED:
try:
import feedparser
except ImportError:
raise ImportError("RSS requires python-feedparser to be installed. Install or set RSS_ENABLED=False.")
class RSSReader(Session):
"""
A simple RSS reader using the feedparser module.
"""
def __init__(self, factory, url, rate):
"""
Initialize the reader.
Args:
factory (RSSFactory): The protocol factory.
url (str): The RSS url.
rate (int): The seconds between RSS lookups.
"""
self.url = url
self.rate = rate
self.factory = factory
self.old_entries = {}
def get_new(self):
"""
Returns list of new items.
"""
feed = feedparser.parse(self.url)
new_entries = []
for entry in feed['entries']:
idval = entry['id'] + entry.get("updated", "")
if idval not in self.old_entries:
self.old_entries[idval] = entry
new_entries.append(entry)
return new_entries
def disconnect(self, reason=None):
"""
Disconnect from feed.
Args:
reason (str, optional): Motivation for the disconnect.
"""
if self.factory.task and self.factory.task.running:
self.factory.task.stop()
self.sessionhandler.disconnect(self)
def _callback(self, new_entries, init):
"""
Called when RSS returns.
Args:
new_entries (list): List of new RSS entries since last.
init (bool): If this is a startup operation (at which
point all entries are considered new).
"""
if not init:
# for initialization we just ignore old entries
for entry in reversed(new_entries):
self.data_in("bot_data_in " + entry)
def data_in(self, text=None, **kwargs):
"""
Data RSS -> Evennia.
Kwargs:
text (str): Incoming text
kwargs (any): Options from protocol.
"""
self.sessionhandler.data_in(self, text=text, **kwargs)
def _errback(self, fail):
"Report error"
logger.log_errmsg("RSS feed error: %s" % fail.value)
def update(self, init=False):
"""
Request the latest version of feed.
Args:
init (bool, optional): If this is an initialization call
                or not (during init, all entries are considered new).
Notes:
This call is done in a separate thread to avoid blocking
on slow connections.
"""
return threads.deferToThread(self.get_new).addCallback(self._callback, init).addErrback(self._errback)
class RSSBotFactory(object):
"""
Initializes new bots.
"""
def __init__(self, sessionhandler, uid=None, url=None, rate=None):
"""
Initialize the bot.
Args:
sessionhandler (PortalSessionHandler): The main sessionhandler object.
uid (int): User id for the bot.
url (str): The RSS URL.
rate (int): How often for the RSS to request the latest RSS entries.
"""
self.sessionhandler = sessionhandler
self.url = url
self.rate = rate
self.uid = uid
self.bot = RSSReader(self, url, rate)
self.task = None
def start(self):
"""
        Called by portalsessionhandler. Starts the bot.
"""
def errback(fail):
logger.log_errmsg(fail.value)
# set up session and connect it to sessionhandler
self.bot.init_session("rssbot", self.url, self.sessionhandler)
self.bot.uid = self.uid
self.bot.logged_in = True
self.sessionhandler.connect(self.bot)
# start repeater task
self.bot.update(init=True)
self.task = task.LoopingCall(self.bot.update)
if self.rate:
self.task.start(self.rate, now=False).addErrback(errback)
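# Illustrative wiring (sketch; the uid/url/rate values are assumptions):
#
#   factory = RSSBotFactory(sessionhandler, uid=1,
#                           url="http://example.com/rss.xml", rate=600)
#   factory.start()  # connects the bot session and polls every `rate` seconds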
|
spatialdev/onadata | refs/heads/master | onadata/settings/default_settings.py | 11 | # this system uses structured settings.py as defined in
# http://www.slideshare.net/jacobian/the-best-and-worst-of-django
#
# this third-level staging file overrides some definitions in staging.py
# You may wish to alter it to agree with your local environment
#
# get most settings from staging_example.py (which in turn, imports from
# settings.py)
from staging_example import * # nopep8
# # # now override the settings which came from staging # # # #
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'onadata',
'USER': 'onadata',
'PASSWORD': '',
'HOST': '127.0.0.1',
'OPTIONS': {
# note: this option obsolete starting with django 1.6
'autocommit': True,
}
}
}
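# Illustrative local override: redefine only the keys that differ (for
# example, point 'HOST' at a remote postgres box); every setting not redefined
# here keeps the value inherited from staging_example.py.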
DATABASE_ROUTERS = [] # turn off second database
# Make a unique unique key just for testing, and don't share it with anybody.
SECRET_KEY = 'mlfs33^s1l4xf6a36$0#j%dd*sisfoi&)&4s-v=91#^l01v)*j'
|
mbayon/TFG-MachineLearning | refs/heads/master | venv/lib/python3.6/site-packages/numpy/distutils/command/develop.py | 264 | """ Override the develop command from setuptools so we can ensure that our
generated files (from build_src or build_scripts) are properly converted to real
files with filenames.
"""
from __future__ import division, absolute_import, print_function
from setuptools.command.develop import develop as old_develop
class develop(old_develop):
__doc__ = old_develop.__doc__
def install_for_development(self):
# Build sources in-place, too.
self.reinitialize_command('build_src', inplace=1)
# Make sure scripts are built.
self.run_command('build_scripts')
old_develop.install_for_development(self)
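# Illustrative hookup (assumption: this mirrors how numpy.distutils registers
# its commands): setup(..., cmdclass={'develop': develop}) makes
# `python setup.py develop` run build_src in-place before the egg-link install.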
|
koppen/PrettyCode | refs/heads/master | lib/js-beautify/python/jsbeautifier/__init__.py | 1 | import sys
import getopt
import re
import string
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, einar@jsbeautifier.org,
# MIT licence, enjoy.
#
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
        self.max_preserve_newlines = 10
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
self.unescape_strings = False
self.break_chained_methods = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
unescape_strings = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
self.unescape_strings,
)
class BeautifierFlags:
def __init__(self, mode):
self.previous_mode = 'BLOCK'
self.mode = mode
self.var_line = False
self.var_line_tainted = False
self.var_line_reindented = False
self.in_html_comment = False
self.if_line = False
self.chain_extra_indentation = 0
self.in_case = False
self.in_case_statement = False
self.case_body = False
self.eat_next_space = False
self.indentation_baseline = -1
self.indentation_level = 0
self.ternary_depth = 0
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
    if file_name == '-': # stdin
        f = sys.stdin
    else:
        try:
            f = open(file_name)
        except Exception as ex:
            # include the underlying error so the caller can see why it failed
            return 'The file could not be opened: %s' % ex
    b = Beautifier()
    return b.beautify(f.read(), opts)
def usage():
print("""Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
""")
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
def blank_state(self):
# internal flags
self.flags = BeautifierFlags('BLOCK')
self.flag_store = []
self.wanted_newline = False
self.just_added_newline = False
self.do_block_just_closed = False
if self.opts.indent_with_tabs:
self.indent_string = "\t"
else:
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_word = '' # last TK_WORD seen
self.last_type = 'TK_START_EXPR' # last token type
self.last_text = '' # last token text
self.last_last_text = '' # pre-last token text
self.input = None
self.output = [] # formatted javascript gets built here
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! !! , : ? ^ ^= |= ::'
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
# Words which always should start on a new line
self.line_starters = 'continue,try,throw,return,var,if,switch,case,default,for,while,break,function'.split(',')
self.set_mode('BLOCK')
global parser_pos
parser_pos = 0
def beautify(self, s, opts = None ):
        if opts is not None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
        # use self.opts here so calling beautify() without explicit opts works
        self.input = self.unpack(s, self.opts.eval_code)
parser_pos = 0
while True:
token_text, token_type = self.get_next_token()
#print (token_text, token_type, self.flags.mode)
if token_type == 'TK_EOF':
break
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_COMMA': self.handle_comma,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_DOT': self.handle_dot,
'TK_UNKNOWN': self.handle_unknown,
}
handlers[token_type](token_text)
self.last_last_text = self.last_text
self.last_type = token_type
self.last_text = token_text
sweet_code = self.preindent_string + re.sub('[\n ]+$', '', ''.join(self.output))
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
while len(self.output) \
and (
self.output[-1] == ' '\
or self.output[-1] == self.indent_string \
or self.output[-1] == self.preindent_string \
or (eat_newlines and self.output[-1] in ['\n', '\r'])):
self.output.pop()
def is_special_word(self, s):
        return s in ['case', 'return', 'do', 'if', 'throw', 'else']
def is_array(self, mode):
return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']
def is_expression(self, mode):
return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]', '(EXPRESSION)', '(FOR-EXPRESSION)', '(COND-EXPRESSION)']
def append_newline_forced(self):
old_array_indentation = self.opts.keep_array_indentation
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = old_array_indentation
def append_newline(self, ignore_repeated = True, reset_statement_flags = True):
self.flags.eat_next_space = False
if self.opts.keep_array_indentation and self.is_array(self.flags.mode):
return
if reset_statement_flags:
self.flags.if_line = False
self.flags.chain_extra_indentation = 0
self.trim_output()
if len(self.output) == 0:
# no newline on start of file
return
if self.output[-1] != '\n' or not ignore_repeated:
self.just_added_newline = True
self.output.append('\n')
if self.preindent_string:
self.output.append(self.preindent_string)
for i in range(self.flags.indentation_level + self.flags.chain_extra_indentation):
self.output.append(self.indent_string)
if self.flags.var_line and self.flags.var_line_reindented:
self.output.append(self.indent_string)
def append(self, s):
if s == ' ':
# do not add just a single space after the // comment, ever
if self.last_type == 'TK_COMMENT':
return self.append_newline()
# make sure only single space gets drawn
if self.flags.eat_next_space:
self.flags.eat_next_space = False
elif len(self.output) and self.output[-1] not in [' ', '\n', self.indent_string]:
self.output.append(' ')
else:
self.just_added_newline = False
self.flags.eat_next_space = False
self.output.append(s)
def indent(self):
self.flags.indentation_level = self.flags.indentation_level + 1
def remove_indent(self):
if len(self.output) and self.output[-1] in [self.indent_string, self.preindent_string]:
self.output.pop()
def set_mode(self, mode):
prev = BeautifierFlags('BLOCK')
if self.flags:
self.flag_store.append(self.flags)
prev = self.flags
self.flags = BeautifierFlags(mode)
if len(self.flag_store) == 1:
self.flags.indentation_level = 0
else:
self.flags.indentation_level = prev.indentation_level
if prev.var_line and prev.var_line_reindented:
self.flags.indentation_level = self.flags.indentation_level + 1
self.flags.previous_mode = prev.mode
def restore_mode(self):
self.do_block_just_closed = self.flags.mode == 'DO_BLOCK'
if len(self.flag_store) > 0:
mode = self.flags.mode
self.flags = self.flag_store.pop()
self.flags.previous_mode = mode
def get_next_token(self):
global parser_pos
self.n_newlines = 0
if parser_pos >= len(self.input):
return '', 'TK_EOF'
self.wanted_newline = False
c = self.input[parser_pos]
parser_pos += 1
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
if keep_whitespace:
# slight mess to allow nice preservation of array indentation and reindent that correctly
# first time when we get to the arrays:
# var a = [
# ....'something'
# we make note of whitespace_count = 4 into flags.indentation_baseline
# so we know that 4 whitespaces in original source match indent_level of reindented source
#
# and afterwards, when we get to
# 'something,
# .......'something else'
# we know that this should be indented to indent_level + (7 - indentation_baseline) spaces
whitespace_count = 0
while c in self.whitespace:
if c == '\n':
self.trim_output()
self.output.append('\n')
self.just_added_newline = True
whitespace_count = 0
elif c == '\t':
whitespace_count += 4
elif c == '\r':
pass
else:
whitespace_count += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.flags.indentation_baseline == -1:
self.flags.indentation_baseline = whitespace_count
if self.just_added_newline:
for i in range(self.flags.indentation_level + 1):
self.output.append(self.indent_string)
if self.flags.indentation_baseline != -1:
for i in range(whitespace_count - self.flags.indentation_baseline):
self.output.append(' ')
else: # not keep_whitespace
while c in self.whitespace:
if c == '\n':
if self.opts.max_preserve_newlines == 0 or self.opts.max_preserve_newlines > self.n_newlines:
self.n_newlines += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i == 0)
self.just_added_newline = True
self.wanted_newline = self.n_newlines > 0
if c in self.wordchar:
if parser_pos < len(self.input):
while self.input[parser_pos] in self.wordchar:
c = c + self.input[parser_pos]
parser_pos += 1
if parser_pos == len(self.input):
break
# small and surprisingly unugly hack for 1E-10 representation
if parser_pos != len(self.input) and self.input[parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[parser_pos]
parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
return c, 'TK_WORD'
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
if self.wanted_newline and \
self.last_type != 'TK_OPERATOR' and\
self.last_type != 'TK_EQUALS' and\
not self.flags.if_line and \
(self.opts.preserve_newlines or self.last_text != 'var'):
self.append_newline()
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
comment_mode = 'TK_INLINE_COMMENT'
if self.input[parser_pos] == '*': # peek /* .. */ comment
parser_pos += 1
if parser_pos < len(self.input):
while not (self.input[parser_pos] == '*' and \
parser_pos + 1 < len(self.input) and \
self.input[parser_pos + 1] == '/')\
and parser_pos < len(self.input):
c = self.input[parser_pos]
comment += c
if c in '\r\n':
comment_mode = 'TK_BLOCK_COMMENT'
parser_pos += 1
if parser_pos >= len(self.input):
break
parser_pos += 2
return '/*' + comment + '*/', comment_mode
if self.input[parser_pos] == '/': # peek // comment
comment = c
while self.input[parser_pos] not in '\r\n':
comment += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
if self.wanted_newline:
self.append_newline()
return comment, 'TK_COMMENT'
if c == "'" or c == '"' or \
(c == '/' and ((self.last_type == 'TK_WORD' and self.is_special_word(self.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(FOR-EXPRESSION)', '(COND-EXPRESSION)']) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR',
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA']))):
sep = c
esc = False
esc1 = 0
esc2 = 0
resulting_string = c
in_char_class = False
if parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if not esc:
esc = self.input[parser_pos] == '\\'
if self.input[parser_pos] == '[':
in_char_class = True
elif self.input[parser_pos] == ']':
in_char_class = False
else:
esc = False
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
else:
# handle string
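                    # Bookkeeping for \xNN / \uNNNN unescaping (active when
                    # opts.unescape_strings is set): esc2 is the number of hex
                    # digits expected (2 for \x, 4 for \u) and esc1 counts the
                    # characters consumed since the escape began. Once enough
                    # digits are read, the sequence is decoded and, if it is
                    # printable ASCII, substituted back into resulting_string.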
while esc or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if esc1 and esc1 >= esc2:
try:
esc1 = int(resulting_string[-esc2:], 16)
except Exception:
esc1 = False
if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
esc1 = chr(esc1)
resulting_string = resulting_string[:-2 - esc2]
if esc1 == sep or esc1 == '\\':
resulting_string += '\\'
resulting_string += esc1
esc1 = 0
if esc1:
esc1 += 1
elif not esc:
esc = self.input[parser_pos] == '\\'
else:
esc = False
if self.opts.unescape_strings:
if self.input[parser_pos] == 'x':
esc1 += 1
esc2 = 2
elif self.input[parser_pos] == 'u':
esc1 += 1
esc2 = 4
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete string when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while parser_pos < len(self.input) and self.input[parser_pos] in self.wordchar:
resulting_string += self.input[parser_pos]
parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output) == 0 and len(self.input) > 1 and self.input[parser_pos] == '!':
resulting_string = c
while parser_pos < len(self.input) and c != '\n':
c = self.input[parser_pos]
resulting_string += c
parser_pos += 1
self.output.append(resulting_string.strip() + "\n")
self.append_newline()
return self.get_next_token()
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if parser_pos < len(self.input) and self.input[parser_pos] in self.digits:
while True:
c = self.input[parser_pos]
sharp += c
parser_pos += 1
if parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or parser_pos >= len(self.input):
pass
elif self.input[parser_pos] == '[' and self.input[parser_pos + 1] == ']':
sharp += '[]'
parser_pos += 2
elif self.input[parser_pos] == '{' and self.input[parser_pos + 1] == '}':
sharp += '{}'
parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[parser_pos - 1 : parser_pos + 3] == '<!--':
parser_pos += 3
c = '<!--'
while parser_pos < len(self.input) and self.input[parser_pos] != '\n':
c += self.input[parser_pos]
parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[parser_pos - 1 : parser_pos + 2] == '-->':
self.flags.in_html_comment = False
parser_pos += 2
if self.wanted_newline:
self.append_newline()
return '-->', 'TK_COMMENT'
if c == '.':
return c, 'TK_DOT'
if c in self.punct:
while parser_pos < len(self.input) and c + self.input[parser_pos] in self.punct:
c += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
if c == '=':
return c, 'TK_EQUALS'
if c == ',':
return c, 'TK_COMMA'
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def handle_start_expr(self, token_text):
if token_text == '[':
if self.last_type == 'TK_WORD' or self.last_text == ')':
if self.last_text in self.line_starters:
self.append(' ')
self.set_mode('(EXPRESSION)')
self.append(token_text)
return
if self.flags.mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']:
if self.last_last_text == ']' and self.last_text == ',':
# ], [ goes to a new line
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
elif self.last_text == '[':
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
else:
self.set_mode('[EXPRESSION]')
else:
self.set_mode('[EXPRESSION]')
else:
if self.last_text == 'for':
self.set_mode('(FOR-EXPRESSION)')
elif self.last_text in ['if', 'while']:
self.set_mode('(COND-EXPRESSION)')
else:
self.set_mode('(EXPRESSION)')
if self.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
if self.wanted_newline:
                self.append_newline()
elif self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.append(' ')
elif self.last_word == 'function' or self.last_word == 'typeof':
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.append(' ')
elif self.last_text in self.line_starters or self.last_text == 'catch':
self.append(' ')
self.append(token_text)
def handle_end_expr(self, token_text):
if token_text == ']':
if self.opts.keep_array_indentation:
if self.last_text == '}':
self.remove_indent()
self.append(token_text)
self.restore_mode()
return
else:
if self.flags.mode == '[INDENTED-EXPRESSION]':
if self.last_text == ']':
self.restore_mode()
self.append_newline()
self.append(token_text)
return
self.restore_mode()
self.append(token_text)
def handle_start_block(self, token_text):
if self.last_word == 'do':
self.set_mode('DO_BLOCK')
else:
self.set_mode('BLOCK')
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR':
if self.last_text == '=' or (self.is_special_word(self.last_text) and self.last_text != 'else'):
self.append(' ')
else:
self.append_newline(True)
self.append(token_text)
self.indent()
else:
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.append(' ')
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.flags.previous_mode) and self.last_text == ',':
if self.last_last_text == '}':
self.append(' ')
else:
self.append_newline()
self.indent()
self.append(token_text)
def handle_end_block(self, token_text):
self.restore_mode()
if self.opts.brace_style == 'expand':
if self.last_text != '{':
self.append_newline()
else:
if self.last_type == 'TK_START_BLOCK':
if self.just_added_newline:
self.remove_indent()
else:
# {}
self.trim_output()
else:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.append(token_text)
def handle_word(self, token_text):
if self.do_block_just_closed:
self.append(' ')
self.append(token_text)
self.append(' ')
self.do_block_just_closed = False
return
if token_text == 'function':
if self.flags.var_line and self.last_text != '=':
self.flags.var_line_reindented = not self.opts.keep_function_indentation
if (self.just_added_newline or self.last_text == ';') and self.last_text != '{':
# make sure there is a nice clean space of at least one blank line
# before a new function definition
have_newlines = self.n_newlines
if not self.just_added_newline:
have_newlines = 0
if not self.opts.preserve_newlines:
have_newlines = 1
for i in range(2 - have_newlines):
self.append_newline(False)
if self.last_text in ['get', 'set', 'new'] or self.last_type == 'TK_WORD':
self.append(' ')
if self.last_type == 'TK_WORD':
if self.last_text in ['get', 'set', 'new', 'return']:
self.append(' ')
else:
self.append_newline()
elif self.last_type == 'TK_OPERATOR' or self.last_text == '=':
# foo = function
self.append(' ')
elif self.is_expression(self.flags.mode):
# (function
pass
else:
self.append_newline()
self.append('function')
self.last_word = 'function'
return
if token_text == 'case' or (token_text == 'default' and self.flags.in_case_statement):
self.append_newline()
if self.flags.case_body:
                self.remove_indent()
                self.flags.case_body = False
                self.flags.indentation_level -= 1
self.append(token_text)
self.flags.in_case = True
self.flags.in_case_statement = True
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if token_text not in ['else', 'catch', 'finally']:
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.append(' ')
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode in ['BLOCK', 'DO_BLOCK']:
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_WORD':
if self.last_text == 'else':
# eat newlines between ...else *** some_op...
# won't preserve extra newlines in this place (if any), but don't care that much
self.trim_output(True)
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.append(' ')
prefix = 'NEWLINE'
if self.flags.if_line and self.last_type == 'TK_END_EXPR':
self.flags.if_line = False
if token_text in self.line_starters:
if self.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
self.append(' ')
elif prefix == 'NEWLINE':
if self.is_special_word(self.last_text):
# no newline between return nnn
self.append(' ')
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or token_text != 'var') and self.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if token_text == 'if' and self.last_word == 'else' and self.last_text != '{':
self.append(' ')
else:
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif token_text in self.line_starters and self.last_text != ')':
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif self.is_array(self.flags.mode) and self.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.append(' ')
self.append(token_text)
self.last_word = token_text
if token_text == 'var':
self.flags.var_line = True
self.flags.var_line_reindented = False
self.flags.var_line_tainted = False
if token_text == 'if':
self.flags.if_line = True
if token_text == 'else':
self.flags.if_line = False
def handle_semicolon(self, token_text):
self.append(token_text)
self.flags.var_line = False
self.flags.var_line_reindented = False
if self.flags.mode == 'OBJECT':
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = 'BLOCK'
def handle_string(self, token_text):
if self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(COND-EXPRESSION)', '(FOR-EXPRESSION)']:
self.append(' ')
if self.last_type in ['TK_COMMENT', 'TK_STRING', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_SEMICOLON']:
self.append_newline()
elif self.last_type == 'TK_WORD':
self.append(' ')
self.append(token_text)
def handle_equals(self, token_text):
if self.flags.var_line:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.var_line_tainted = True
self.append(' ')
self.append(token_text)
self.append(' ')
def handle_comma(self, token_text):
if self.last_type == 'TK_COMMENT':
            self.append_newline()
if self.flags.var_line:
if self.is_expression(self.flags.mode) or self.last_type == 'TK_END_BLOCK':
# do not break on comma, for ( var a = 1, b = 2
self.flags.var_line_tainted = False
if self.flags.var_line_tainted:
self.append(token_text)
self.flags.var_line_reindented = True
self.flags.var_line_tainted = False
self.append_newline()
return
else:
self.flags.var_line_tainted = False
self.append(token_text)
            self.append(' ')
return
if self.last_type == 'TK_END_BLOCK' and self.flags.mode != '(EXPRESSION)':
self.append(token_text)
if self.flags.mode == 'OBJECT' and self.last_text == '}':
self.append_newline()
else:
self.append(' ')
else:
if self.flags.mode == 'OBJECT':
self.append(token_text)
self.append_newline()
else:
# EXPR or DO_BLOCK
self.append(token_text)
self.append(' ')
def handle_operator(self, token_text):
space_before = True
space_after = True
if self.is_special_word(self.last_text):
# return had a special handling in TK_WORD
self.append(' ')
self.append(token_text)
return
# hack for actionscript's import .*;
if token_text == '*' and self.last_type == 'TK_DOT' and not self.last_last_text.isdigit():
self.append(token_text)
return
if token_text == ':' and self.flags.in_case:
self.flags.case_body = True
            self.indent()
self.append(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append(token_text)
return
if token_text in ['--', '++', '!'] \
or (token_text in ['+', '-'] \
and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
or self.last_text in self.line_starters)):
space_before = False
space_after = False
if self.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_WORD' and self.last_text in self.line_starters:
space_before = True
if self.flags.mode == 'BLOCK' and self.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == ':':
if self.flags.ternary_depth == 0:
if self.flags.mode == 'BLOCK':
self.flags.mode = 'OBJECT'
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
if space_before:
self.append(' ')
self.append(token_text)
if space_after:
self.append(' ')
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
# all lines start with an asterisk? that's a proper box comment
        if not any(l.strip() == '' or l.lstrip()[0] != '*' for l in lines[1:]):
self.append_newline()
self.append(lines[0])
for line in lines[1:]:
self.append_newline()
self.append(' ' + line.strip())
else:
# simple block comment: leave intact
if len(lines) > 1:
# multiline comment starts on a new line
self.append_newline()
else:
# single line /* ... */ comment stays on the same line
self.append(' ')
for line in lines:
self.append(line)
self.append('\n')
self.append_newline()
def handle_inline_comment(self, token_text):
self.append(' ')
self.append(token_text)
if self.is_expression(self.flags.mode):
self.append(' ')
else:
self.append_newline_forced()
def handle_comment(self, token_text):
if self.last_text == ',' and not self.wanted_newline:
self.trim_output(True)
if self.last_type != 'TK_COMMENT':
if self.wanted_newline:
self.append_newline()
else:
self.append(' ')
self.append(token_text)
        self.append_newline()
def handle_dot(self, token_text):
if self.is_special_word(self.last_text):
self.append(' ')
elif self.last_text == ')':
if self.opts.break_chained_methods or self.wanted_newline:
                self.flags.chain_extra_indentation = 1
self.append_newline(True, False)
self.append(token_text)
def handle_unknown(self, token_text):
self.append(token_text)
def main():
argv = sys.argv[1:]
try:
        opts, args = getopt.getopt(argv, "s:c:o:djb:kil:xhtf", ['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
'jslint-happy', 'brace-style=',
'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help',
'usage', 'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation'])
except getopt.GetoptError:
return usage()
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
if opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
        elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
        elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--unescape-strings', '-x'):
js_options.unescape_strings = True
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
return usage()
else:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
|
ihsanudin/odoo | refs/heads/8.0 | openerp/service/security.py | 211 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
import openerp.exceptions
def login(db, login, password):
res_users = openerp.registry(db)['res.users']
return res_users._login(db, login, password)
def check_super(passwd):
if passwd == openerp.tools.config['admin_passwd']:
return True
else:
raise openerp.exceptions.AccessDenied()
def check(db, uid, passwd):
res_users = openerp.registry(db)['res.users']
return res_users.check(db, uid, passwd)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pombredanne/erpnext | refs/heads/develop | erpnext/setup/doctype/global_defaults/global_defaults.py | 55 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
"""Global Defaults"""
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
keydict = {
# "key in defaults": "key in Global Defaults"
"fiscal_year": "current_fiscal_year",
'company': 'default_company',
'currency': 'default_currency',
"country": "country",
'hide_currency_symbol':'hide_currency_symbol',
'account_url':'account_url',
'disable_rounded_total': 'disable_rounded_total',
}
from frappe.model.document import Document
class GlobalDefaults(Document):
def on_update(self):
"""update defaults"""
for key in keydict:
frappe.db.set_default(key, self.get(keydict[key], ''))
# update year start date and year end date from fiscal_year
year_start_end_date = frappe.db.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", self.current_fiscal_year)
if year_start_end_date:
ysd = year_start_end_date[0][0] or ''
yed = year_start_end_date[0][1] or ''
if ysd and yed:
frappe.db.set_default('year_start_date', ysd.strftime('%Y-%m-%d'))
frappe.db.set_default('year_end_date', yed.strftime('%Y-%m-%d'))
# enable default currency
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
self.toggle_rounded_total()
# clear cache
frappe.clear_cache()
def get_defaults(self):
return frappe.defaults.get_defaults()
def toggle_rounded_total(self):
self.disable_rounded_total = cint(self.disable_rounded_total)
# Make property setters to hide rounded total fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note"):
make_property_setter(doctype, "base_rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "base_rounded_total", "print_hide", 1, "Check")
make_property_setter(doctype, "rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total", "print_hide", self.disable_rounded_total, "Check")
|
40223243/40223243w17 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/logging/config.py | 739 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
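        # "class" and "args" are evaluated in the logging namespace, so e.g.
        # class = handlers.RotatingFileHandler with
        # args = ('app.log', 'a', 10485760, 5) resolves directly
        # (illustrative values).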
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
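# Illustrative sketch, not part of the original module: a minimal schema
# typically accepted by dictConfig(). The names 'simple' and 'console' are
# arbitrary examples rather than schema keywords.
def _example_dict_config():  # pragma: no cover
    dictConfig({
        'version': 1,
        'formatters': {'simple': {'format': '%(levelname)s %(message)s'}},
        'handlers': {'console': {'class': 'logging.StreamHandler',
                                 'formatter': 'simple'}},
        'root': {'level': 'INFO', 'handlers': ['console']},
    })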
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
                    try:
                        import json
                        d = json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        # Not a JSON dict - apply the new configuration
                        # using the fileConfig (ConfigParser) format.
                        file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
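# Illustrative client sketch, not part of the original module: sends a new
# configuration to a listen() server using the wire format described in
# ConfigStreamHandler.handle() - a 4-byte big-endian length prefix followed
# by the configuration text itself.
def _example_send_config(text, host='localhost',
                         port=DEFAULT_LOGGING_CONFIG_PORT):  # pragma: no cover
    data = text.encode('utf-8')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.sendall(struct.pack('>L', len(data)) + data)
    sock.close()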
|
vityagi/azure-linux-extensions | refs/heads/master | OSPatching/patch/centosPatching.py | 8 | #!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from redhatPatching import redhatPatching
class centosPatching(redhatPatching):
def __init__(self, hutil):
super(centosPatching,self).__init__(hutil)
|
irinabov/debian-qpid-python | refs/heads/master | qpid_tests/broker_0_10/qmf_events.py | 3 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from qpid.messaging import *
from qpid.tests.messaging import Base
from qpidtoollibs.broker import EventHelper
import math
class EventTests (Base):
"""
Test various qmf events
"""
def setup_connection(self):
return Connection.establish(self.broker, **self.connection_options())
def setup_session(self):
return self.conn.session()
def test_queue_declare(self):
helper = EventHelper()
# subscribe for queue declare events
rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDeclare"))
# create a queue
snd = self.ssn.sender("myq; {create:always, delete:always}")
# ensure we got an event
event = helper.event(rcv.fetch(timeout=1))
assert event.name, "org_apache_qpid_broker:queueDeclare"
assert event.qName, "myq"
def test_queue_delete(self):
helper = EventHelper()
rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
snd = self.ssn.sender("myq; {create:always, delete:always}")
snd.close()
event = helper.event(rcv.fetch(timeout=1))
assert event.name, "org_apache_qpid_broker:queueDelete"
assert event.qName, "myq"
def test_queue_autodelete_exclusive(self):
helper = EventHelper()
rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
#create new session
ssn2 = self.setup_session()
snd = ssn2.sender("myq; {create:always, node:{x-declare:{auto-delete:True, exclusive:True}}}")
ssn2.close()
event = helper.event(rcv.fetch(timeout=5))
assert event.name, "org_apache_qpid_broker:queueDelete"
assert event.qName, "myq"
def test_queue_autodelete_shared(self):
helper = EventHelper()
rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
rcv2 = self.ssn.receiver("myq; {create:always, node:{x-declare:{auto-delete:True}}}")
rcv2.close()
event = helper.event(rcv.fetch(timeout=5))
assert event.name, "org_apache_qpid_broker:queueDelete"
assert event.qName, "myq"
|
ethanyoung/shadowsocks | refs/heads/master | tests/graceful_cli.py | 977 | #!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001
if __name__ == '__main__':
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
s.connect((SERVER_IP, SERVER_PORT))
s.send(b'test')
time.sleep(30)
s.close()
|
neharejanjeva/techstitution | refs/heads/master | app/logs/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2942 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
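# Illustrative helper, not part of the original module: wrap_ord() lets
# byte-iteration code behave identically on Python 2, where iterating a
# str yields one-character strings, and Python 3, where iterating bytes
# yields ints.
def _example_byte_values(data):
    """Return the ordinal of each item, e.g. b'abc' -> [97, 98, 99]."""
    return [wrap_ord(item) for item in data]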
|
40223220/2015cc | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/xml/etree/ElementInclude.py | 784 | #
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding (UTF-8 by default for "text").
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
if parse == "xml":
file = open(href, 'rb')
data = ElementTree.parse(file).getroot()
else:
if not encoding:
encoding = 'UTF-8'
file = open(href, 'r', encoding=encoding)
data = file.read()
file.close()
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
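##
# Illustrative usage sketch, not part of the original module; the default
# file name below is a hypothetical example of a document containing
# xi:include directives.
def _example_include(href="doc.xml"):
    tree = ElementTree.parse(href)
    include(tree.getroot())
    return tree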
|
ernstp/kivy | refs/heads/master | kivy/uix/layout.py | 24 | '''
Layout
======
Layouts are used to calculate and assign widget positions.
The :class:`Layout` class itself cannot be used directly.
You should use one of the following layout classes:
- Anchor layout: :class:`kivy.uix.anchorlayout.AnchorLayout`
- Box layout: :class:`kivy.uix.boxlayout.BoxLayout`
- Float layout: :class:`kivy.uix.floatlayout.FloatLayout`
- Grid layout: :class:`kivy.uix.gridlayout.GridLayout`
- Page Layout: :class:`kivy.uix.pagelayout.PageLayout`
- Relative layout: :class:`kivy.uix.relativelayout.RelativeLayout`
- Scatter layout: :class:`kivy.uix.scatterlayout.ScatterLayout`
- Stack layout: :class:`kivy.uix.stacklayout.StackLayout`
Understanding the `size_hint` Property in `Widget`
--------------------------------------------------
The :attr:`~kivy.uix.Widget.size_hint` is a tuple of values used by
layouts to manage the sizes of their children. It indicates the size
relative to the layout's size instead of an absolute size (in
pixels/points/cm/etc). The format is::
widget.size_hint = (width_percent, height_percent)
The percent is specified as a floating point number in the range 0-1. For
example, 0.5 is 50%, 1 is 100%.
If you want a widget's width to be half of the parent's width and the
height to be identical to the parent's height, you would do::
widget.size_hint = (0.5, 1.0)
If you don't want to use a size_hint for either the width or height, set the
value to None. For example, to make a widget that is 250px wide and 30%
of the parent's height, do::
widget.size_hint = (None, 0.3)
widget.width = 250
Being :class:`Kivy properties <kivy.properties>`, these can also be set via
constructor arguments::
widget = Widget(size_hint=(None, 0.3), width=250)
.. versionchanged:: 1.4.1
The `reposition_child` internal method (made public by mistake) has
been removed.
'''
__all__ = ('Layout', )
from kivy.clock import Clock
from kivy.uix.widget import Widget
class Layout(Widget):
'''Layout interface class, used to implement every layout. See module
documentation for more information.
'''
def __init__(self, **kwargs):
if self.__class__ == Layout:
            raise Exception('The Layout class is abstract and '
                            'cannot be used directly.')
self._trigger_layout = Clock.create_trigger(self.do_layout, -1)
super(Layout, self).__init__(**kwargs)
def do_layout(self, *largs):
'''This function is called when a layout is needed by a trigger.
If you are writing a new Layout subclass, don't call this function
directly but use :meth:`_trigger_layout` instead.
.. versionadded:: 1.0.8
'''
raise NotImplementedError('Must be implemented in subclasses.')
def add_widget(self, widget, index=0):
widget.bind(
size=self._trigger_layout,
size_hint=self._trigger_layout)
return super(Layout, self).add_widget(widget, index)
def remove_widget(self, widget):
widget.unbind(
size=self._trigger_layout,
size_hint=self._trigger_layout)
return super(Layout, self).remove_widget(widget)
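class _ExampleOverlapLayout(Layout):
    '''Illustrative sketch only, not part of kivy: the minimal contract for
    a new layout is to override :meth:`do_layout`, which the trigger created
    in ``__init__`` fires whenever a child's size or size_hint changes. This
    hypothetical layout simply places every child at the layout's position.
    '''
    def do_layout(self, *largs):
        for child in self.children:
            child.pos = self.pos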
|
kimt33/pydocstring | refs/heads/master | pydocstring/test/test_numpy_docstring.py | 1 | from nose.tools import assert_raises
from pydocstring.numpy_docstring import parse_numpy
def test_parse_numpy():
"""Tests pydocstring.numpy_docstring.parse_numpy."""
# summary
docstring = 'summary'
assert parse_numpy(docstring) == {'summary': 'summary'}
docstring = 'summary\n'
assert parse_numpy(docstring) == {'summary': 'summary'}
docstring = '\nsummary\n'
assert parse_numpy(docstring) == {'summary': 'summary'}
# FIXME: this should raise an error
docstring = '\n\nsummary\n'
assert parse_numpy(docstring) == {'summary': '', 'extended': ['summary']}
# extended
docstring = 'summary\n\nblock1\n\nblock2'
assert parse_numpy(docstring) == {'summary': 'summary', 'extended': ['block1', 'block2']}
docstring = '\nsummary\n\nblock1\n\nblock2'
assert parse_numpy(docstring) == {'summary': 'summary', 'extended': ['block1', 'block2']}
# FIXME: bug or feature?
docstring = '\n\nsummary\n\nblock1\n\nblock2'
assert parse_numpy(docstring) == {'summary': '', 'extended': ['summary', 'block1', 'block2']}
# extended + headers
docstring = 'summary\n\nblock1\n\nblock2\n\nheader\n------\nstuff'
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['block1', 'block2'],
'header': ['stuff']}
# headers
docstring = 'summary\n\nblock1\n\nblock2\n\nheader1\n--\n\n'
assert_raises(ValueError, parse_numpy, docstring)
for header in ['parameters', 'attributes', 'methods', 'returns', 'yields', 'raises',
'other parameters', 'see also']:
# name + multiple descriptions
docstring = ('summary\n\nblock1\n\nblock2\n\n{0}\n{1}\nabc\n description1.\n'
' description2.'.format(header.title(), '-'*len(header)))
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['block1', 'block2'],
header: [{'name': 'abc',
'descs': ['description1.', 'description2.']}]}
# name + types + multiple descriptions
docstring = ('summary\n\nblock1\n\nblock2\n\n{0}\n{1}\nabc : str\n description1.\n'
' description2.'.format(header.title(), '-'*len(header)))
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['block1', 'block2'],
header: [{'name': 'abc',
'types': ['str'],
'descs': ['description1.', 'description2.']}]}
# name + signature + multiple descriptions
docstring = ('summary\n\nblock1\n\nblock2\n\n{0}\n{1}\nabc(x, y)\n description1.\n'
' description2.'.format(header.title(), '-'*len(header)))
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['block1', 'block2'],
header: [{'name': 'abc',
'signature': '(x, y)',
'descs': ['description1.', 'description2.']}]}
# name + types + signature + multiple descriptions
docstring = ('summary\n\nblock1\n\nblock2\n\n{0}\n{1}\nabc(x, y): str\n description1.\n'
' description2.'.format(header.title(), '-'*len(header)))
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['block1', 'block2'],
header: [{'name': 'abc',
'types': ['str'],
'signature': '(x, y)',
'descs': ['description1.', 'description2.']}]}
# name + types + signature + multiple descriptions - extended summary
docstring = ('summary\n\n{0}\n{1}\nabc(x, y): str\n description1.\n'
' description2.'.format(header.title(), '-'*len(header)))
assert parse_numpy(docstring) == {'summary': 'summary',
header: [{'name': 'abc',
'types': ['str'],
'signature': '(x, y)',
'descs': ['description1.', 'description2.']}]}
# name + types
docstring = ('summary\n\n{0}\n{1}\nabc: str\ndef: int'.format(header.title(),
'-'*len(header)))
assert parse_numpy(docstring) == {'summary': 'summary',
header: [{'name': 'abc',
'types': ['str']},
{'name': 'def',
'types': ['int']}]}
def test_parse_numpy_raw():
"""Test pydocstring.numpy_docstring.parse_numpy with raw strings."""
docstring = '"""summary\n\nextended"""'
assert parse_numpy(docstring, contains_quotes=True) == {'summary': 'summary',
'extended': ['extended']}
docstring = 'r"""summary\n\nextended"""'
assert_raises(NotImplementedError, parse_numpy, docstring, contains_quotes=True)
def test_parse_numpy_self():
"""Test pydocstring.numpy_docstring.parse_numpy using itself as an example."""
docstring = parse_numpy.__doc__
assert (parse_numpy(docstring, contains_quotes=False)['summary'] ==
'Extract numpy docstring as a dictionary.')
assert (parse_numpy(docstring, contains_quotes=False)['extended'] ==
['Multiple descriptions of the indented information (e.g. parameters, '
'attributes, methods, returns, yields, raises, see also) are '
'distinguished from one another with a period. If the period is not '
'present, then the description is assumed to be a multiline '
'description.'])
assert (parse_numpy(docstring, contains_quotes=False)['parameters'] ==
[{'name': 'docstring',
'types': ['str'],
'descs': ['Numpy docstring.']},
{'name': 'contains_quotes',
'types': ['bool'],
'descs': ['True if docstring contains """ or \'\'\'.']}])
assert (parse_numpy(docstring, contains_quotes=False)['returns'] ==
[{'name': 'output',
'types': ['dict'],
'descs': ['Contents of the docstring separated into different section.',
'If the section is summary, then the value is string.',
'If the section is extended, notes, references, or examples, '
'then the value is list of strings.',
'If the section is parameters, other parameters, attributes, '
'methods, returns, yields, raises, or see also, then the value '
'is a dictionary with keys \'name\', \'signature\', \'types\', '
'or \'docs\', with corresonding values string, string, list of '
'strings, and list of strings, respectively.',
'Otherwise, the values is a list of strings.']}])
assert (parse_numpy(docstring, contains_quotes=False)['raises'] ==
[{'name': 'ValueError',
'descs': ['If summary is not in the first or second line.',
'If summary is now followed with a blank line.',
'If number of \'-\' does not match the number of characters in '
'the header.',
'If given entry of the tabbed information (parameters, '
'attributes, methods, returns, yields, raises, see also) '
'had an unexpected pattern.']},
{'name': 'NotImplementedError',
'descs': ['If quotes corresponds to a raw string, i.e. r""".']}])
def test_parse_numpy_equations():
"""Test pydocstring.numpy_docstring.parse_numpy with equations."""
# equation in extended
docstring = ('summary\n\n.. math::\n\n \\frac{1}{2}')
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['.. math::\n\n \\frac{1}{2}']}
docstring = ('summary\n\n'
'.. math::\n\n'
' x &= 2\\\\\n'
' &= y\\\\\n')
assert parse_numpy(docstring) == {'summary': 'summary',
'extended': ['.. math::\n\n x &= 2\\\\\n &= y\\\\']}
# equation in parameter
# single line equation
docstring = ('summary\n\nParameters\n----------\na : float\n .. math::\n\n '
' \\frac{1}{2}')
assert parse_numpy(docstring) == {'summary': 'summary',
'parameters': [{'name': 'a',
'types': ['float'],
'descs': ['.. math::\n\n \\frac{1}{2}\n']}]
}
# multi line equation
docstring = ('summary\n\nParameters\n----------\na : float\n .. math::\n\n'
' \\frac{1}{2}\\\\\n \\frac{1}{3}')
assert parse_numpy(docstring) == {'summary': 'summary',
'parameters': [{'name': 'a',
'types': ['float'],
'descs': ['.. math::\n\n \\frac{1}{2}\\\\'
'\n \\frac{1}{3}\n']}]
}
# multiple equations
docstring = ('summary\n\nParameters\n----------\na : float\n .. math::\n\n'
' \\frac{1}{2}\n ..math::\n \\frac{1}{3}')
assert parse_numpy(docstring) == {'summary': 'summary',
'parameters': [{'name': 'a',
'types': ['float'],
'descs': ['.. math::\n\n \\frac{1}{2}\n',
'..math::\n \\frac{1}{3}\n']}]
}
# multiple equations and other descriptions
docstring = ('summary\n\nParameters\n----------\na : float\n Some float.\n .. math::\n\n'
' \\frac{1}{2}\n\n Yes.\n ..math::\n \\frac{1}{3}\n'
' This is the float.')
assert parse_numpy(docstring) == {'summary': 'summary',
'parameters': [{'name': 'a',
'types': ['float'],
'descs': ['Some float.',
'.. math::\n\n \\frac{1}{2}\n',
'Yes.',
'..math::\n \\frac{1}{3}\n',
'This is the float.']}]
}
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/ship/shared_hutt_heavy_s02_tier4.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_hutt_heavy_s02_tier4.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
|
frankrousseau/weboob | refs/heads/master | modules/aum/optim/visibility.py | 7 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import BrowserUnavailable
from weboob.capabilities.dating import Optimization
class Visibility(Optimization):
def __init__(self, sched, browser):
super(Visibility, self).__init__()
self._sched = sched
self._browser = browser
self._cron = None
def start(self):
self._cron = self._sched.repeat(60*5, self.reconnect)
return True
def stop(self):
self._sched.cancel(self._cron)
self._cron = None
return True
def is_running(self):
return self._cron is not None
def reconnect(self):
try:
with self._browser:
self._browser.login()
except BrowserUnavailable:
pass
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/numpy/lib/shape_base.py | 26 | from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, isscalar, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape
from numpy.core import vstack, atleast_3d
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
Parameters
----------
func1d : function
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray
Input array.
args : any
Additional arguments to `func1d`.
kwargs: any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
apply_along_axis : ndarray
The output array. The shape of `outarr` is identical to the shape of
`arr`, except along the `axis` dimension, where the length of `outarr`
is equal to the size of the return value of `func1d`. If `func1d`
returns a scalar `outarr` will have one fewer dimensions than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that doesn't return a scalar, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
"""
arr = asarray(arr)
nd = arr.ndim
if axis < 0:
axis += nd
if (axis >= nd):
raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
% (axis, nd))
ind = [0]*(nd-1)
i = zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
if isscalar(res):
outarr = zeros(outshape, asarray(res).dtype)
outarr[tuple(ind)] = res
Ntot = product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
k += 1
return outarr
else:
Ntot = product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = len(res)
outarr = zeros(outshape, asarray(res).dtype)
outarr[tuple(i.tolist())] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(i.tolist())] = res
k += 1
return outarr
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
`func` is called as `res = func(a, axis)`, where `axis` is the first
element of `axes`. The result `res` of the function call must have
either the same dimensions as `a` or one less dimension. If `res`
has one less dimension than `a`, a dimension is inserted before
`axis`. The call to `func` is then repeated for each axis in `axes`,
with `res` as the first argument.
Parameters
----------
func : function
This function must take two arguments, `func(a, axis)`.
a : array_like
Input array.
axes : array_like
Axes over which `func` is applied; the elements must be integers.
Returns
-------
apply_over_axis : ndarray
The output array. The number of dimensions is the same as `a`,
but the shape can be different. This depends on whether `func`
changes the shape of its output with respect to its input.
See Also
--------
apply_along_axis :
Apply a function to 1-D slices of an array along the given axis.
    Notes
    -----
    This function is equivalent to tuple axis arguments to reorderable ufuncs
    with keepdims=True. Tuple axis arguments to ufuncs have been available since
    version 1.7.0.
Examples
--------
>>> a = np.arange(24).reshape(2,3,4)
>>> a
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
Sum over axes 0 and 2. The result has same number of dimensions
as the original array:
>>> np.apply_over_axes(np.sum, a, [0,2])
array([[[ 60],
[ 92],
[124]]])
Tuple axis arguments to ufuncs are equivalent:
>>> np.sum(a, axis=(0,2), keepdims=True)
array([[[ 60],
[ 92],
[124]]])
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
def expand_dims(a, axis):
"""
Expand the shape of an array.
Insert a new axis, corresponding to a given position in the array shape.
Parameters
----------
a : array_like
Input array.
axis : int
Position (amongst axes) where new axis is to be inserted.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
doc.indexing, atleast_1d, atleast_2d, atleast_3d
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1, 2]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
>>> np.newaxis is None
True
"""
a = asarray(a)
shape = a.shape
if axis < 0:
axis = axis + len(shape) + 1
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
if arr.ndim < 2:
arr = array(arr, copy=False, subok=True, ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays, 1)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
Takes a sequence of arrays and stack them along the third axis
to make a single array. Rebuilds arrays divided by `dsplit`.
This is a simple way to stack 2D arrays (images) into a single
3D array for processing.
Parameters
----------
tup : sequence of arrays
Arrays to stack. All of them must have the same shape along all
but the third axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join a sequence of arrays along an existing axis.
dsplit : Split array along third axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=2)``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Please refer to the ``split`` documentation. The only difference
between these functions is that ``array_split`` allows
`indices_or_sections` to be an integer that does *not* equally
divide the axis.
See Also
--------
split : Split array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try:
# handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
# indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
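        # Give the first `extras` sections one extra element each so the
        # section sizes always sum to Ntotal, even for an uneven division.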
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
return sub_arys
def split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D array
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
array_split : Split an array into multiple sub-arrays of equal or
near-equal size. Does not raise an exception if
an equal division cannot be made.
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]
>>> x = np.arange(8.0)
>>> np.split(x, [3, 5, 6, 10])
[array([ 0., 1., 2.]),
array([ 3., 4.]),
array([ 5.]),
array([ 6., 7.]),
array([], dtype=float64)]
"""
try:
len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
raise ValueError(
'array split does not result in an equal division')
res = array_split(ary, indices_or_sections, axis)
return res
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
Please refer to the `split` documentation. `hsplit` is equivalent
to `split` with ``axis=1``, the array is always split along the second
axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[ 12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[ 10., 11.],
[ 14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[ 12., 13., 14.]]),
array([[ 3.],
[ 7.],
[ 11.],
[ 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if len(ary.shape) > 1:
return split(ary, indices_or_sections, 1)
else:
return split(ary, indices_or_sections, 0)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[ 12., 13., 14., 15.]]),
array([], dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[ 0., 1.],
[ 2., 3.]]]),
array([[[ 4., 5.],
[ 6., 7.]]])]
"""
if len(_nx.shape(ary)) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[ 12., 13.]]]),
array([[[ 2., 3.],
[ 6., 7.]],
[[ 10., 11.],
[ 14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[ 12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[ 11.],
[ 15.]]]),
array([], dtype=float64)]
"""
if len(_nx.shape(ary)) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
if hasattr(x, '__array_prepare__'))
if wrappers:
return wrappers[-1][-1]
return None
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
if hasattr(x, '__array_wrap__'))
if wrappers:
return wrappers[-1][-1]
return None
def kron(a, b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
    If `a.shape = (r0,r1,...,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[ 1., 1., 0., 0.],
[ 1., 1., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
b = asanyarray(b)
a = array(a, copy=False, subok=True, ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a, b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
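    # outer() yields shape as_ + bs; each concatenate below consumes the
    # leading r-axis and merges it into the matching s-axis, so after nd
    # passes the result has shape (r0*s0, r1*s1, ..., rN*sN).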
result = outer(a, b).reshape(as_+bs)
axis = nd-1
for _ in range(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
result = wrapper(result)
wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
    Note: Although tile may be used for broadcasting, it is strongly
    recommended to use numpy's broadcasting operations and functions.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
broadcast_to : Broadcast an array to a new shape
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
# Fixes the problem that the function does not make a copy if A is a
# numpy array and the repetitions are 1 in all dimensions
return _nx.array(A, copy=True, subok=True, ndmin=d)
else:
# Note that no copy of zero-sized arrays is made. However since they
# have no data there is no risk of an inadvertent overwrite.
c = _nx.array(A, copy=False, subok=True, ndmin=d)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
shape_out = tuple(s*t for s, t in zip(c.shape, tup))
n = c.size
if n > 0:
for dim_in, nrep in zip(c.shape, tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
n //= dim_in
return c.reshape(shape_out)
|
pexip/os-gyp | refs/heads/master | test/sanitize-rule-names/gyptest-sanitize-rule-names.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure rule names with non-"normal" characters in them don't cause
broken build files. This test was originally causing broken .ninja files.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('sanitize-rule-names.gyp')
test.build('sanitize-rule-names.gyp', test.ALL)
test.pass_test()
|
jade-cheng/Jocx | refs/heads/master | tests/ziphmm/test_matrix.py | 1 | import unittest
from ziphmm import Matrix
from _internal import seq_to_string
from _internal import format_matrix
class TestMatrix(unittest.TestCase):
def test_compute_sum(self):
m = Matrix(2, 3)
m[0, 0] = 1
m[0, 1] = 2
m[0, 2] = 3
m[1, 0] = 4
m[1, 1] = 5
m[1, 2] = 6
self.assertEqual(21, m.compute_sum())
def test_indexers(self):
m = Matrix(2, 3)
m[0, 0] = 1
m[0, 1] = 2
m[0, 2] = 3
m[1, 0] = 4
m[1, 1] = 5
m[1, 2] = 6
self.assertEqual(1, m[0, 0])
self.assertEqual(2, m[0, 1])
self.assertEqual(3, m[0, 2])
self.assertEqual(4, m[1, 0])
self.assertEqual(5, m[1, 1])
self.assertEqual(6, m[1, 2])
def test_init(self):
m = Matrix(2, 3)
self.assertEqual(2, m.height)
self.assertEqual(3, m.width)
self.assertEqual('{{0.0,0.0,0.0},{0.0,0.0,0.0}}', format_matrix(m))
def test_mul(self):
m = Matrix(2, 3)
m[0, 0] = 1
m[0, 1] = 2
m[0, 2] = 3
m[1, 0] = 4
m[1, 1] = 5
m[1, 2] = 6
n = Matrix(3, 2)
n[0, 0] = 1
n[0, 1] = 2
n[1, 0] = 3
n[1, 1] = 4
n[2, 0] = 5
n[2, 1] = 6
q = m * n
self.assertEqual('{{22.0,28.0},{49.0,64.0}}', format_matrix(q))
def test_scale(self):
m = Matrix(2, 3)
m[0, 0] = 1
m[0, 1] = 2
m[0, 2] = 3
m[1, 0] = 4
m[1, 1] = 5
m[1, 2] = 6
m.scale(2)
self.assertEqual('{{2.0,4.0,6.0},{8.0,10.0,12.0}}', format_matrix(m))
if __name__ == '__main__':
unittest.main()
|
geopython/QGIS | refs/heads/master | python/PyQt/PyQt5/QtCore.py | 17 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QtCore.py
---------------------
Date : November 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'November 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt5.QtCore import *
from types import MethodType
_QVariant__repr__ = QVariant.__repr__
_QVariant__eq__ = QVariant.__eq__
_QVariant__ne__ = QVariant.__ne__
_QVariant__hash__ = QVariant.__hash__
def __bool__(self):
return not self.isNull()
def __repr__(self):
if self.isNull():
return 'NULL'
else:
return _QVariant__repr__(self)
def __eq__(self, other):
if self.isNull():
        return (isinstance(other, QVariant) and other.isNull()) or other is None
else:
return _QVariant__eq__(self, other)
def __ne__(self, other):
if self.isNull():
return not (isinstance(other, QVariant) and other.isNull()) and other is not None
else:
return _QVariant__ne__(self, other)
def __hash__(self):
if self.isNull():
return 2178309
else:
return _QVariant__hash__(self)
QVariant.__bool__ = __bool__
QVariant.__repr__ = __repr__
QVariant.__eq__ = __eq__
QVariant.__ne__ = __ne__
QVariant.__hash__ = __hash__
NULL = QVariant(QVariant.Int)
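# Illustrative only, not part of the original module: with the patches
# above, a null QVariant is falsy and compares equal to None and to any
# other null QVariant:
#
#     assert not NULL
#     assert NULL == None and NULL != 5
#     assert NULL == QVariant()
#     assert repr(NULL) == 'NULL'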
|
nwspeete-ibm/openwhisk | refs/heads/master | core/javaAction/run.py | 1 | import os
import sys
import json
import requests
import codecs
HOST=os.environ["CONTAINER"]
if HOST == "":
HOST = "localhost"
DEST="http://%s:8080/run" % HOST
def content_from_args(args):
if len(args) == 0:
return {}
if len(args) == 1 and os.path.exists(args[0]):
with open(args[0]) as fp:
return json.load(fp)
# else...
in_str = " ".join(args)
try:
d = json.loads(in_str)
if isinstance(d, dict):
return d
else:
raise "Not a dict."
except:
return { "payload" : " ".join(sys.argv[1:]) }
value = content_from_args(sys.argv[1:])
print "Sending value: %s..." % json.dumps(value)[0:40]
r = requests.post(DEST, json.dumps({ "value" : value }))
print r.text
|
mailhexu/pyDFTutils | refs/heads/master | build/lib/pyDFTutils/perovskite/cubic_perovskite.py | 1 | #!/usr/bin/env python
from pyDFTutils.perovskite.lattice_factory import PerovskiteCubic
from pyDFTutils.ase_utils import my_write_vasp,normalize, vesta_view, set_element_mag
from ase.io.vasp import read_vasp
from ase.atoms import string2symbols
import numpy as np
from ase.build import make_supercell
def gen222(name=None,
A='Sr',
B='Mn',
O='O',
latticeconstant=3.9,
mag_order='FM',
m=5,
sort=True):
if name is not None:
symbols=string2symbols(name)
A, B, O, _, _ = symbols
atoms = PerovskiteCubic([A, B, O], latticeconstant=latticeconstant)
atoms = atoms.repeat([2, 2, 2])
if sort:
my_write_vasp('UCPOSCAR', atoms, vasp5=True, sort=True)
atoms = read_vasp('UCPOSCAR')
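    # Indices of the spin-down B sites among the eight B atoms of the 2x2x2
    # cell for each antiferromagnetic ordering (A-, C- and G-type); the FM
    # list is empty because all moments stay parallel.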
spin_dn = {
'FM': [],
'A': [0, 1, 4, 5],
'C': [0, 2, 5, 7],
'G': [0, 3, 5, 6]
}
if mag_order != 'PM':
mag = np.ones(8)
mag[np.array(spin_dn[mag_order], int)] = -1.0
atoms = set_element_mag(atoms, B, mag * m)
return atoms
def gen_primitive(name=None,A=None,B=None,O=None, latticeconstant=3.9, mag_order='FM', m=5):
"""
generate primitive cell with magnetic order.
Parameters:
---------------
name: string
ABO3, eg. BiFeO3, CsPbF3
"""
if name is not None:
symbols=string2symbols(name)
A, B, O, _, _ = symbols
atoms = PerovskiteCubic([A, B, O], latticeconstant=latticeconstant)
direction_dict = {
'A': ([1, 0, 0], [0, 1, 0], [0, 0, 2]),
'C': ([1, -1, 0], [1, 1, 0], [0, 0, 1]),
'G': ([0, 1, 1], [1, 0, 1], [1, 1, 0]),
'FM': np.eye(3),
}
size_dict = {'A': (1, 1, 2), 'C': (1, 1, 1), 'G': (1, 1, 1)}
A, B, O = atoms.get_chemical_symbols()[0:3]
if mag_order == 'PM':
atoms = atoms
elif mag_order == 'FM':
atoms = atoms
atoms = set_element_mag(atoms, B, [m])
else:
atoms.translate([0.045] * 3)
atoms = normalize(atoms)
atoms = make_supercell(atoms, direction_dict[mag_order])
atoms.translate([-0.045] * 3)
atoms = set_element_mag(atoms, B, [m, -m])
return atoms
if __name__ == '__main__':
atoms = gen_primitive(name='LaMnO3',mag_order='G')
vesta_view(atoms)
|
verycumbersome/the-blue-alliance | refs/heads/master | tests/test_award_manipulator.py | 3 | import json
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.award_type import AwardType
from consts.event_type import EventType
from helpers.award_manipulator import AwardManipulator
from models.award import Award
from models.event import Event
from models.team import Team
class TestAwardManipulator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.event = Event(
id="2013casj",
event_short="casj",
year=2013,
event_type_enum=EventType.REGIONAL,
)
self.old_award = Award(
id=Award.render_key_name(self.event.key_name, AwardType.WINNER),
name_str="Regional Winner",
award_type_enum=AwardType.WINNER,
year=2013,
event=self.event.key,
event_type_enum=EventType.REGIONAL,
team_list=[ndb.Key(Team, 'frc111'), ndb.Key(Team, 'frc234')],
recipient_json_list=[json.dumps({'team_number': 111, 'awardee': None}),
json.dumps({'team_number': 234, 'awardee': None})],
)
self.new_award = Award(
id="2013casj_1",
name_str="Regional Champion",
award_type_enum=AwardType.WINNER,
year=2013,
event=self.event.key,
event_type_enum=EventType.REGIONAL,
team_list=[ndb.Key(Team, 'frc359')],
recipient_json_list=[json.dumps({'team_number': 359, 'awardee': None})],
)
def tearDown(self):
self.testbed.deactivate()
def assertMergedAward(self, award, is_auto_union):
self.assertEqual(award.name_str, "Regional Champion")
self.assertEqual(award.award_type_enum, AwardType.WINNER)
self.assertEqual(award.year, 2013)
self.assertEqual(award.event, self.event.key)
self.assertEqual(award.event_type_enum, EventType.REGIONAL)
if is_auto_union:
self.assertEqual(set(award.team_list), {ndb.Key(Team, 'frc111'), ndb.Key(Team, 'frc234'), ndb.Key(Team, 'frc359')})
self.assertEqual(len(award.recipient_json_list), 3)
for r in award.recipient_json_list:
self.assertTrue(json.loads(r) in [{'team_number': 111, 'awardee': None}, {'team_number': 234, 'awardee': None}, {'team_number': 359, 'awardee': None}])
else:
self.assertEqual(set(award.team_list), {ndb.Key(Team, 'frc359')})
self.assertEqual(len(award.recipient_json_list), 1)
for r in award.recipient_json_list:
self.assertTrue(json.loads(r) in [{'team_number': 359, 'awardee': None}])
def assertOldAward(self, award):
self.assertEqual(award.name_str, "Regional Winner")
self.assertEqual(award.award_type_enum, AwardType.WINNER)
self.assertEqual(award.year, 2013)
self.assertEqual(award.event, self.event.key)
self.assertEqual(award.event_type_enum, EventType.REGIONAL)
self.assertEqual(set(award.team_list), {ndb.Key(Team, 'frc111'), ndb.Key(Team, 'frc234')})
self.assertEqual(len(award.recipient_json_list), 2)
for r in award.recipient_json_list:
self.assertTrue(json.loads(r) in [{'team_number': 111, 'awardee': None}, {'team_number': 234, 'awardee': None}])
def test_createOrUpdate(self):
AwardManipulator.createOrUpdate(self.old_award)
self.assertOldAward(Award.get_by_id("2013casj_1"))
AwardManipulator.createOrUpdate(self.new_award)
self.assertMergedAward(Award.get_by_id("2013casj_1"), True)
def test_findOrSpawn(self):
self.old_award.put()
self.assertMergedAward(AwardManipulator.findOrSpawn(self.new_award), True)
def test_updateMerge(self):
self.assertMergedAward(AwardManipulator.updateMerge(self.new_award, self.old_award), True)
def test_createOrUpdate_no_auto_union(self):
AwardManipulator.createOrUpdate(self.old_award)
self.assertOldAward(Award.get_by_id("2013casj_1"))
AwardManipulator.createOrUpdate(self.new_award, auto_union=False)
self.assertMergedAward(Award.get_by_id("2013casj_1"), False)
def test_findOrSpawn_no_auto_union(self):
self.old_award.put()
self.assertMergedAward(AwardManipulator.findOrSpawn(self.new_award, auto_union=False), False)
def test_updateMerge_no_auto_union(self):
self.assertMergedAward(AwardManipulator.updateMerge(self.new_award, self.old_award, auto_union=False), False)
|
teeple/pns_server | refs/heads/master | work/install/Python-2.7.4/Lib/multiprocessing/sharedctypes.py | 131 | #
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import sys
import ctypes
import weakref
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning, ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, (int, long)):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawValue(typecode_or_type, *args)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec template % ((name,)*7) in d
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
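# Usage sketch (illustrative): a lock-protected shared counter.
#
#   from multiprocessing import Process
#   from multiprocessing.sharedctypes import Value
#
#   def bump(counter, n):
#       for _ in range(n):
#           with counter.get_lock():      # Synchronized exposes its RLock
#               counter.value += 1
#
#   counter = Value('i', 0)               # synchronized c_int
#   workers = [Process(target=bump, args=(counter, 1000)) for _ in range(4)]
#   for w in workers: w.start()
#   for w in workers: w.join()
#   assert counter.value == 4000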
|
schlueter/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_cluster.py | 47 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_cluster
short_description: Manage VMware vSphere clusters
description:
- Add or remove VMware vSphere clusters.
version_added: '2.0'
author: Joseph Callen (@jcpowermac)
requirements:
- Tested on ESXi 5.5
- PyVmomi installed
options:
cluster_name:
description:
- The name of the cluster that will be created.
required: yes
datacenter_name:
description:
- The name of the datacenter the cluster will be created in.
required: yes
enable_drs:
description:
- If set to C(yes) will enable DRS when the cluster is created.
type: bool
default: 'no'
enable_ha:
description:
- If set to C(yes) will enable HA when the cluster is created.
type: bool
default: 'no'
enable_vsan:
description:
- If set to C(yes) will enable vSAN when the cluster is created.
type: bool
default: 'no'
state:
description:
- Create (C(present)) or remove (C(absent)) a VMware vSphere cluster.
choices: [absent, present]
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create Cluster
local_action:
module: vmware_cluster
hostname: '{{ ansible_ssh_host }}'
username: root
password: vmware
datacenter_name: datacenter
cluster_name: cluster
enable_ha: yes
enable_drs: yes
enable_vsan: yes
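- name: Remove Cluster (illustrative sketch; reuses the connection values above)
  local_action:
    module: vmware_cluster
    hostname: '{{ ansible_ssh_host }}'
    username: root
    password: vmware
    datacenter_name: datacenter
    cluster_name: cluster
    state: absent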
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI,
TaskError,
connect_to_api,
find_cluster_by_name_datacenter,
find_datacenter_by_name,
vmware_argument_spec,
wait_for_task
)
class VMwareCluster(object):
def __init__(self, module):
self.module = module
self.cluster_name = module.params['cluster_name']
self.datacenter_name = module.params['datacenter_name']
self.enable_drs = module.params['enable_drs']
self.enable_ha = module.params['enable_ha']
self.enable_vsan = module.params['enable_vsan']
self.desired_state = module.params['state']
self.datacenter = None
self.cluster = None
self.content = connect_to_api(module)
def process_state(self):
cluster_states = {
'absent': {
'present': self.state_destroy_cluster,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_cluster,
'present': self.state_exit_unchanged,
'absent': self.state_create_cluster,
}
}
current_state = self.check_cluster_configuration()
# Based on the desired_state and the current_state call
# the appropriate method from the dictionary
cluster_states[self.desired_state][current_state]()
def configure_ha(self):
das_config = vim.cluster.DasConfigInfo()
das_config.enabled = self.enable_ha
das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
das_config.admissionControlPolicy.failoverLevel = 2
return das_config
def configure_drs(self):
drs_config = vim.cluster.DrsConfigInfo()
drs_config.enabled = self.enable_drs
# Set to partially automated
drs_config.vmotionRate = 3
return drs_config
def configure_vsan(self):
vsan_config = vim.vsan.cluster.ConfigInfo()
vsan_config.enabled = self.enable_vsan
vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
vsan_config.defaultConfig.autoClaimStorage = False
return vsan_config
def state_create_cluster(self):
try:
cluster_config_spec = vim.cluster.ConfigSpecEx()
cluster_config_spec.dasConfig = self.configure_ha()
cluster_config_spec.drsConfig = self.configure_drs()
if self.enable_vsan:
cluster_config_spec.vsanConfig = self.configure_vsan()
if not self.module.check_mode:
self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
self.module.exit_json(changed=True)
except vim.fault.DuplicateName:
self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
except vmodl.fault.InvalidArgument:
self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
except vim.fault.InvalidName:
self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
except vmodl.fault.NotSupported:
# This should never happen
self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
# This should never happen either
self.module.fail_json(msg=method_fault.msg)
def state_destroy_cluster(self):
changed = True
result = None
try:
if not self.module.check_mode:
task = self.cluster.Destroy_Task()
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=result)
except vim.fault.VimFault as vim_fault:
self.module.fail_json(msg=vim_fault.msg)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_cluster(self):
cluster_config_spec = vim.cluster.ConfigSpecEx()
changed = True
result = None
if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
cluster_config_spec.dasConfig = self.configure_ha()
if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
cluster_config_spec.drsConfig = self.configure_drs()
if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
cluster_config_spec.vsanConfig = self.configure_vsan()
try:
if not self.module.check_mode:
task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=result)
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except TaskError as task_e:
self.module.fail_json(msg=str(task_e))
def check_cluster_configuration(self):
try:
self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
if self.datacenter is None:
self.module.fail_json(msg="Datacenter %s does not exist, "
"please create first with Ansible Module vmware_datacenter or manually."
% self.datacenter_name)
self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)
if self.cluster is None:
return 'absent'
else:
desired_state = (self.enable_ha,
self.enable_drs,
self.enable_vsan)
current_state = (self.cluster.configurationEx.dasConfig.enabled,
self.cluster.configurationEx.drsConfig.enabled,
self.cluster.configurationEx.vsanConfigInfo.enabled)
if desired_state != current_state:
return 'update'
else:
return 'present'
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
cluster_name=dict(type='str', required=True),
datacenter_name=dict(type='str', required=True),
enable_drs=dict(type='bool', default=False),
enable_ha=dict(type='bool', default=False),
enable_vsan=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_cluster = VMwareCluster(module)
vmware_cluster.process_state()
if __name__ == '__main__':
main()
|
dscdac/Proyecto-IV-modulo2 | refs/heads/master | lib/python2.7/site-packages/setuptools/command/install_lib.py | 454 | from distutils.command.install_lib import install_lib as _install_lib
import os
class install_lib(_install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
    def _bytecode_filenames(self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
if not py_file.endswith('.py'):
continue
if self.compile:
bytecode_files.append(py_file + "c")
if self.optimize > 0:
bytecode_files.append(py_file + "o")
return bytecode_files
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
exclude = {}
nsp = self.distribution.namespace_packages
if (nsp and self.get_finalized_command('install')
.single_version_externally_managed
):
for pkg in nsp:
parts = pkg.split('.')
while parts:
pkgdir = os.path.join(self.install_dir, *parts)
for f in '__init__.py', '__init__.pyc', '__init__.pyo':
exclude[os.path.join(pkgdir,f)] = 1
parts.pop()
return exclude
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return _install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = _install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
|
betoesquivel/fil2014 | refs/heads/master | build/django/build/lib.linux-x86_64-2.7/django/http/utils.py | 134 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
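# e.g. a 'Location: /next/' header on a response to a request for
# http://example.com/page becomes 'Location: http://example.com/next/'.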
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
|
efortuna/AndroidSDKClone | refs/heads/master | ndk/prebuilt/linux-x86_64/lib/python2.7/unittest/test/test_setups.py | 153 | import sys
from cStringIO import StringIO
import unittest
def resultFactory(*_):
return unittest.TestResult()
class TestSetups(unittest.TestCase):
def getRunner(self):
return unittest.TextTestRunner(resultclass=resultFactory,
stream=StringIO())
def runTests(self, *cases):
suite = unittest.TestSuite()
for case in cases:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest.TestSuite())
realSuite.addTest(unittest.TestSuite())
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest.TestSuite((Test1('testOne'),))
second = unittest.TestSuite((Test1('testTwo'),))
third = unittest.TestSuite((Test2('testOne'),))
fourth = unittest.TestSuite((Test2('testTwo'),))
fifth = unittest.TestSuite((Test3('testOne'),))
sixth = unittest.TestSuite((Test3('testTwo'),))
suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise unittest.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest.TestSuite()
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
with self.assertRaisesRegexp(Exception, msg):
suite.debug()
if __name__ == '__main__':
unittest.main()
|
nishizhen/p2pool | refs/heads/master | wstools/tests/test_wsdl.py | 289 | #!/usr/bin/env python
############################################################################
# Joshua R. Boverhof, David W. Robertson, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import sys, unittest
import ConfigParser
import os
from wstools.Utility import DOM
from wstools.WSDLTools import WSDLReader
from wstools.TimeoutSocket import TimeoutError
from wstools import tests
cwd = os.path.dirname(tests.__file__)
class WSDLToolsTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
def setUp(self):
self.path = nameGenerator.next()
print self.path
sys.stdout.flush()
def __str__(self):
teststr = unittest.TestCase.__str__(self)
if hasattr(self, "path"):
return "%s: %s" % (teststr, self.path )
else:
return "%s" % (teststr)
def checkWSDLCollection(self, tag_name, component, key='name'):
if self.wsdl is None:
return
definition = self.wsdl.document.documentElement
version = DOM.WSDLUriToVersion(definition.namespaceURI)
nspname = DOM.GetWSDLUri(version)
for node in DOM.getElements(definition, tag_name, nspname):
name = DOM.getAttr(node, key)
comp = component[name]
self.failUnlessEqual(eval('comp.%s' %key), name)
def checkXSDCollection(self, tag_name, component, node, key='name'):
for cnode in DOM.getElements(node, tag_name):
name = DOM.getAttr(cnode, key)
component[name]
def test_all(self):
try:
if self.path[:7] == 'http://':
self.wsdl = WSDLReader().loadFromURL(self.path)
else:
self.wsdl = WSDLReader().loadFromFile(self.path)
except TimeoutError:
print "connection timed out"
sys.stdout.flush()
return
except:
self.path = self.path + ": load failed, unable to start"
raise
try:
self.checkWSDLCollection('service', self.wsdl.services)
except:
self.path = self.path + ": wsdl.services"
raise
try:
self.checkWSDLCollection('message', self.wsdl.messages)
except:
self.path = self.path + ": wsdl.messages"
raise
try:
self.checkWSDLCollection('portType', self.wsdl.portTypes)
except:
self.path = self.path + ": wsdl.portTypes"
raise
try:
self.checkWSDLCollection('binding', self.wsdl.bindings)
except:
self.path = self.path + ": wsdl.bindings"
raise
try:
self.checkWSDLCollection('import', self.wsdl.imports, key='namespace')
except:
self.path = self.path + ": wsdl.imports"
raise
try:
for key in self.wsdl.types.keys():
schema = self.wsdl.types[key]
self.failUnlessEqual(key, schema.getTargetNamespace())
definition = self.wsdl.document.documentElement
version = DOM.WSDLUriToVersion(definition.namespaceURI)
nspname = DOM.GetWSDLUri(version)
for node in DOM.getElements(definition, 'types', nspname):
for snode in DOM.getElements(node, 'schema'):
tns = DOM.findTargetNS(snode)
schema = self.wsdl.types[tns]
self.schemaAttributesDeclarations(schema, snode)
self.schemaAttributeGroupDeclarations(schema, snode)
self.schemaElementDeclarations(schema, snode)
self.schemaTypeDefinitions(schema, snode)
except:
self.path = self.path + ": wsdl.types"
raise
if self.wsdl.extensions:
            print 'No check for WSDLTools(%s) Extensions:' % (self.wsdl.name)
            for ext in self.wsdl.extensions:
                print '\t', ext
def schemaAttributesDeclarations(self, schema, node):
self.checkXSDCollection('attribute', schema.attr_decl, node)
def schemaAttributeGroupDeclarations(self, schema, node):
self.checkXSDCollection('group', schema.attr_groups, node)
def schemaElementDeclarations(self, schema, node):
self.checkXSDCollection('element', schema.elements, node)
def schemaTypeDefinitions(self, schema, node):
self.checkXSDCollection('complexType', schema.types, node)
self.checkXSDCollection('simpleType', schema.types, node)
def setUpOptions(section):
cp = ConfigParser.ConfigParser()
cp.read(cwd+'/config.txt')
if not cp.sections():
print 'fatal error: configuration file config.txt not present'
sys.exit(0)
if not cp.has_section(section):
print '%s section not present in configuration file, exiting' % section
sys.exit(0)
return cp, len(cp.options(section))
def getOption(cp, section):
for name, value in cp.items(section):
yield value
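# config.txt is expected to look like (hypothetical entries):
#   [services_by_file]
#   amazon = data/AmazonWebServices.wsdl
#   airport = http://www.webservicex.net/airport.asmx?WSDL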
def makeTestSuite(section='services_by_file'):
global nameGenerator
cp, numTests = setUpOptions(section)
nameGenerator = getOption(cp, section)
suite = unittest.TestSuite()
for i in range(0, numTests):
suite.addTest(unittest.makeSuite(WSDLToolsTestCase, 'test_'))
return suite
def main():
unittest.main(defaultTest="makeTestSuite")
if __name__ == "__main__" : main()
|
2014cdbg13/cdbg13 | refs/heads/master | wsgi/static/Brython2.1.0-20140419-113919/Lib/re.py | 54 | #
# Copyright (c) 2014 Olemis Lang. All rights reserved.
#
# Choose either Javascript (faster) or Python engine based on regex complexity
# with a noticeable preference for the former.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode digits.
\D Matches any non-digit character; equivalent to [^\d].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode whitespace characters.
\S Matches any non-whitespace character; equivalent to [^\s].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
in bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the
range of Unicode alphanumeric characters (letters plus digits
plus underscore).
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
default).
For bytes patterns, this flag is the only available
behaviour and needn't be specified.
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE For compatibility only. Ignored for string patterns (it
is the default), and forbidden for bytes patterns.
This module also defines an exception 'error'.
"""
import sys
import _jsre
_pymdl = [None]
if not _jsre._is_valid():
from pyre import *
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
"U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE",
# TODO: brython - same exception class in sre_constants and _jsre
#"error"
]
__version__ = "2.2.1"
# flags
A = ASCII = _jsre.A # assume ascii "locale"
I = IGNORECASE = _jsre.I # ignore case
L = LOCALE = _jsre.L # assume current 8-bit locale
U = UNICODE = _jsre.U # assume unicode "locale"
M = MULTILINE = _jsre.M # make anchors look for newline
S = DOTALL = _jsre.S # make dot match newline
X = VERBOSE = _jsre.X # ignore whitespace and comments
# sre exception
# TODO: brython - same exception class in sre_constants and _jsre
#error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def _pyre():
mdl = _pymdl[0]
if mdl is None:
import pyre
_pymdl[0] = pyre
return pyre
else:
return mdl
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
if _jsre._is_valid(pattern):
return _jsre.match(pattern, string, flags)
else:
return _pyre().match(pattern, string, flags)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
if _jsre._is_valid(pattern):
return _jsre.search(pattern, string, flags)
else:
return _pyre().search(pattern, string, flags)
def sub(pattern, repl, string, count=0, flags=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a string, backslash escapes in it are processed. If it is
a callable, it's passed the match object and must return
a replacement string to be used."""
if _jsre._is_valid(pattern):
return _jsre.sub(pattern, repl, string, count, flags)
else:
return _pyre().sub(pattern, repl, string, count, flags)
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
if _jsre._is_valid(pattern):
return _jsre.subn(pattern, repl, string, count, flags)
else:
return _pyre().subn(pattern, repl, string, count, flags)
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings. If
capturing parentheses are used in pattern, then the text of all
groups in the pattern are also returned as part of the resulting
list. If maxsplit is nonzero, at most maxsplit splits occur,
and the remainder of the string is returned as the final element
of the list."""
if _jsre._is_valid(pattern):
return _jsre.split(pattern, string, maxsplit, flags)
else:
return _pyre().split(pattern, string, maxsplit, flags)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more capturing groups are present in the pattern, return
a list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
if _jsre._is_valid(pattern):
return _jsre.findall(pattern, string, flags)
else:
return _pyre().findall(pattern, string, flags)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _pyre().finditer(pattern, string, flags)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
if _jsre._is_valid(pattern):
return _jsre.compile(pattern, flags)
else:
return _pyre().compile(pattern, flags)
def purge():
"Clear the regular expression caches"
if _pymdl[0] is not None:
return _pymdl[0].purge()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _pyre().template(pattern, flags)
def escape(pattern):
"""
Escape all the characters in pattern except ASCII letters, numbers and '_'.
"""
# FIXME: Do not load _re module
return _pyre().escape(pattern)
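if __name__ == '__main__':
    # Illustrative self-check; each call dispatches to the JS engine when the
    # pattern qualifies, otherwise to the Python fallback.
    assert match(r'\d+', '42 apples').group(0) == '42'
    assert findall(r'[a-z]+', 'One Two three') == ['ne', 'wo', 'three']
    print(sub(r'\s+', '-', 'a  b   c'))  # -> a-b-c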
|
andykimpe/chromium-test-npapi | refs/heads/master | chrome/test/chromedriver/test/unittest_util.py | 134 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for dealing with the python unittest module."""
import fnmatch
import sys
import unittest
class _TextTestResult(unittest._TextTestResult):
"""A test result class that can print formatted text results to a stream.
Results printed in conformance with gtest output format, like:
[ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
[ OK ] autofill.AutofillTest.testAutofillInvalid
[ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
[ OK ] autofill.AutofillTest.testFillProfile
[ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
[ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
"""
def __init__(self, stream, descriptions, verbosity):
unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
self._fails = set()
def _GetTestURI(self, test):
return '%s.%s.%s' % (test.__class__.__module__,
test.__class__.__name__,
test._testMethodName)
def getDescription(self, test):
return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def getRetestFilter(self):
return ':'.join(self._fails)
class TextTestRunner(unittest.TextTestRunner):
"""Test Runner for displaying test results in textual format.
Results are displayed in conformance with google test output.
"""
def __init__(self, verbosity=1):
unittest.TextTestRunner.__init__(self, stream=sys.stderr,
verbosity=verbosity)
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def GetTestsFromSuite(suite):
"""Returns all the tests from a given test suite."""
tests = []
for x in suite:
if isinstance(x, unittest.TestSuite):
tests += GetTestsFromSuite(x)
else:
tests += [x]
return tests
def GetTestNamesFromSuite(suite):
"""Returns a list of every test name in the given suite."""
return map(lambda x: GetTestName(x), GetTestsFromSuite(suite))
def GetTestName(test):
"""Gets the test name of the given unittest test."""
return '.'.join([test.__class__.__module__,
test.__class__.__name__,
test._testMethodName])
def FilterTestSuite(suite, gtest_filter):
"""Returns a new filtered tests suite based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter))
def FilterTests(all_tests, gtest_filter):
"""Returns a filtered list of tests based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
pattern_groups = gtest_filter.split('-')
positive_patterns = pattern_groups[0].split(':')
negative_patterns = None
if len(pattern_groups) > 1:
negative_patterns = pattern_groups[1].split(':')
tests = []
for test in all_tests:
test_name = GetTestName(test)
# Test name must by matched by one positive pattern.
for pattern in positive_patterns:
if fnmatch.fnmatch(test_name, pattern):
break
else:
continue
# Test name must not be matched by any negative patterns.
for pattern in negative_patterns or []:
if fnmatch.fnmatch(test_name, pattern):
break
else:
tests += [test]
return tests
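# Example filter strings accepted by FilterTests/FilterTestSuite:
#   'autofill.*'                  - every test in the autofill module
#   '*.testFillProfile'           - testFillProfile in any module/class
#   '*-*.*.testAutofillInvalid'   - everything except testAutofillInvalid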
|
jbking/demo-appengine-django-golang | refs/heads/master | myproject/django/test/simple.py | 78 | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)  # trailing comma: a tuple, not a string
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
parts = app_module.__name__.split('.')
prefix, last = parts[:-1], parts[-1]
try:
test_module = import_module('.'.join(prefix + [TEST_MODULE]))
except ImportError:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
# app_module either points to a models.py file, or models/__init__.py
# Tests are therefore either in same directory, or one level up
if last == 'models':
app_root = import_module('.'.join(prefix))
else:
app_root = app_module
if not module_has_submodule(app_root, TEST_MODULE):
test_module = None
else:
# The module exists, so there must be an import error in the test
# module itself.
raise
return test_module
def build_suite(app_module):
"""
Create a complete Django test suite for the provided application module.
"""
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
test_module))
try:
suite.addTest(doctest.DocTestSuite(
test_module, checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""
Construct a test case with the specified label. Label should be of the
    form app.TestClass or app.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase "
"or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(
TestClass)
except TypeError:
raise ValueError(
"Test label '%s' does not refer to a test class"
% label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (
module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
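# A sketch of the accepted labels, per the docstring above ("myapp" and the
# test names are hypothetical):
#
#   build_test('myapp.MyTests')            # -> suite of MyTests' test methods
#   build_test('myapp.MyTests.test_save')  # -> the single instantiated test
#   build_test('myapp.my_doctested_func')  # -> matching doctest, if one exists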
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
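# A minimal sketch of reorder_suite()'s effect (Fast/Slow are hypothetical;
# uncomment to run inside this module):
#
#   class Fast(unittest.TestCase):
#       def test_a(self): pass
#
#   class Slow(unittest.TestCase):
#       def test_b(self): pass
#
#   suite = unittest.TestSuite([Slow('test_b'), Fast('test_a')])
#   ordered = reorder_suite(suite, (Fast,))
#   # Iterating `ordered` now yields test_a before test_b: Fast matched
#   # classes[0], while Slow fell through to the trailing catch-all bin.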
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
    # Maps db signature to dependencies of all its aliases
    dependencies_map = {}
    # sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
        # Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
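# Sketch of the expected shapes (signatures and aliases are hypothetical):
#
#   test_databases = [
#       ('sig_a', ('test_main',  set(['default']))),
#       ('sig_b', ('test_other', set(['other']))),
#   ]
#   dependencies = {'other': ['default']}
#   dependency_ordered(test_databases, dependencies)
#   # -> [('sig_a', ...), ('sig_b', ...)]: 'sig_b' is deferred until every
#   #    alias it depends on has been resolved.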
class DjangoTestSuiteRunner(object):
    def __init__(self, verbosity=1, interactive=True, failfast=False, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (unittest.TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = (
connection.settings_dict['TEST_MIRROR'])
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = (
connection.settings_dict['TEST_DEPENDENCIES'])
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = connection.settings_dict.get(
'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
self.verbosity, autoclobber=not self.interactive)
destroy = True
else:
connection.settings_dict['NAME'] = test_db_name
destroy = False
old_names.append((connection, db_name, destroy))
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = (
connections[mirror_alias].settings_dict['NAME'])
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(
verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
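# Typical programmatic use, assuming Django settings are already configured
# ("myapp" is a hypothetical label; uncomment to run):
#
#   import sys
#   runner = DjangoTestSuiteRunner(verbosity=2, interactive=False)
#   failures = runner.run_tests(['myapp.MyTests.test_save'])
#   sys.exit(bool(failures))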
|
lmallin/coverage_test | refs/heads/master | python_venv/lib/python2.7/site-packages/pandas/tests/series/test_missing.py | 3 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytz
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isnull, date_range,
MultiIndex, Index, Timestamp, NaT, IntervalIndex)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
try:
import scipy
    _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
                         LooseVersion('0.19.0'))
except ImportError:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
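# For example, _simple_ts('1/1/2000', '1/10/2000') yields a 10-point Series of
# standard-normal draws on a daily DatetimeIndex; the interpolation tests below
# lean on this helper.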
class TestSeriesMissingData(TestData):
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isnull(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isnull(td1[0])
td1[1] = iNaT
assert isnull(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isnull(td1[1])
td1[2] = NaT
assert isnull(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isnull(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isnull(result).sum() == 7
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
        # neither monotonically increasing nor decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
assert result.name == self.ts.name
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData(TestData):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method='polynomial')
with pytest.raises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', min_version='0.15',
app='setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
def test_spline_error(self):
# see gh-10633
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
        # test for non-uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
|
slisson/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/hgext/inotify/__init__.py | 92 | # __init__.py - inotify-based status acceleration for Linux
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''accelerate status report using Linux's inotify service'''
# todo: socket permissions
from mercurial.i18n import _
from mercurial import util
import server
from client import client, QueryFailed
testedwith = 'internal'
def serve(ui, repo, **opts):
'''start an inotify server for this repository'''
server.start(ui, repo.dirstate, repo.root, opts)
def debuginotify(ui, repo, **opts):
'''debugging information for inotify extension
Prints the list of directories being watched by the inotify server.
'''
cli = client(ui, repo)
response = cli.debugquery()
ui.write(_('directories being watched:\n'))
for path in response:
ui.write((' %s/\n') % path)
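# Typical command-line use once the extension is enabled (flags as declared in
# cmdtable below):
#   $ hg inserve --daemon    # start the per-repository inotify server
#   $ hg debuginotify        # print the directories it is watching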
def reposetup(ui, repo):
if not util.safehasattr(repo, 'dirstate'):
return
class inotifydirstate(repo.dirstate.__class__):
# We'll set this to false after an unsuccessful attempt so that
# next calls of status() within the same instance don't try again
# to start an inotify server if it won't start.
_inotifyon = True
def status(self, match, subrepos, ignored, clean, unknown):
files = match.files()
if '.' in files:
files = []
if (self._inotifyon and not ignored and not subrepos and
not self._dirty):
cli = client(ui, repo)
try:
result = cli.statusquery(files, match, False,
clean, unknown)
except QueryFailed, instr:
ui.debug(str(instr))
# don't retry within the same hg instance
inotifydirstate._inotifyon = False
pass
else:
if ui.config('inotify', 'debug'):
r2 = super(inotifydirstate, self).status(
match, [], False, clean, unknown)
for c, a, b in zip('LMARDUIC', result, r2):
for f in a:
if f not in b:
ui.warn('*** inotify: %s +%s\n' % (c, f))
for f in b:
if f not in a:
ui.warn('*** inotify: %s -%s\n' % (c, f))
result = r2
return result
return super(inotifydirstate, self).status(
match, subrepos, ignored, clean, unknown)
repo.dirstate.__class__ = inotifydirstate
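# A minimal hgrc sketch enabling the extension; the optional `debug` knob is
# the one read via ui.config('inotify', 'debug') above, and makes status()
# cross-check inotify results against a regular dirstate walk:
#
#   [extensions]
#   inotify =
#
#   [inotify]
#   debug = 1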
cmdtable = {
'debuginotify':
(debuginotify, [], ('hg debuginotify')),
'^inserve':
(serve,
[('d', 'daemon', None, _('run server in background')),
('', 'daemon-pipefds', '',
_('used internally by daemon mode'), _('NUM')),
('t', 'idle-timeout', '',
_('minutes to sit idle before exiting'), _('NUM')),
('', 'pid-file', '',
_('name of file to write process ID to'), _('FILE'))],
_('hg inserve [OPTION]...')),
}
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.5.0/Lib/idlelib/AutoComplete.py | 82 | """AutoComplete.py - An IDLE extension for automatically completing names.
This extension can complete either attribute names or file names. It can pop
a window with all available names, for the user to select from.
"""
import os
import sys
import string
from idlelib.configHandler import idleConf
# This string includes all chars that may be in an identifier
ID_CHARS = string.ascii_letters + string.digits + "_"
# These constants represent the two different types of completions
COMPLETE_ATTRIBUTES, COMPLETE_FILES = range(1, 2+1)
from idlelib import AutoCompleteWindow
from idlelib.HyperParser import HyperParser
import __main__
SEPS = os.sep
if os.altsep: # e.g. '/' on Windows...
SEPS += os.altsep
class AutoComplete:
menudefs = [
('edit', [
("Show Completions", "<<force-open-completions>>"),
])
]
popupwait = idleConf.GetOption("extensions", "AutoComplete",
"popupwait", type="int", default=0)
def __init__(self, editwin=None):
self.editwin = editwin
if editwin is None: # subprocess and test
return
self.text = editwin.text
self.autocompletewindow = None
# id of delayed call, and the index of the text insert when the delayed
# call was issued. If _delayed_completion_id is None, there is no
# delayed call.
self._delayed_completion_id = None
self._delayed_completion_index = None
def _make_autocomplete_window(self):
return AutoCompleteWindow.AutoCompleteWindow(self.text)
def _remove_autocomplete_window(self, event=None):
if self.autocompletewindow:
self.autocompletewindow.hide_window()
self.autocompletewindow = None
def force_open_completions_event(self, event):
"""Happens when the user really wants to open a completion list, even
if a function call is needed.
"""
self.open_completions(True, False, True)
def try_open_completions_event(self, event):
"""Happens when it would be nice to open a completion list, but not
        really necessary, for example after a dot, so function
calls won't be made.
"""
lastchar = self.text.get("insert-1c")
if lastchar == ".":
self._open_completions_later(False, False, False,
COMPLETE_ATTRIBUTES)
elif lastchar in SEPS:
self._open_completions_later(False, False, False,
COMPLETE_FILES)
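    # Illustrative triggers for the two modes (buffer contents hypothetical):
    #   typing the '.' in "os."    -> COMPLETE_ATTRIBUTES; candidates come
    #                                 from dir() on the evaluated expression
    #   typing the '/' in "'/usr"  -> COMPLETE_FILES; candidates come from
    #                                 os.listdir() on the partial path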
def autocomplete_event(self, event):
"""Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
"""
if hasattr(event, "mc_state") and event.mc_state:
# A modifier was pressed along with the tab, continue as usual.
return
if self.autocompletewindow and self.autocompletewindow.is_active():
self.autocompletewindow.complete()
return "break"
else:
opened = self.open_completions(False, True, True)
if opened:
return "break"
def _open_completions_later(self, *args):
self._delayed_completion_index = self.text.index("insert")
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = \
self.text.after(self.popupwait, self._delayed_open_completions,
*args)
def _delayed_open_completions(self, *args):
self._delayed_completion_id = None
if self.text.index("insert") != self._delayed_completion_index:
return
self.open_completions(*args)
def open_completions(self, evalfuncs, complete, userWantsWin, mode=None):
"""Find the completions and create the AutoCompleteWindow.
        Return True if successful (no syntax error or similar was found).
        If complete is True and there is nothing to complete and no
        start of a completion, do not open the completion list and
        return False.
If mode is given, will open a completion list only in this mode.
"""
# Cancel another delayed call, if it exists.
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = None
hp = HyperParser(self.editwin, "insert")
curline = self.text.get("insert linestart", "insert")
i = j = len(curline)
if hp.is_in_string() and (not mode or mode==COMPLETE_FILES):
# Find the beginning of the string
# fetch_completions will look at the file system to determine whether the
# string value constitutes an actual file name
# XXX could consider raw strings here and unescape the string value if it's
# not raw.
self._remove_autocomplete_window()
mode = COMPLETE_FILES
# Find last separator or string start
while i and curline[i-1] not in "'\"" + SEPS:
i -= 1
comp_start = curline[i:j]
j = i
# Find string start
while i and curline[i-1] not in "'\"":
i -= 1
comp_what = curline[i:j]
elif hp.is_in_code() and (not mode or mode==COMPLETE_ATTRIBUTES):
self._remove_autocomplete_window()
mode = COMPLETE_ATTRIBUTES
while i and (curline[i-1] in ID_CHARS or ord(curline[i-1]) > 127):
i -= 1
comp_start = curline[i:j]
if i and curline[i-1] == '.':
hp.set_index("insert-%dc" % (len(curline)-(i-1)))
comp_what = hp.get_expression()
if not comp_what or \
(not evalfuncs and comp_what.find('(') != -1):
return
else:
comp_what = ""
else:
return
if complete and not comp_what and not comp_start:
return
comp_lists = self.fetch_completions(comp_what, mode)
if not comp_lists[0]:
return
self.autocompletewindow = self._make_autocomplete_window()
return not self.autocompletewindow.show_window(
comp_lists, "insert-%dc" % len(comp_start),
complete, mode, userWantsWin)
def fetch_completions(self, what, mode):
"""Return a pair of lists of completions for something. The first list
is a sublist of the second. Both are sorted.
        If there is a Python subprocess, get the completion list there.
        Otherwise, either fetch_completions() is running in the subprocess
        itself or it was called in an IDLE EditorWindow before any script had
        been run.
        The subprocess environment is that of the most recently run script. If
        two unrelated modules are being edited, some completions in the current
        module may be inoperative if the module was not the last to run.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_completion_list",
(what, mode), {})
else:
if mode == COMPLETE_ATTRIBUTES:
if what == "":
namespace = __main__.__dict__.copy()
namespace.update(__main__.__builtins__.__dict__)
bigl = eval("dir()", namespace)
bigl.sort()
if "__all__" in bigl:
smalll = sorted(eval("__all__", namespace))
else:
smalll = [s for s in bigl if s[:1] != '_']
else:
try:
entity = self.get_entity(what)
bigl = dir(entity)
bigl.sort()
if "__all__" in bigl:
smalll = sorted(entity.__all__)
else:
smalll = [s for s in bigl if s[:1] != '_']
except:
return [], []
elif mode == COMPLETE_FILES:
if what == "":
what = "."
try:
expandedpath = os.path.expanduser(what)
bigl = os.listdir(expandedpath)
bigl.sort()
smalll = [s for s in bigl if s[:1] != '.']
except OSError:
return [], []
if not smalll:
smalll = bigl
return smalll, bigl
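    # Return-shape sketch: fetch_completions(what, COMPLETE_ATTRIBUTES) yields
    # (smalll, bigl) where bigl is the sorted dir() of the entity and smalll
    # hides the '_'-prefixed names -- unless the entity defines __all__, in
    # which case sorted(__all__) is the small list. For COMPLETE_FILES the
    # same pairing applies to os.listdir() entries and '.'-prefixed files.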
def get_entity(self, name):
"""Lookup name in a namespace spanning sys.modules and __main.dict__"""
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
return eval(name, namespace)
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_autocomplete', verbosity=2)
|
dbbhattacharya/kitsune | refs/heads/master | vendor/packages/South/south/tests/circular_a/migrations/0001_first.py | 174 | from south.db import db
from django.db import models
class Migration:
depends_on = [('circular_b', '0001_first')]
def forwards(self):
pass
def backwards(self):
pass
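    # The depends_on tuple above asks South to apply circular_b's 0001_first
    # before this migration; circular_b presumably declares the mirror-image
    # dependency, which is what lets the test suite exercise circular-
    # dependency detection.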
|
proversity-org/edx-platform | refs/heads/master | common/test/acceptance/tests/studio/test_studio_course_team.py | 20 | """
Acceptance tests for course in studio
"""
from nose.plugins.attrib import attr
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.index import DashboardPage
from common.test.acceptance.pages.studio.users import CourseTeamPage
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
@attr(shard=2)
class CourseTeamPageTest(StudioCourseTest):
""" As a course author, I want to be able to add others to my team """
def _make_user(self, username):
""" Registers user and returns user representation dictionary as expected by `log_in` function """
user = {
'username': username,
'email': username + "@example.com",
'password': username + '123'
}
AutoAuthPage(
self.browser, no_login=True,
username=user.get('username'), email=user.get('email'), password=user.get('password')
).visit()
return user
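    # For example, self._make_user('frank') registers frank@example.com with
    # password 'frank123' and returns the credential dict that log_in()
    # expects.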
def _update_user(self, user_info):
"""
Update user with provided `user_info`
Arguments:
`user_info`: dictionary containing values of attributes to be updated
"""
AutoAuthPage(
self.browser, no_login=True, **user_info
).visit()
def setUp(self, is_staff=False):
"""
Install a course with no content using a fixture.
"""
super(CourseTeamPageTest, self).setUp(is_staff)
self.other_user = self._make_user('other')
self.dashboard_page = DashboardPage(self.browser)
self.page = CourseTeamPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self._go_to_course_team_page()
def _go_to_course_team_page(self):
""" Opens Course Team page """
self.page.visit()
self.page.wait_until_no_loading_indicator()
def _refresh_page(self):
"""
Reload the page.
"""
self.page = CourseTeamPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self._go_to_course_team_page()
def _assert_current_course(self, visible=True):
""" Checks if current course is accessible to current user """
self.dashboard_page.visit()
courses = self.dashboard_page.list_courses()
def check_course_equality(course1, course2):
""" Compares to course dictionaries using org, number and run as keys"""
return (
course1['org'] == course2['display_organization'] and
course1['number'] == course2['display_coursenumber'] and
course1['run'] == course2['run']
)
actual_visible = any((check_course_equality(course, self.course_info) for course in courses))
self.assertEqual(actual_visible, visible)
def _assert_user_present(self, user, present=True):
""" Checks if specified user present on Course Team page """
if present:
self.page.wait_for(
lambda: user.get('username') in self.page.usernames,
description="Wait for user to be present"
)
else:
self.page.wait_for(
lambda: user.get('username') not in self.page.usernames,
description="Wait for user to be absent"
)
def _should_see_dialog(self, dialog_type, dialog_message):
""" Asserts dialog with specified message is shown """
self.page.modal_dialog_visible(dialog_type)
self.assertIn(dialog_message, self.page.modal_dialog_text(dialog_type))
def _assert_is_staff(self, user, can_manage=True):
""" Checks if user have staff permissions, can be promoted and can't be demoted """
self.assertIn("staff", user.role_label.lower())
if can_manage:
self.assertTrue(user.can_promote)
self.assertFalse(user.can_demote)
self.assertIn("Add Admin Access", user.promote_button_text)
def _assert_is_admin(self, user):
""" Checks if user have admin permissions, can't be promoted and can be demoted """
self.assertIn("admin", user.role_label.lower())
self.assertFalse(user.can_promote)
self.assertTrue(user.can_demote)
self.assertIn("Remove Admin Access", user.demote_button_text)
def _assert_can_manage_users(self):
""" Checks if current user can manage course team """
self.assertTrue(self.page.has_add_button)
for user in self.page.users:
self.assertTrue(user.can_promote or user.can_demote) # depending on actual user role
self.assertTrue(user.can_delete)
def _assert_can_not_manage_users(self):
""" Checks if current user can't manage course team """
self.assertFalse(self.page.has_add_button)
for user in self.page.users:
self.assertFalse(user.can_promote)
self.assertFalse(user.can_demote)
self.assertFalse(user.can_delete)
def test_admins_can_add_other_users(self):
"""
Scenario: Admins can add other users
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And other user logs in
        Then he does see the course on his page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
self.log_in(self.other_user)
self._assert_current_course(visible=True)
def test_added_users_cannot_add_or_delete_other_users(self):
"""
Scenario: Added users cannot delete or add other users
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And other user logs in
And he selects the new course
And he views the course team settings
Then he cannot manage users
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
self.log_in(self.other_user)
self._assert_current_course(visible=True)
self._go_to_course_team_page()
bob = self.page.get_user(self.other_user.get('email'))
self.assertTrue(bob.is_current_user)
self.assertFalse(self.page.has_add_button)
self._assert_can_not_manage_users()
def test_admins_can_delete_other_users(self):
"""
Scenario: Admins can delete other users
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And I delete other user from the course team
And other user logs in
        Then he does not see the course on his page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
self.page.delete_user_from_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=False)
self.log_in(self.other_user)
self._assert_current_course(visible=False)
def test_admins_can_delete_other_inactive_users(self):
"""
Scenario: Admins can delete other inactive users
Given I have opened a new course in Studio
        And I am viewing the course team settings
        When I add other user to the course team
        And then delete that other user from the course team
        And other user logs in
        Then he does not see the course on his page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
        # deactivate the user
user_info = {
'username': self.other_user.get('username'),
'email': self.other_user.get('email'),
'password': self.other_user.get('password'),
'is_active': False
}
self._update_user(user_info)
# go to course team page to perform delete operation
self._go_to_course_team_page()
self.page.delete_user_from_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=False)
def test_admins_cannot_add_users_that_do_not_exist(self):
"""
Scenario: Admins cannot add users that do not exist
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add "dennis" to the course team
Then I should see "Could not find user by email address" somewhere on the page
"""
self.page.add_user_to_course("dennis@example.com")
self._should_see_dialog('error', "Could not find user by email address")
def test_admins_should_be_able_to_make_other_people_into_admins(self):
"""
Scenario: Admins should be able to make other people into admins
Given I have opened a new course in Studio
And I am viewing the course team settings
And I add other user to the course team
When I make other user a course team admin
And other user logs in
And he selects the new course
And he views the course team settings
Then other user should be marked as an admin
And he can manage users
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_staff(other)
other.click_promote()
self._refresh_page()
self._assert_is_admin(other)
self.log_in(self.other_user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
self.assertTrue(other.is_current_user)
self._assert_can_manage_users()
def test_admins_should_be_able_to_remove_other_admins(self):
"""
Scenario: Admins should be able to remove other admins
Given I have opened a new course in Studio
And I grant admin rights to other user
Then he can add, delete, promote and demote users
And I am viewing the course team settings
When I remove admin rights from other user
And other user logs in
And he selects the new course
And he views the course team settings
Then other user should not be marked as an admin
And he cannot manage users
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_staff(other)
other.click_promote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_admin(other)
        # precondition check - the other user is an admin and can add/delete/promote/demote users
self.log_in(self.other_user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
self.assertTrue(other.is_current_user)
self._assert_can_manage_users()
self.log_in(self.user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
other.click_demote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_staff(other)
self.log_in(self.other_user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
self.assertTrue(other.is_current_user)
self._assert_can_not_manage_users()
    def test_admins_should_be_able_to_remove_themselves_if_other_admin_exists(self):
        """
        Scenario: Admins should be able to remove themselves if another admin exists
Given I have opened a new course in Studio
And I am viewing the course team settings
And I'm the only course admin
Then I cannot delete or demote myself
When I add other user to the course team
And I make other user a course team admin
Then I can delete or demote myself
When I delete myself from the course team
And I am logged into studio
Then I do not see the course on my page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
current = self.page.get_user(self.user.get('email'))
self.assertFalse(current.can_demote)
self.assertFalse(current.can_delete)
self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text)
other = self.page.get_user(self.other_user.get('email'))
other.click_promote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_admin(other)
current = self.page.get_user(self.user.get('email'))
self.assertTrue(current.can_demote)
self.assertTrue(current.can_delete)
current.click_delete()
self.log_in(self.user)
self._assert_current_course(visible=False)
def test_admins_should_be_able_to_give_course_ownership_to_someone_else(self):
"""
Scenario: Admins should be able to give course ownership to someone else
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And I make other user a course team admin
When I remove admin rights from myself
Then I should not be marked as an admin
And I cannot manage users
And I cannot make myself a course team admin
When other user logs in
And he selects the new course
And he views the course team settings
And he deletes me from the course team
And I am logged into studio
Then I do not see the course on my page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
current = self.page.get_user(self.user.get('email'))
self.assertFalse(current.can_demote)
self.assertFalse(current.can_delete)
self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text)
other = self.page.get_user(self.other_user.get('email'))
other.click_promote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_admin(other)
current = self.page.get_user(self.user.get('email'))
self.assertTrue(current.can_demote)
self.assertTrue(current.can_delete)
current.click_demote()
self._refresh_page()
current = self.page.get_user(self.user.get('email'))
self._assert_is_staff(current, can_manage=False)
self._assert_can_not_manage_users()
self.assertFalse(current.can_promote)
self.log_in(self.other_user)
self._go_to_course_team_page()
current = self.page.get_user(self.user.get('email'))
current.click_delete()
self._refresh_page()
self._assert_user_present(self.user, present=False)
self.log_in(self.user)
self._assert_current_course(visible=False)
|
kingsdigitallab/kdl-django | refs/heads/master | cms/migrations/0021_imagelistblock.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 11:39
from __future__ import unicode_literals
import cms.models.streamfield
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('cms', '0020_orderedlistblock_listblock'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='blogpost',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='indexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='organisationindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='organisationpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='personindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='personpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='richtextpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='workindexpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
migrations.AlterField(
model_name='workpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'banner', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image', required=False)), (b'image_copyright', wagtail.wagtailcore.blocks.CharBlock(icon='locked', required=False)), (b'style', cms.models.streamfield.BannerStyleChoiceBlock())], label='Banner section')), (b'ordered_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow'))])))], help_text='Use this for sections similar to process', label='Ordered list section')), (b'image_list', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(icon='title', required=False)), (b'items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([('title', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('subtitle', wagtail.wagtailcore.blocks.CharBlock(icon='title')), ('description', wagtail.wagtailcore.blocks.CharBlock(icon='pilcrow')), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(icon='image'))])))], label='Image list section')), (b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'person', wagtail.wagtailcore.blocks.StructBlock([(b'person', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='user')), (b'organisation', wagtail.wagtailcore.blocks.StructBlock([(b'organisation', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'description', wagtail.wagtailcore.blocks.RichTextBlock())], icon='group')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'projects', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.PageChooserBlock(), icon='pick', label='Featured projects')), (b'latest_blog_posts', wagtail.wagtailcore.blocks.BooleanBlock(icon='date', label='Show latest blog posts', required=True)), (b'twitter', wagtail.wagtailcore.blocks.CharBlock(icon='wagtail')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', 
wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))]),
),
]
|
dahlstrom-g/intellij-community | refs/heads/master | python/testData/intentions/PyConvertToFStringIntentionTest/formatMethodIndexContainsAlternativeQuoteOfMultilineHost_after.py | 31 | f'''{d['"']}''' |
arcz/ansible-modules-core | refs/heads/devel | cloud/openstack/_quantum_floating_ip.py | 146 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip
version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Brad P. Crochet (@bcrochet)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Add/Remove floating IP from an instance
description:
- Add or Remove a floating IP to an instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network from which IP has to be assigned to VM. Please make sure the network is an external network
required: true
default: None
instance_name:
description:
- The name of the instance to which the IP address should be assigned
required: true
default: None
internal_network_name:
description:
        - The name of the network of the port to associate with the floating ip. Necessary when the VM is attached to multiple networks.
required: false
default: None
version_added: "1.5"
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Assign a floating ip to the instance from an external network
- quantum_floating_ip: state=present login_username=admin login_password=admin
login_tenant_name=admin network_name=external_network
instance_name=vm1 internal_network_name=internal_network
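# Release the floating ip from the instance (an illustrative sketch based on
# the module's documented 'absent' state; names and credentials are placeholders)
- quantum_floating_ip: state=absent login_username=admin login_password=admin
                login_tenant_name=admin network_name=external_network
                instance_name=vm1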
'''
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'),
region_name=kwargs.get('region_name'))
    except Exception as e:
        module.fail_json(msg="Error authenticating to keystone: %s" % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
    except Exception as e:
        module.fail_json(msg="Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
    except Exception as e:
        module.fail_json(msg="Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
                        module.fail_json(msg="The VM is available but not Active. state: " + info['status'])
server_info = info
break
    except Exception as e:
        module.fail_json(msg="Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_info(neutron, module, instance_id, internal_network_name=None):
subnet_id = None
if internal_network_name:
kwargs = {'name': internal_network_name}
networks = neutron.list_networks(**kwargs)
network_id = networks['networks'][0]['id']
kwargs = {
'network_id': network_id,
'ip_version': 4
}
subnets = neutron.list_subnets(**kwargs)
subnet_id = subnets['subnets'][0]['id']
kwargs = {
'device_id': instance_id,
}
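    # Neutron's list_ports() response is shaped roughly like
    # {'ports': [{'id': ..., 'fixed_ips': [{'subnet_id': ..., 'ip_address': ...}]}]};
    # the lookups below rely on that structure (an illustrative sketch, not an
    # exhaustive schema).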
    try:
        ports = neutron.list_ports(**kwargs)
    except Exception as e:
        module.fail_json(msg="Error in listing ports: %s" % e.message)
    # Guard against an empty result before indexing into it.
    if not ports['ports']:
        return None, None
    if subnet_id:
        # Pick the port whose fixed ip belongs to the requested subnet; fall
        # back to None so the caller gets a clean "no port" result instead of
        # a StopIteration crash when nothing matches.
        port = next((port for port in ports['ports']
                     if port['fixed_ips'][0]['subnet_id'] == subnet_id), None)
        if port is None:
            return None, None
        port_id = port['id']
        fixed_ip_address = port['fixed_ips'][0]['ip_address']
    else:
        port_id = ports['ports'][0]['id']
        fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address']
    return fixed_ip_address, port_id
def _get_floating_ip(module, neutron, fixed_ip_address, network_name):
kwargs = {
'fixed_ip_address': fixed_ip_address
}
try:
ips = neutron.list_floatingips(**kwargs)
    except Exception as e:
        module.fail_json(msg="Error in fetching the floating ips: %s" % e.message)
if not ips['floatingips']:
return None, None
for address in ips['floatingips']:
if _check_ips_network(neutron, address['floating_network_id'], network_name):
return address['id'], address['floating_ip_address']
return None, None
def _check_ips_network(neutron, net_id, network_name):
if neutron.show_network(net_id)['network']['name'] == network_name:
return True
else:
return False
def _create_floating_ip(neutron, module, port_id, net_id, fixed_ip):
kwargs = {
'port_id': port_id,
'floating_network_id': net_id,
'fixed_ip_address': fixed_ip
}
try:
result = neutron.create_floatingip({'floatingip': kwargs})
except Exception, e:
module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address'])
def _get_net_id(neutron, module):
kwargs = {
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception, e:
module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed=True, result=result)
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
network_name = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present']),
internal_network_name = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], region_name=module.params['region_name'], service_type='compute')
neutron = _get_neutron_client(module, module.params)
except Exception, e:
module.fail_json(msg="Error in authenticating to nova: %s" % e.message)
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg="The instance name provided cannot be found")
fixed_ip, port_id = _get_port_info(neutron, module, server_info['id'], module.params['internal_network_name'])
if not port_id:
module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned")
floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip, module.params['network_name'])
if module.params['state'] == 'present':
if floating_ip:
module.exit_json(changed = False, public_ip=floating_ip)
net_id = _get_net_id(neutron, module)
if not net_id:
module.fail_json(msg = "cannot find the network specified, please check")
_create_floating_ip(neutron, module, port_id, net_id, fixed_ip)
if module.params['state'] == 'absent':
if floating_ip:
_update_floating_ip(neutron, module, None, floating_id)
module.exit_json(changed=False)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
bgxavier/nova | refs/heads/master | nova/api/openstack/compute/schemas/v3/disk_config.py | 81 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
server_create = {
'OS-DCF:diskConfig': {
'type': 'string',
'enum': ['AUTO', 'MANUAL']
}
}
|
guiraldelli/MSc | refs/heads/master | data_analyzer/data_analyzer.py | 1 | import pyparsing
import os.path
import logging
logging.basicConfig()
nodes_count = "Nodes: " + pyparsing.Word(pyparsing.nums)
edges_count = "Edges: " + pyparsing.Word(pyparsing.nums)
weigthed_edge = "(('" + pyparsing.Word(pyparsing.alphanums + "-_. ") + "', '" + pyparsing.Word(pyparsing.alphanums + "-_. ") + "'), " + pyparsing.Word(pyparsing.nums) + ")"
def process_filename(nexus_filename, extension=".txt", suffix_final="_final", suffix_mutational="_mutation", suffix_recombination="_recombination", suffix_data_analyzed="_data_analyzed"):
filename = None
if nexus_filename[-4:] == ".nex":
filename = nexus_filename[0:-4]
return (filename + suffix_final + extension, filename + suffix_mutational + extension, filename + suffix_recombination + extension, filename + suffix_data_analyzed + ".csv")
else:
return None
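# Hypothetical usage (the file name is illustrative only):
#   process_filename('run1.nex')
#   # -> ('run1_final.txt', 'run1_mutation.txt',
#   #     'run1_recombination.txt', 'run1_data_analyzed.csv')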
def process_file(filename):
logger = logging.getLogger('data_analyzer.process_file')
logger.setLevel(logging.INFO)
file = open(filename, 'r')
number_nodes = int(nodes_count.parseString(file.readline())[1])
number_edges = int(edges_count.parseString(file.readline())[1])
nodes = set()
edges = dict()
for edge in file.readlines():
logger.debug(edge)
        parsed = weighted_edge.parseString(edge)
nodes.add(parsed[1])
nodes.add(parsed[3])
edges[ parsed[1] + "," + parsed[3] ] = parsed[5]
if number_nodes != len(nodes):
        message = 'For file %s, number of declared NODES (%d) is DIFFERENT from the number of EXISTING ones (%d).' % (filename, number_nodes, len(nodes))
logger.warn(message)
if number_edges != len(edges.keys()):
message = 'For file %s, number of declared EDGES (%d) is DIFFERENT from the number of EXISTING ones (%d).' % (filename, number_edges, len(edges))
logger.warn(message)
return (nodes, edges)
def process_nodes(final_nodes, mutation_nodes, recombination_nodes):
logger = logging.getLogger('data_analyzer.process_nodes')
logger.setLevel(logging.INFO)
mutation_and_recombination = 0
mutation_and_final = 0
recombination_and_final = 0
mutation_and_recombination_and_final = 0
nodes = set()
nodes = nodes.union(final_nodes, mutation_nodes, recombination_nodes)
for node in nodes:
if node in mutation_nodes and node in recombination_nodes:
mutation_and_recombination += 1
if node in final_nodes:
mutation_and_recombination_and_final += 1
mutation_and_final += 1
recombination_and_final += 1
else:
logger.error("Node " + node + " found but not in the final graph!")
elif node in mutation_nodes and node in final_nodes:
mutation_and_final += 1
elif node in recombination_nodes and node in final_nodes:
recombination_and_final += 1
else:
logger.error("Node " + node + " found but not in the final graph!")
ret = (len(mutation_nodes), len(recombination_nodes), len(final_nodes), mutation_and_recombination, mutation_and_final, recombination_and_final, mutation_and_recombination_and_final)
return ret
def process_edges(final_edges, mutation_edges, recombination_edges):
logger = logging.getLogger('data_analyzer.process_edges')
logger.setLevel(logging.INFO)
mutation_and_recombination = 0
mutation_and_final = 0
recombination_and_final = 0
mutation_and_recombination_and_final = 0
edges = set()
edges = edges.union(final_edges.keys(), mutation_edges.keys(), recombination_edges.keys())
for edge in edges:
if edge in mutation_edges.keys() and edge in recombination_edges.keys():
mutation_and_recombination += 1
if edge in final_edges.keys():
mutation_and_recombination_and_final += 1
mutation_and_final += 1
recombination_and_final += 1
else:
logger.error("Edge " + edge + " found but not in the final graph!")
elif edge in mutation_edges.keys() and edge in final_edges.keys():
mutation_and_final += 1
elif edge in recombination_edges.keys() and edge in final_edges.keys():
recombination_and_final += 1
else:
logger.error("Edge " + edge + " found but not in the final graph!")
ret = (len(mutation_edges.keys()), len(recombination_edges.keys()), len(final_edges.keys()), mutation_and_recombination, mutation_and_final, recombination_and_final, mutation_and_recombination_and_final)
return ret
# def process_edges(final_edges, mutation_edges, recombination_edges):
# data = { 'mutation':{'common':0, 'same':0, 'not':0, 'refused':0}, 'recombination':{'common':0, 'same':0, 'not':0, 'refused':0}}
# # data['mutation']['common'] = 0
# # data['mutation']['same'] = 0
# # data['mutation']['not'] = 0
# # data['mutation']['refused'] = 0
# # data['recombination']['common'] = 0
# # data['recombination']['same'] = 0
# # data['recombination']['not'] = 0
# # data['recombination']['refused'] = 0
# for edge in final_edges.keys():
# if edge in mutation_edges.keys():
# data['mutation']['common'] += 1
# if final_edges[edge] == mutation_edges[edge]:
# data['mutation']['same'] += 1
# else:
# data['mutation']['refused'] += 1
# else:
# data['mutation']['not'] += 1
# if edge in recombination_edges.keys():
# data['recombination']['common'] += 1
# if final_edges[edge] == recombination_edges[edge]:
# data['recombination']['same'] += 1
# else:
# data['recombination']['refused'] += 1
# else:
# data['recombination']['not'] += 1
# return data
def process_weights(final_edges, mutation_edges, recombination_edges):
logger = logging.getLogger('data_analyzer.process_weights')
logger.setLevel(logging.INFO)
mutation_and_recombination = 0
mutation_and_final = 0
recombination_and_final = 0
mutation_and_recombination_and_final = 0
edges = set()
edges = edges.union(final_edges.keys(), mutation_edges.keys(), recombination_edges.keys())
for edge in edges:
if edge in mutation_edges.keys() and edge in recombination_edges.keys() and mutation_edges[edge] == recombination_edges[edge]:
mutation_and_recombination += 1
if edge in final_edges.keys() and mutation_edges[edge] == final_edges[edge] and recombination_edges[edge] == final_edges[edge]:
mutation_and_recombination_and_final += 1
mutation_and_final += 1
recombination_and_final += 1
else:
logger.error("Edge " + edge + " has a value that is not equal to final, mutation and recombination when it should be!")
elif edge in mutation_edges.keys() and edge in final_edges.keys() and mutation_edges[edge] == final_edges[edge]:
mutation_and_final += 1
elif edge in recombination_edges.keys() and edge in final_edges.keys() and recombination_edges[edge] == final_edges[edge]:
recombination_and_final += 1
else:
logger.error("Edge " + edge + " found but its value not compatible when it should be!")
ret = (mutation_and_recombination, mutation_and_final, recombination_and_final, mutation_and_recombination_and_final)
return ret
def write_header(file):
header_file = '''"Nexus File"'''
header_nodes = '''"N(M)","N(R)","N(F)","N(M) and N(R)","N(M) and N(F)","N(R) and N(F)","N(M) and N(R) and N(F)"'''
header_edges = '''"E(M)","E(R)","E(F)","E(M) and E(R)","E(M) and E(F)","E(R) and E(F)","E(M) and E(R) and E(F)"'''
header_weights = '''"w(M) = w(R)","w(M) = w(F)","w(R) = w(F)","w(M) = w(R) = w(F)"'''
column_separator = ","
new_line = "\n"
file.write(header_file + column_separator + header_nodes + column_separator + header_edges + column_separator + header_weights + new_line)
file.flush()
def analyze_data(filepath_open, filepath_save):
logger = logging.getLogger('data_analyzer.analyze_data')
logger.setLevel(logging.INFO)
final_filename, mutation_filename, recombination_filename, data_analyzed_filename = process_filename(filepath_open)
# overwriting data_analyzed_filename
data_analyzed_filename = filepath_save
final_nodes, final_edges = process_file(final_filename)
mutation_nodes, mutation_edges = process_file(mutation_filename)
recombination_nodes, recombination_edges = process_file(recombination_filename)
data_analyzed = process_edges(final_edges, mutation_edges, recombination_edges)
# saving data
if os.path.exists(data_analyzed_filename):
file = open(data_analyzed_filename, 'a')
logger.warn("File '" + data_analyzed_filename + "' exists; appending data in this file.")
else:
file = open(data_analyzed_filename, 'w')
logger.info("Creating file '" + data_analyzed_filename + "'.")
write_header(file)
# getting information
nodes_info = process_nodes(final_nodes, mutation_nodes, recombination_nodes)
edges_info = process_edges(final_edges, mutation_edges, recombination_edges)
weights_info = process_weights(final_edges, mutation_edges, recombination_edges)
# writing to file
file.write('''"''' + filepath_open + '''",''')
for info in nodes_info:
file.write(str(info))
file.write(",")
for info in edges_info:
file.write(str(info))
file.write(",")
for info in weights_info:
file.write(str(info))
file.write(",")
file.write("\n")
# file.write("%d,%d,%d,%d,%d,%d,%d,") % (nodes_info[0], nodes_info[1], nodes_info[2], nodes_info[3], nodes_info[4], nodes_info[5], nodes_info[6])
# file.write("%d,%d,%d,%d,%d,%d,%d,") % (edges_info[0], edges_info[1], edges_info[2], edges_info[3], edges_info[4], edges_info[5], edges_info[6])
# file.write("%d,%d,%d,%d\n") % (weights_info[0], weights_info[1], weights_info[2], weights_info[3])
# # printing to screen
# print nodes_info
# print edges_info
# print weights_info
# file.write("'Graph','Node','Edges'")
# file.write("\n")
# file.write("'%s',%d,%d" % ("Final", len(final_nodes), len(final_edges.keys())))
# file.write("\n")
# file.write("'%s',%d,%d" % ("Mutation", len(mutation_nodes), len(mutation_edges.keys())))
# file.write("\n")
# file.write("'%s',%d,%d" % ("Recombination", len(recombination_nodes), len(recombination_edges.keys())))
# file.write("\n")
# file.write("\n")
# file.write("'Graph','Common Edges','Same Edges', 'Not Have Edges','Refused Edges'")
# file.write("\n")
# for graph_type in data_analyzed.keys():
# file.write("'%s'" % (graph_type.capitalize()))
# file.write(",%d" % (data_analyzed[graph_type]['common']))
# file.write(",%d" % (data_analyzed[graph_type]['same']))
# file.write(",%d" % (data_analyzed[graph_type]['not']))
# file.write(",%d" % (data_analyzed[graph_type]['refused']))
# for property in data_analyzed[graph_type].keys():
# logger.debug("data_analyzed[%s][%s] = %d" % (graph_type, property, data_analyzed[graph_type][property]))
# file.write("\n")
file.close()
logger.info("Data analysis has ended!")
|
fxfitz/ansible | refs/heads/devel | lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py | 51 | #!/usr/bin/python
#
# (c) Quentin Stafford-Fraser and Andy Baker 2015
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Create webfaction mailbox using Ansible and the Webfaction API
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_mailbox
short_description: Add or remove mailboxes on Webfaction
description:
- Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
mailbox_name:
description:
- The name of the mailbox
required: true
mailbox_password:
description:
- The password for the mailbox
required: true
state:
description:
- Whether the mailbox should exist
choices: ['present', 'absent']
default: "present"
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: Create a mailbox
webfaction_mailbox:
mailbox_name="mybox"
mailbox_password="myboxpw"
state=present
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec=dict(
mailbox_name=dict(required=True),
mailbox_password=dict(required=True, no_log=True),
state=dict(required=False, choices=['present', 'absent'], default='present'),
login_name=dict(required=True),
login_password=dict(required=True, no_log=True),
),
supports_check_mode=True
)
mailbox_name = module.params['mailbox_name']
site_state = module.params['state']
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
existing_mailbox = mailbox_name in mailbox_list
result = {}
# Here's where the real stuff happens
if site_state == 'present':
# Does a mailbox with this name already exist?
if existing_mailbox:
module.exit_json(changed=False,)
positional_args = [session_id, mailbox_name]
if not module.check_mode:
# If this isn't a dry run, create the mailbox
result.update(webfaction.create_mailbox(*positional_args))
elif site_state == 'absent':
# If the mailbox is already not there, nothing changed.
if not existing_mailbox:
module.exit_json(changed=False)
if not module.check_mode:
# If this isn't a dry run, delete the mailbox
result.update(webfaction.delete_mailbox(session_id, mailbox_name))
else:
module.fail_json(msg="Unknown state specified: {}".format(site_state))
module.exit_json(changed=True, result=result)
if __name__ == '__main__':
main()
|
venzozhang/GProject | refs/heads/master | src/core/examples/sample-rng-plot.py | 188 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
|
jenalgit/django | refs/heads/master | django/contrib/gis/db/models/fields.py | 310 | from django.contrib.gis import forms
from django.contrib.gis.db.models.lookups import gis_lookups
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.core.exceptions import ImproperlyConfigured
from django.db.models.expressions import Expression
from django.db.models.fields import Field
from django.utils import six
from django.utils.translation import ugettext_lazy as _
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
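# Minimal usage sketch (assumes a spatially-enabled database connection):
#   units, units_name, spheroid = get_srid_info(4326, connection)
#   # units_name would be 'degree' for WGS84; repeat calls hit _srid_cache.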
class GeoSelectFormatMixin(object):
def select_format(self, compiler, sql, params):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
connection = compiler.connection
srid = compiler.query.get_context('transformed_srid')
if srid:
sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid)
else:
sel_fmt = '%s'
if connection.ops.select:
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = connection.ops.select % sel_fmt
return sel_fmt % sql, params
class BaseSpatialField(Field):
"""
The Base GIS Field.
It's used as a base class for GeometryField and RasterField. Defines
properties that are common to all GIS fields such as the characteristics
of the spatial reference system of the field.
"""
description = _("The base GIS field.")
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
"""
The initialization function for base spatial fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
super(BaseSpatialField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(BaseSpatialField, self).deconstruct()
# Always include SRID for less fragility; include spatial index if it's
# not the default value.
kwargs['srid'] = self.srid
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
return name, path, args, kwargs
def db_type(self, connection):
return connection.ops.geo_db_type(self)
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the BaseSpatialField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
units_name = self.units_name(connection)
        # Some backends like MySQL cannot determine the units name. In that
        # case, test if the srid is 4326 (WGS84), even if this is an
        # over-simplification.
return units_name.lower() in self.geodetic_units if units_name else self.srid == 4326
def get_placeholder(self, value, compiler, connection):
"""
Returns the placeholder for the spatial column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value, compiler)
class GeometryField(GeoSelectFormatMixin, BaseSpatialField):
"""
The base Geometry field -- maps to the OpenGIS Specification Geometry type.
"""
description = _("The base Geometry field -- maps to the OpenGIS Specification Geometry type.")
form_class = forms.GeometryField
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
def __init__(self, verbose_name=None, dim=2, geography=False, **kwargs):
"""
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments:
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the dimension of the geometry field.
self.dim = dim
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(verbose_name=verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Include kwargs if they're not the default values.
if self.dim != 2:
kwargs['dim'] = self.dim
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# ### Routines specific to GeometryField ###
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
if isinstance(value, Expression):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def from_db_value(self, value, expression, connection, context):
if value and not isinstance(value, Geometry):
value = Geometry(value)
return value
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
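    # Sketch of the resolution rules above (SRID values are illustrative):
    #   field srid=4326, geom.srid is None -> 4326 (the field's SRID wins)
    #   field srid=4326, geom.srid == 3857 -> 3857 (the geometry's SRID wins)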
# ### Routines overloaded from Field ###
def contribute_to_class(self, cls, name, **kwargs):
super(GeometryField, self).contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, SpatialProxy(Geometry, self))
def formfield(self, **kwargs):
defaults = {'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
}
defaults.update(kwargs)
if (self.dim > 2 and 'widget' not in kwargs and
not getattr(defaults['form_class'].widget, 'supports_3d', False)):
defaults['widget'] = forms.Textarea
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
# special case for isnull lookup
if lookup_type == 'isnull':
return []
elif lookup_type in self.class_lookups:
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if self.class_lookups[lookup_type].distance:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, Expression):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'contains':
# 'contains' name might conflict with the "normal" contains lookup,
# for which the value is not prepared, but left as-is.
return self.get_prep_value(value)
return super(GeometryField, self).get_prep_lookup(lookup_type, value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if not value:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
for klass in gis_lookups.values():
GeometryField.register_lookup(klass)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
class ExtentField(GeoSelectFormatMixin, Field):
"Used as a return value from an extent aggregate"
description = _("Extent Aggregate Field")
def get_internal_type(self):
return "ExtentField"
class RasterField(BaseSpatialField):
"""
Raster field for GeoDjango -- evaluates into GDALRaster objects.
"""
description = _("Raster Field")
geom_type = 'RASTER'
def __init__(self, *args, **kwargs):
if not HAS_GDAL:
raise ImproperlyConfigured('RasterField requires GDAL.')
super(RasterField, self).__init__(*args, **kwargs)
def _check_connection(self, connection):
# Make sure raster fields are used only on backends with raster support.
if not connection.features.gis_enabled or not connection.features.supports_raster:
raise ImproperlyConfigured('Raster fields require backends with raster support.')
def db_type(self, connection):
self._check_connection(connection)
return super(RasterField, self).db_type(connection)
def from_db_value(self, value, expression, connection, context):
return connection.ops.parse_raster(value)
def get_db_prep_value(self, value, connection, prepared=False):
self._check_connection(connection)
# Prepare raster for writing to database.
if not prepared:
value = connection.ops.deconstruct_raster(value)
return super(RasterField, self).get_db_prep_value(value, connection, prepared)
def contribute_to_class(self, cls, name, **kwargs):
super(RasterField, self).contribute_to_class(cls, name, **kwargs)
# Importing GDALRaster raises an exception on systems without gdal.
from django.contrib.gis.gdal import GDALRaster
# Setup for lazy-instantiated Raster object. For large querysets, the
# instantiation of all GDALRasters can potentially be expensive. This
# delays the instantiation of the objects to the moment of evaluation
# of the raster attribute.
setattr(cls, self.attname, SpatialProxy(GDALRaster, self))
|
lucafavatella/intellij-community | refs/heads/cli-wip | python/lib/Lib/site-packages/django/shortcuts/__init__.py | 71 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def render(request, *args, **kwargs):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
httpresponse_kwargs = {
'content_type': kwargs.pop('content_type', None),
'status': kwargs.pop('status', None),
}
kwargs['context_instance'] = kwargs.get('context_instance', RequestContext(request))
return HttpResponse(loader.render_to_string(*args, **kwargs),
**httpresponse_kwargs)
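# Hedged usage sketch (template path and context are illustrative):
#   def my_view(request):
#       return render(request, 'myapp/index.html', {'items': [1, 2, 3]})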
def redirect(to, *args, **kwargs):
"""
    Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return redirect_class(to.get_absolute_url())
# Next try a reverse URL resolution.
try:
return redirect_class(urlresolvers.reverse(to, args=args, kwargs=kwargs))
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return redirect_class(to)
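# Usage sketch covering the three argument styles described in the docstring
# (the model instance, view name, and URL are illustrative):
#   redirect(post)                           # model with get_absolute_url()
#   redirect('post-detail', pk=42)           # view name, reverse-resolved
#   redirect('/some/url/', permanent=True)   # raw URL, permanent redirect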
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
else:
manager = klass._default_manager
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
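# Usage sketch (the model and field names are illustrative):
#   def article_detail(request, pk):
#       article = get_object_or_404(Article, pk=pk)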
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
|
Wingless-Archangel/OWASP-ZSC | refs/heads/master | core/obfuscate.py | 4 | #!/usr/bin/env python
'''
OWASP ZSC
https://www.owasp.org/index.php/OWASP_ZSC_Tool_Project
https://github.com/zscproject/OWASP-ZSC
http://api.z3r0d4y.com/
https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]
'''
from core.alert import *
from core.compatible import version
def obf_code(lang, encode, filename, content, cli):
    if version() == 3:
content = content.decode('utf-8')
start = getattr(
__import__('lib.encoder.%s.%s' % (lang, encode),
fromlist=['start']),
        'start')  # import the encoding module
    content = start(content, cli)  # encoded content is the returned value
    if version() == 3:
content = bytes(content, 'utf-8')
f = open(filename, 'wb') #writing content
f.write(content)
f.close()
info('file "%s" encoded successfully!\n' % filename)
return
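# Hypothetical invocation (the language/encoder pair and file name are
# assumptions for illustration, not a list of shipped encoders):
#   obf_code('python', 'base64', 'payload.py', open('payload.py', 'rb').read(), False)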
|
KAsante95/osf.io | refs/heads/develop | tests/test_oauth.py | 14 | from datetime import datetime
import httplib as http
import logging
import json
import time
import urlparse
import httpretty
from nose.tools import * # noqa
from framework.auth import authenticate
from framework.exceptions import PermissionsError, HTTPError
from framework.sessions import session
from website.oauth.models import (
ExternalAccount,
ExternalProvider,
OAUTH1,
OAUTH2,
)
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase
from tests.factories import (
AuthUserFactory,
ExternalAccountFactory,
MockOAuth2Provider,
UserFactory,
)
SILENT_LOGGERS = ['oauthlib', 'requests_oauthlib']
for logger in SILENT_LOGGERS:
logging.getLogger(logger).setLevel(logging.ERROR)
class MockOAuth1Provider(ExternalProvider):
_oauth_version = OAUTH1
name = "Mock OAuth 1.0a Provider"
short_name = "mock1a"
client_id = "mock1a_client_id"
client_secret = "mock1a_client_secret"
auth_url_base = "http://mock1a.com/auth"
request_token_url = "http://mock1a.com/request"
callback_url = "http://mock1a.com/callback"
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
def _prepare_mock_oauth2_handshake_response(expires_in=3600):
httpretty.register_uri(
httpretty.POST,
'https://mock2.com/callback',
body=json.dumps({
'access_token': 'mock_access_token',
'expires_at': time.time() + expires_in,
'expires_in': expires_in,
'refresh_token': 'mock_refresh_token',
'scope': ['all'],
'token_type': 'bearer',
}),
status=200,
content_type='application/json',
)
def _prepare_mock_500_error():
httpretty.register_uri(
httpretty.POST,
'https://mock2.com/callback',
body='{"error": "not found"}',
status=503,
content_type='application/json',
)
def _prepare_mock_401_error():
httpretty.register_uri(
httpretty.POST,
'https://mock2.com/callback',
body='{"error": "user denied access"}',
status=401,
content_type='application/json',
)
class TestExternalAccount(OsfTestCase):
# Test the ExternalAccount object and associated views.
#
# Functionality not specific to the OAuth version used by the
# ExternalProvider should go here.
def setUp(self):
super(TestExternalAccount, self).setUp()
self.user = AuthUserFactory()
self.provider = MockOAuth2Provider()
def tearDown(self):
ExternalAccount._clear_caches()
ExternalAccount.remove()
self.user.remove()
super(TestExternalAccount, self).tearDown()
def test_disconnect(self):
# Disconnect an external account from a user
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
)
self.user.external_accounts.append(external_account)
self.user.save()
# If the external account isn't attached, this test has no meaning
assert_equal(ExternalAccount.find().count(), 1)
assert_in(
external_account,
self.user.external_accounts,
)
response = self.app.delete(
api_url_for('oauth_disconnect',
external_account_id=external_account._id),
auth=self.user.auth
)
# Request succeeded
assert_equal(
response.status_code,
http.OK,
)
self.user.reload()
# external_account.reload()
# External account has been disassociated with the user
assert_not_in(
external_account,
self.user.external_accounts,
)
# External account is still in the database
assert_equal(ExternalAccount.find().count(), 1)
def test_disconnect_with_multiple_connected(self):
# Disconnect an account connected to multiple users from one user
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
)
self.user.external_accounts.append(external_account)
self.user.save()
other_user = UserFactory()
other_user.external_accounts.append(external_account)
other_user.save()
response = self.app.delete(
api_url_for('oauth_disconnect',
external_account_id=external_account._id),
auth=self.user.auth
)
# Request succeeded
assert_equal(
response.status_code,
http.OK,
)
self.user.reload()
# External account has been disassociated with the user
assert_not_in(
external_account,
self.user.external_accounts,
)
# External account is still in the database
assert_equal(ExternalAccount.find().count(), 1)
other_user.reload()
# External account is still associated with the other user
assert_in(
external_account,
other_user.external_accounts,
)
class TestExternalProviderOAuth1(OsfTestCase):
# Test functionality of the ExternalProvider class, for OAuth 1.0a
def setUp(self):
super(TestExternalProviderOAuth1, self).setUp()
self.user = UserFactory()
self.provider = MockOAuth1Provider()
def tearDown(self):
ExternalAccount.remove()
self.user.remove()
super(TestExternalProviderOAuth1, self).tearDown()
@httpretty.activate
def test_start_flow(self):
# Request temporary credentials from provider, provide auth redirect
httpretty.register_uri(httpretty.POST, 'http://mock1a.com/request',
body='{"oauth_token_secret": "temp_secret", '
'"oauth_token": "temp_token", '
'"oauth_callback_confirmed": "true"}',
status=200,
content_type='application/json')
with self.app.app.test_request_context('/oauth/connect/mock1a/'):
# make sure the user is logged in
authenticate(user=self.user, access_token=None, response=None)
# auth_url is a property method - it calls out to the external
# service to get a temporary key and secret before returning the
# auth url
url = self.provider.auth_url
# The URL to which the user would be redirected
assert_equal(url, "http://mock1a.com/auth?oauth_token=temp_token")
# Temporary credentials are added to the session
creds = session.data['oauth_states'][self.provider.short_name]
assert_equal(creds['token'], 'temp_token')
assert_equal(creds['secret'], 'temp_secret')
@httpretty.activate
def test_callback(self):
# Exchange temporary credentials for permanent credentials
# mock a successful call to the provider to exchange temp keys for
# permanent keys
httpretty.register_uri(
httpretty.POST,
'http://mock1a.com/callback',
body=(
'oauth_token=perm_token'
'&oauth_token_secret=perm_secret'
'&oauth_callback_confirmed=true'
),
)
user = UserFactory()
# Fake a request context for the callback
ctx = self.app.app.test_request_context(
path='/oauth/callback/mock1a/',
query_string='oauth_token=temp_key&oauth_verifier=mock_verifier',
)
with ctx:
# make sure the user is logged in
authenticate(user=user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'token': 'temp_key',
'secret': 'temp_secret',
},
}
session.save()
# do the key exchange
self.provider.auth_callback(user=user)
account = ExternalAccount.find_one()
assert_equal(account.oauth_key, 'perm_token')
assert_equal(account.oauth_secret, 'perm_secret')
assert_equal(account.provider_id, 'mock_provider_id')
assert_equal(account.provider_name, 'Mock OAuth 1.0a Provider')
@httpretty.activate
def test_callback_wrong_user(self):
# Reject temporary credentials not assigned to the user
#
# This prohibits users from associating their external account with
# another user's OSF account by using XSS or similar attack vector to
# complete the OAuth flow using the logged-in user but their own account
# on the external service.
#
# If the OSF were to allow login via OAuth with the provider in question,
# this would allow attackers to hijack OSF accounts with a simple script
# injection.
# mock a successful call to the provider to exchange temp keys for
# permanent keys
httpretty.register_uri(
httpretty.POST,
'http://mock1a.com/callback',
body='oauth_token=perm_token'
'&oauth_token_secret=perm_secret'
'&oauth_callback_confirmed=true',
)
user = UserFactory()
account = ExternalAccountFactory(
provider="mock1a",
provider_name='Mock 1A',
oauth_key="temp_key",
oauth_secret="temp_secret",
temporary=True
)
account.save()
# associate this ExternalAccount instance with the user
user.external_accounts.append(account)
user.save()
malicious_user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock1a/",
query_string="oauth_token=temp_key&oauth_verifier=mock_verifier"
):
# make sure the user is logged in
authenticate(user=malicious_user, access_token=None, response=None)
with assert_raises(PermissionsError):
# do the key exchange
self.provider.auth_callback(user=malicious_user)
class TestExternalProviderOAuth2(OsfTestCase):
# Test functionality of the ExternalProvider class, for OAuth 2.0
def setUp(self):
super(TestExternalProviderOAuth2, self).setUp()
self.user = UserFactory()
self.provider = MockOAuth2Provider()
def tearDown(self):
ExternalAccount._clear_caches()
ExternalAccount.remove()
self.user.remove()
super(TestExternalProviderOAuth2, self).tearDown()
def test_oauth_version_default(self):
# OAuth 2.0 is the default version
assert_is(self.provider._oauth_version, OAUTH2)
def test_start_flow(self):
# Generate the appropriate URL and state token
with self.app.app.test_request_context("/oauth/connect/mock2/"):
# make sure the user is logged in
authenticate(user=self.user, access_token=None, response=None)
# auth_url is a property method - it calls out to the external
# service to get a temporary key and secret before returning the
# auth url
url = self.provider.auth_url
# Temporary credentials are added to the session
creds = session.data['oauth_states'][self.provider.short_name]
assert_in('state', creds)
# The URL to which the user would be redirected
parsed = urlparse.urlparse(url)
params = urlparse.parse_qs(parsed.query)
# check parameters
assert_equal(
params,
{
'state': [creds['state']],
'response_type': ['code'],
'client_id': [self.provider.client_id],
'redirect_uri': [
web_url_for('oauth_callback',
service_name=self.provider.short_name,
_absolute=True)
]
}
)
# check base URL
assert_equal(
url.split("?")[0],
"https://mock2.com/auth",
)
@httpretty.activate
def test_callback(self):
# Exchange temporary credentials for permanent credentials
# Mock the exchange of the code for an access token
_prepare_mock_oauth2_handshake_response()
user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="code=mock_code&state=mock_state"
):
# make sure the user is logged in
authenticate(user=self.user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
# do the key exchange
self.provider.auth_callback(user=user)
account = ExternalAccount.find_one()
assert_equal(account.oauth_key, 'mock_access_token')
assert_equal(account.provider_id, 'mock_provider_id')
@httpretty.activate
def test_provider_down(self):
# Create a 500 error
_prepare_mock_500_error()
user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="code=mock_code&state=mock_state"
):
# make sure the user is logged in
authenticate(user=user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
# do the key exchange
with assert_raises(HTTPError) as error_raised:
self.provider.auth_callback(user=user)
assert_equal(
error_raised.exception.code,
503,
)
@httpretty.activate
def test_user_denies_access(self):
# Create a 401 error
_prepare_mock_401_error()
user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="error=mock_error&code=mock_code&state=mock_state"
):
# make sure the user is logged in
authenticate(user=user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
assert_false(self.provider.auth_callback(user=user))
@httpretty.activate
def test_multiple_users_associated(self):
# Create only one ExternalAccount for multiple OSF users
#
# For some providers (ex: GitHub), the act of completing the OAuth flow
# revokes previously generated credentials. In addition, there is often no
# way to know the user's id on the external service until after the flow
# has completed.
#
# Having only one ExternalAccount instance per account on the external
# service means that connecting subsequent OSF users to the same external
# account will not invalidate the credentials used by the OSF for users
# already associated.
user_a = UserFactory()
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
)
user_a.external_accounts.append(external_account)
user_a.save()
user_b = UserFactory()
# Mock the exchange of the code for an access token
_prepare_mock_oauth2_handshake_response()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="code=mock_code&state=mock_state"
) as ctx:
# make sure the user is logged in
authenticate(user=user_b, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
# do the key exchange
self.provider.auth_callback(user=user_b)
user_a.reload()
user_b.reload()
external_account.reload()
assert_equal(
user_a.external_accounts,
user_b.external_accounts,
)
assert_equal(
ExternalAccount.find().count(),
1
)
@httpretty.activate
def test_force_refresh_oauth_key(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 200)
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'access_token': 'refreshed_access_token',
'expires_in': 3600,
'refresh_token': 'refreshed_refresh_token'
})
)
old_expiry = external_account.expires_at
self.provider.account = external_account
self.provider.refresh_oauth_key(force=True)
external_account.reload()
assert_equal(external_account.oauth_key, 'refreshed_access_token')
assert_equal(external_account.refresh_token, 'refreshed_refresh_token')
assert_not_equal(external_account.expires_at, old_expiry)
assert_true(external_account.expires_at > old_expiry)
@httpretty.activate
def test_does_need_refresh(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 200),
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'access_token': 'refreshed_access_token',
'expires_in': 3600,
'refresh_token': 'refreshed_refresh_token'
})
)
old_expiry = external_account.expires_at
self.provider.account = external_account
self.provider.refresh_oauth_key(force=False)
external_account.reload()
assert_equal(external_account.oauth_key, 'refreshed_access_token')
assert_equal(external_account.refresh_token, 'refreshed_refresh_token')
assert_not_equal(external_account.expires_at, old_expiry)
assert_true(external_account.expires_at > old_expiry)
@httpretty.activate
def test_does_not_need_refresh(self):
self.provider.refresh_time = 1
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
refresh_token='old_refresh',
expires_at=datetime.utcfromtimestamp(time.time() + 200),
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
        # .reload() has the side effect of rounding the microseconds down to 3 significant figures
# (e.g. DT(YMDHMS, 365420) becomes DT(YMDHMS, 365000)),
# but must occur after possible refresh to reload tokens.
# Doing so before allows the `old_expiry == EA.expires_at` comparison to work.
external_account.reload()
old_expiry = external_account.expires_at
self.provider.account = external_account
self.provider.refresh_oauth_key(force=False)
external_account.reload()
assert_equal(external_account.oauth_key, 'old_key')
assert_equal(external_account.refresh_token, 'old_refresh')
assert_equal(external_account.expires_at, old_expiry)
@httpretty.activate
def test_refresh_oauth_key_does_not_need_refresh(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=0 # causes `.needs_refresh()` to return False
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
self.provider.account = external_account
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
@httpretty.activate
def test_refresh_with_broken_provider(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 200)
)
self.provider.client_id = None
self.provider.client_secret = None
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
@httpretty.activate
def test_refresh_without_account_or_refresh_url(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() + 200)
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
|
cbgaindia/parsers | refs/heads/master | keywords_extractor.py | 1 | 'Class for extracting keywords for PDF Documents in a directory'
import csv
import glob,os
import logging
from logging.config import fileConfig
from lxml import etree
import re
import time
DOC_DIR = "union_budgets/2015-16/Expenditure Budget/Volume II/"
OUT_FILE = "union_budgets/2015-16/expenditure_budget_keywords_map.csv"
OUT_CSV_HEADER = ["Department", "Keywords"]
TEMP_INDEX_FILE = "/tmp/page.html"
TEMP_HTML_FILE = "/tmp/pages.html"
LOG_FILE = "/tmp/log"
SKIP_WORDS = ["total", "b. investment in public enterprises", "c. plan outlay", "other programmes", "grand total", "central plan", "state plan", "union territory plans", "union territory plans (with legislature)"]
DEFAULT_KEYWORD_XPATH = "//b/text()|//i/text()"
fileConfig('parsers/logging_config.ini')
logger = logging.getLogger()
class KeywordsExtractor(object):
def __init__(self):
self.skip_words = SKIP_WORDS
self.department_name = ""
def extract_bold_keywords(self):
'''Extract Bold keywords from all PDF documents in the directory and generate a CSV mapping
'''
with open(OUT_FILE, "wb") as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(OUT_CSV_HEADER)
for file_name in glob.glob("%s*.pdf" % DOC_DIR):
try:
self.department_name = os.path.basename(file_name).lower().split(".pdf")[0].decode('utf-8')
bold_text_phrases = self.get_bold_text_phrases(file_name)
csv_writer.writerow([os.path.basename(file_name).split(".pdf")[0].decode('utf-8'), str(bold_text_phrases)])
logger.info("Processing PDF document for department: %s" % self.department_name)
except Exception, error_message:
logger.error("Unable to extract keywords for department: %s, error_message: %s" % (self.department_name, error_message))
def get_bold_text_phrases(self, file_name, keyword_xpath=DEFAULT_KEYWORD_XPATH,is_other_starting_phrases=False, single_word=False, page_num=None, lower_case=True):
'''Extract bold text phrases from input HTML object
'''
html_obj = self.get_html_object(file_name, page_num)
dom_tree = etree.HTML(html_obj.read())
bold_text_phrases = []
previous_keyword = None
for phrase in dom_tree.xpath(keyword_xpath):
phrase = self.clean_extracted_phrase(phrase, is_other_starting_phrases, lower_case)
            if re.search(r'^no\. [0-9]+/|^no\. [0-9]+|^total-|^total -', phrase) or phrase == self.department_name.encode('utf-8'):
continue
if phrase in self.skip_words and not is_other_starting_phrases:
continue
if re.search(r'[A-Za-z]{2,}', phrase):
if not phrase in bold_text_phrases:
if not single_word and not len(phrase.split(" ")) > 1:
continue
bold_text_phrases.append(phrase.strip())
return bold_text_phrases
def clean_extracted_phrase(self, phrase, is_other_starting_phrases, lower_case):
'''Cleanse phrase text to remove unwanted characters and words
'''
if lower_case:
phrase = phrase.lower()
phrase = phrase.encode('utf-8').replace('\xa0', ' ').replace('\xc2', '').strip()
phrase = re.sub(r'\s{2,}', ' ', phrase)
if not is_other_starting_phrases:
phrase = re.sub(r'[^a-zA-Z\d\)]$', '', phrase)
phrase = re.sub(r', ETC.$|, etc.$', '', phrase)
phrase = re.sub(r'^other ', '', phrase).strip()
return phrase
def get_html_object(self, file_name, page_num):
'''Convert PDF file into HTML file using pdftohtml(http://sourceforge.net/projects/pdftohtml/)
'''
file_stub = re.sub(r'\s', '_', os.path.basename(file_name).split(".pdf")[0].lower().strip())
index_file = TEMP_INDEX_FILE.replace(".html", "_%s.html" % file_stub)
html_file = TEMP_INDEX_FILE.replace(".html", "_%ss.html" % file_stub)
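        # pdftohtml (frames mode) writes an index/frameset file plus a
        # companion content file whose name gains a trailing 's' before
        # '.html'; the parser reads the latter, hence the "_%ss.html" above.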
if page_num:
command = "pdftohtml -f '%s' -l '%s' '%s' '%s' > %s" % (page_num, page_num, file_name, index_file, LOG_FILE)
else:
command = "pdftohtml '%s' '%s' > %s" % (file_name, index_file, LOG_FILE)
os.system(command)
html_obj = open(html_file, "rb")
return html_obj
if __name__ == '__main__':
obj = KeywordsExtractor()
obj.extract_bold_keywords()
|
joegomes/deepchem | refs/heads/master | examples/delaney/delaney_datasets.py | 2 | """
Delaney dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
import deepchem as dc
def load_delaney(featurizer='ECFP', split='index'):
"""Load delaney datasets."""
# Featurize Delaney dataset
print("About to featurize Delaney dataset.")
current_dir = os.path.dirname(os.path.realpath(__file__))
dataset_file = os.path.join(
current_dir, "../../datasets/delaney-processed.csv")
delaney_tasks = ['measured log solubility in mols per litre']
if featurizer == 'ECFP':
featurizer = dc.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = dc.feat.ConvMolFeaturizer()
loader = dc.data.CSVLoader(
tasks=delaney_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(
dataset_file, shard_size=8192)
# Initialize transformers
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter(),
'scaffold': dc.splits.ScaffoldSplitter()}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return delaney_tasks, (train, valid, test), transformers
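# Usage sketch (hypothetical driver, not part of the original example):
#
#   tasks, (train, valid, test), transformers = load_delaney(
#       featurizer='GraphConv', split='scaffold')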
|
tima/beets | refs/heads/master | beetsplug/duplicates.py | 14 | # This file is part of beets.
# Copyright 2015, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import shlex
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess
from beets.library import Item, Album
PLUGIN = 'duplicates'
class DuplicatesPlugin(BeetsPlugin):
"""List duplicate tracks or albums
"""
def __init__(self):
super(DuplicatesPlugin, self).__init__()
self.config.add({
'album': False,
'checksum': '',
'copy': '',
'count': False,
'delete': False,
'format': '',
'full': False,
'keys': [],
'merge': False,
'move': '',
'path': False,
'tiebreak': {},
'strict': False,
'tag': '',
})
self._command = Subcommand('duplicates',
help=__doc__,
aliases=['dup'])
self._command.parser.add_option('-c', '--count', dest='count',
action='store_true',
help='show duplicate counts')
self._command.parser.add_option('-C', '--checksum', dest='checksum',
action='store', metavar='PROG',
help='report duplicates based on'
' arbitrary command')
self._command.parser.add_option('-d', '--delete', dest='delete',
action='store_true',
help='delete items from library and '
'disk')
self._command.parser.add_option('-F', '--full', dest='full',
action='store_true',
help='show all versions of duplicate'
' tracks or albums')
self._command.parser.add_option('-s', '--strict', dest='strict',
action='store_true',
help='report duplicates only if all'
' attributes are set')
self._command.parser.add_option('-k', '--keys', dest='keys',
action='callback', metavar='KEY1 KEY2',
callback=vararg_callback,
help='report duplicates based on keys')
self._command.parser.add_option('-M', '--merge', dest='merge',
action='store_true',
help='merge duplicate items')
self._command.parser.add_option('-m', '--move', dest='move',
action='store', metavar='DEST',
help='move items to dest')
self._command.parser.add_option('-o', '--copy', dest='copy',
action='store', metavar='DEST',
help='copy items to dest')
self._command.parser.add_option('-t', '--tag', dest='tag',
action='store',
help='tag matched items with \'k=v\''
' attribute')
self._command.parser.add_all_common_options()
def commands(self):
def _dup(lib, opts, args):
self.config.set_args(opts)
album = self.config['album'].get(bool)
checksum = self.config['checksum'].get(str)
copy = self.config['copy'].get(str)
count = self.config['count'].get(bool)
delete = self.config['delete'].get(bool)
fmt = self.config['format'].get(str)
full = self.config['full'].get(bool)
keys = self.config['keys'].get(list)
merge = self.config['merge'].get(bool)
move = self.config['move'].get(str)
path = self.config['path'].get(bool)
tiebreak = self.config['tiebreak'].get(dict)
strict = self.config['strict'].get(bool)
tag = self.config['tag'].get(str)
if album:
if not keys:
keys = ['mb_albumid']
items = lib.albums(decargs(args))
else:
if not keys:
keys = ['mb_trackid', 'mb_albumid']
items = lib.items(decargs(args))
if path:
fmt = '$path'
# Default format string for count mode.
if count and not fmt:
if album:
fmt = '$albumartist - $album'
else:
fmt = '$albumartist - $album - $title'
fmt += ': {0}'
if checksum:
for i in items:
k, _ = self._checksum(i, checksum)
keys = [k]
for obj_id, obj_count, objs in self._duplicates(items,
keys=keys,
full=full,
strict=strict,
tiebreak=tiebreak,
merge=merge):
if obj_id: # Skip empty IDs.
for o in objs:
self._process_item(o,
copy=copy,
move=move,
delete=delete,
tag=tag,
fmt=fmt.format(obj_count))
self._command.func = _dup
return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=''):
"""Process Item `item`.
"""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, copy=True)
item.store()
if move:
item.move(basedir=move, copy=False)
item.store()
if delete:
item.remove(delete=True)
if tag:
try:
k, v = tag.split('=')
            except ValueError:
raise UserError('%s: can\'t parse k=v tag: %s' % (PLUGIN, tag))
setattr(item, k, v)
item.store()
def _checksum(self, item, prog):
"""Run external `prog` on file path associated with `item`, cache
output as flexattr on a key that is the name of the program, and
return the key, checksum tuple.
"""
args = [p.format(file=item.path) for p in shlex.split(prog)]
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
self._log.debug(u'key {0} on item {1} not cached:'
'computing checksum',
key, displayable_path(item.path))
try:
checksum = command_output(args)
setattr(item, key, checksum)
item.store()
self._log.debug(u'computed checksum for {0} using {1}',
item.title, key)
except subprocess.CalledProcessError as e:
self._log.debug(u'failed to checksum {0}: {1}',
displayable_path(item.path), e)
else:
self._log.debug(u'key {0} on item {1} cached:'
'not computing checksum',
key, displayable_path(item.path))
return key, checksum
def _group_by(self, objs, keys, strict):
"""Return a dictionary with keys arbitrary concatenations of attributes
and values lists of objects (Albums or Items) with those keys.
If strict, all attributes must be defined for a duplicate match.
"""
import collections
counts = collections.defaultdict(list)
for obj in objs:
values = [getattr(obj, k, None) for k in keys]
values = [v for v in values if v not in (None, '')]
if strict and len(values) < len(keys):
self._log.debug(u'some keys {0} on item {1} are null or empty:'
' skipping',
keys, displayable_path(obj.path))
elif (not strict and not len(values)):
self._log.debug(u'all keys {0} on item {1} are null or empty:'
' skipping',
keys, displayable_path(obj.path))
else:
key = tuple(values)
counts[key].append(obj)
return counts
def _order(self, objs, tiebreak=None):
"""Return the objects (Items or Albums) sorted by descending
order of priority.
If provided, the `tiebreak` dict indicates the field to use to
prioritize the objects. Otherwise, Items are placed in order of
"completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count.
"""
if tiebreak:
kind = 'items' if all(isinstance(o, Item)
for o in objs) else 'albums'
key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
else:
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
def truthy(v):
# Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode
# string ''.
return v is not None and \
(v != '' if isinstance(v, unicode) else True)
fields = kind.all_keys()
key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
else:
key = lambda x: len(x.items())
return sorted(objs, key=key, reverse=True)
def _merge_items(self, objs):
"""Merge Item objs by copying missing fields from items in the tail to
the head item.
Return same number of items, with the head item modified.
"""
fields = [f for sublist in Item.get_fields() for f in sublist]
for f in fields:
for o in objs[1:]:
if getattr(objs[0], f, None) in (None, ''):
value = getattr(o, f, None)
if value:
self._log.debug(u'key {0} on item {1} is null '
'or empty: setting from item {2}',
f, displayable_path(objs[0].path),
displayable_path(o.path))
setattr(objs[0], f, value)
objs[0].store()
break
return objs
def _merge_albums(self, objs):
"""Merge Album objs by copying missing items from albums in the tail
to the head album.
Return same number of albums, with the head album modified."""
ids = [i.mb_trackid for i in objs[0].items()]
for o in objs[1:]:
for i in o.items():
if i.mb_trackid not in ids:
missing = Item.from_path(i.path)
missing.album_id = objs[0].id
missing.add(i._db)
self._log.debug(u'item {0} missing from album {1}:'
' merging from {2} into {3}',
missing,
objs[0],
displayable_path(o.path),
displayable_path(missing.destination()))
missing.move(copy=True)
return objs
def _merge(self, objs):
"""Merge duplicate items. See ``_merge_items`` and ``_merge_albums``
for the relevant strategies.
"""
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
objs = self._merge_items(objs)
else:
objs = self._merge_albums(objs)
return objs
def _duplicates(self, objs, keys, full, strict, tiebreak, merge):
"""Generate triples of keys, duplicate counts, and constituent objects.
"""
offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).iteritems():
if len(objs) > 1:
objs = self._order(objs, tiebreak)
if merge:
objs = self._merge(objs)
yield (k, len(objs) - offset, objs[offset:])
|
jiadaizhao/LeetCode | refs/heads/master | 1301-1400/1392-Longest Happy Prefix/1392-Longest Happy Prefix.py | 1 | class Solution:
def longestPrefix(self, s: str) -> str:
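        # Build the KMP failure function: lps[i] is the length of the
        # longest proper prefix of s[:i+1] that is also its suffix.
        # The longest happy prefix is then the prefix of length lps[-1].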
lps = [0] * len(s)
l = 0
i = 1
while i < len(s):
if s[i] == s[l]:
lps[i] = l + 1
i += 1
l += 1
elif l != 0:
l = lps[l - 1]
else:
i += 1
return s[0: lps[-1]]
|
openhatch/oh-mainline | refs/heads/master | vendor/packages/twisted/twisted/internet/iocpreactor/const.py | 84 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows constants for IOCP
"""
# this stuff should really be gotten from Windows headers via pyrex, but it
# probably is not going to change
ERROR_PORT_UNREACHABLE = 1234
ERROR_NETWORK_UNREACHABLE = 1231
ERROR_CONNECTION_REFUSED = 1225
ERROR_IO_PENDING = 997
ERROR_OPERATION_ABORTED = 995
WAIT_TIMEOUT = 258
ERROR_NETNAME_DELETED = 64
ERROR_HANDLE_EOF = 38
INFINITE = -1
SO_UPDATE_CONNECT_CONTEXT = 0x7010
SO_UPDATE_ACCEPT_CONTEXT = 0x700B
|
allotria/intellij-community | refs/heads/master | python/testData/quickFixes/PyAddImportQuickFixTest/fullFromImportSourceNameInSuggestion/main_after.py | 18 | from foo.bar.baz import ClassA, ClassB
print(ClassA, ClassB)
|
azumimuo/family-xbmc-addon | refs/heads/master | plugin.video.bubbles/resources/lib/externals/hachoir/hachoir_parser/program/exe.py | 1 | """
Microsoft Windows Portable Executable (PE) file parser.
Informations:
- Microsoft Portable Executable and Common Object File Format Specification:
http://www.microsoft.com/whdc/system/platform/firmware/PECOFF.mspx
Author: Victor Stinner
Creation date: 2006-08-13
"""
from resources.lib.externals.hachoir.hachoir_parser import HachoirParser
from resources.lib.externals.hachoir.hachoir_core.endian import LITTLE_ENDIAN
from resources.lib.externals.hachoir.hachoir_core.field import (FieldSet, RootSeekableFieldSet,
UInt16, UInt32, String,
RawBytes, PaddingBytes)
from resources.lib.externals.hachoir.hachoir_core.text_handler import textHandler, hexadecimal
from resources.lib.externals.hachoir.hachoir_parser.program.exe_ne import NE_Header
from resources.lib.externals.hachoir.hachoir_parser.program.exe_pe import PE_Header, PE_OptHeader, SectionHeader
from resources.lib.externals.hachoir.hachoir_parser.program.exe_res import PE_Resource, NE_VersionInfoNode
MAX_NB_SECTION = 50
class MSDosHeader(FieldSet):
static_size = 64*8
def createFields(self):
yield String(self, "header", 2, "File header (MZ)", charset="ASCII")
yield UInt16(self, "size_mod_512", "File size in bytes modulo 512")
yield UInt16(self, "size_div_512", "File size in bytes divide by 512")
yield UInt16(self, "reloc_entries", "Number of relocation entries")
yield UInt16(self, "code_offset", "Offset to the code in the file (divided by 16)")
yield UInt16(self, "needed_memory", "Memory needed to run (divided by 16)")
yield UInt16(self, "max_memory", "Maximum memory needed to run (divided by 16)")
yield textHandler(UInt32(self, "init_ss_sp", "Initial value of SP:SS registers"), hexadecimal)
yield UInt16(self, "checksum", "Checksum")
yield textHandler(UInt32(self, "init_cs_ip", "Initial value of CS:IP registers"), hexadecimal)
yield UInt16(self, "reloc_offset", "Offset in file to relocation table")
yield UInt16(self, "overlay_number", "Overlay number")
yield PaddingBytes(self, "reserved[]", 8, "Reserved")
yield UInt16(self, "oem_id", "OEM id")
yield UInt16(self, "oem_info", "OEM info")
yield PaddingBytes(self, "reserved[]", 20, "Reserved")
yield UInt32(self, "next_offset", "Offset to next header (PE or NE)")
def isValid(self):
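        # Sanity-check the MS-DOS header fields; a PE-style file (small
        # size_div_512) must also carry a zero checksum and a plausible
        # offset to the PE/NE header.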
if 512 <= self["size_mod_512"].value:
return "Invalid field 'size_mod_512' value"
if self["code_offset"].value < 4:
return "Invalid code offset"
looks_pe = self["size_div_512"].value < 4
if looks_pe:
if self["checksum"].value != 0:
return "Invalid value of checksum"
if not (80 <= self["next_offset"].value <= 1024):
return "Invalid value of next_offset"
return ""
class ExeFile(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "exe",
"category": "program",
"file_ext": ("exe", "dll", "ocx"),
"mime": (u"application/x-dosexec",),
"min_size": 64*8,
#"magic": (("MZ", 0),),
"magic_regex": (("MZ.[\0\1].{4}[^\0\1\2\3]", 0),),
"description": "Microsoft Windows Portable Executable"
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, 2) != 'MZ':
return "Wrong header"
err = self["msdos"].isValid()
if err:
return "Invalid MSDOS header: "+err
if self.isPE():
if MAX_NB_SECTION < self["pe_header/nb_section"].value:
return "Invalid number of section (%s)" \
% self["pe_header/nb_section"].value
return True
def createFields(self):
yield MSDosHeader(self, "msdos", "MS-DOS program header")
if self.isPE() or self.isNE():
offset = self["msdos/next_offset"].value
self.seekByte(offset, relative=False)
if self.isPE():
for field in self.parsePortableExecutable():
yield field
elif self.isNE():
for field in self.parseNE_Executable():
yield field
else:
offset = self["msdos/code_offset"].value * 16
self.seekByte(offset, relative=False)
def parseNE_Executable(self):
yield NE_Header(self, "ne_header")
# FIXME: Compute resource offset instead of using searchBytes()
        # Ugly hack to find the version info structure
start = self.current_size
addr = self.stream.searchBytes('VS_VERSION_INFO', start)
if addr:
self.seekBit(addr-32)
yield NE_VersionInfoNode(self, "info")
def parsePortableExecutable(self):
# Read PE header
yield PE_Header(self, "pe_header")
# Read PE optional header
size = self["pe_header/opt_hdr_size"].value
rsrc_rva = None
if size:
yield PE_OptHeader(self, "pe_opt_header", size=size*8)
if "pe_opt_header/resource/rva" in self:
rsrc_rva = self["pe_opt_header/resource/rva"].value
# Read section headers
sections = []
for index in xrange(self["pe_header/nb_section"].value):
section = SectionHeader(self, "section_hdr[]")
yield section
if section["phys_size"].value:
sections.append(section)
# Read sections
sections.sort(key=lambda field: field["phys_off"].value)
for section in sections:
self.seekByte(section["phys_off"].value)
size = section["phys_size"].value
if size:
name = section.createSectionName()
if rsrc_rva is not None and section["rva"].value == rsrc_rva:
yield PE_Resource(self, name, section, size=size*8)
else:
yield RawBytes(self, name, size)
def isPE(self):
if not hasattr(self, "_is_pe"):
self._is_pe = False
offset = self["msdos/next_offset"].value * 8
if 2*8 <= offset \
and (offset+PE_Header.static_size) <= self.size \
and self.stream.readBytes(offset, 4) == 'PE\0\0':
self._is_pe = True
return self._is_pe
def isNE(self):
if not hasattr(self, "_is_ne"):
self._is_ne = False
offset = self["msdos/next_offset"].value * 8
if 64*8 <= offset \
and (offset+NE_Header.static_size) <= self.size \
and self.stream.readBytes(offset, 2) == 'NE':
self._is_ne = True
return self._is_ne
def getResource(self):
# MS-DOS program: no resource
if not self.isPE():
return None
# Check if PE has resource or not
if "pe_opt_header/resource/size" in self:
if not self["pe_opt_header/resource/size"].value:
return None
if "section_rsrc" in self:
return self["section_rsrc"]
return None
def createDescription(self):
if self.isPE():
if self["pe_header/is_dll"].value:
text = u"Microsoft Windows DLL"
else:
text = u"Microsoft Windows Portable Executable"
info = [self["pe_header/cpu"].display]
if "pe_opt_header" in self:
hdr = self["pe_opt_header"]
info.append(hdr["subsystem"].display)
if self["pe_header/is_stripped"].value:
info.append(u"stripped")
return u"%s: %s" % (text, ", ".join(info))
elif self.isNE():
return u"New-style Executable (NE) for Microsoft MS Windows 3.x"
else:
return u"MS-DOS executable"
def createContentSize(self):
if self.isPE():
size = 0
for index in xrange(self["pe_header/nb_section"].value):
section = self["section_hdr[%u]" % index]
section_size = section["phys_size"].value
if not section_size:
continue
section_size = (section_size + section["phys_off"].value) * 8
if size:
size = max(size, section_size)
else:
size = section_size
if size:
return size
else:
return None
elif self.isNE():
# TODO: Guess NE size
return None
else:
size = self["msdos/size_mod_512"].value + (self["msdos/size_div_512"].value-1) * 512
if size < 0:
return None
return size*8
|
gijzelaerr/djonet | refs/heads/master | tests/testapp/models.py | 2 | # Copyright (c) 2009 - 2010, Mark Bucciarelli <mkbucc@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from django.db import models
class Simple(models.Model):
name = models.CharField(max_length=25, unique=True)
def __unicode__(self):
return self.name
class JustAsSimple(models.Model):
name = models.CharField(max_length=25, unique=True)
def __unicode__(self):
return self.name
class Parent(models.Model):
'''For testing cascading deletes.'''
name = models.CharField(max_length=25, unique=True)
simple = models.ForeignKey(Simple)
class Aunt(models.Model):
'''For testing cascading deletes.'''
name = models.CharField(max_length=25, unique=True)
simple = models.ForeignKey(Simple)
class GrandParent(models.Model):
'''For testing cascading deletes.'''
name = models.CharField(max_length=25, unique=True)
parent = models.ForeignKey(Parent)
class CommonFieldTypes(models.Model):
'''All fields and options listed in Django 1.1-beta docs are below.
This class tests some of the more common setups, where common is
based on a sample size of one. ;)
Field options
null
blank
choices
db_column
db_index
db_tablespace
default
editable
help_text
primary_key
unique
unique_for_date
unique_for_month
unique_for_year
verbose_name
Field types
AutoField
BooleanField
CharField
CommaSeparatedIntegerField
DateField
DateTimeField
DecimalField
EmailField
FileField
FilePathField
FloatField
ImageField
IntegerField
IPAddressField
NullBooleanField
PositiveIntegerField
PositiveSmallIntegerField
SlugField
SmallIntegerField
TextField
TimeField
URLField
XMLField
Relationship fields
ForeignKey
ManyToManyField
OneToOneField
'''
GENDER_TYPE = (
( 'M', 'Male'),
( 'F', 'Female'),
( 'N', 'Neutral'),
)
choice = models.CharField(choices=GENDER_TYPE, max_length = 1)
null_and_blank_string = models.CharField('Blank String',
max_length = 50, blank = True, null = True)
blank_text = models.TextField(blank=True)
simple = models.ForeignKey(Simple)
created = models.DateTimeField('created', auto_now_add=True)
url = models.URLField(max_length=500, blank=True)
weight = models.PositiveIntegerField()
sdt = models.DateField('Start Date', blank=True, default="")
seasonal = models.BooleanField(default=False, db_index=True)
amt = models.FloatField()
empty_amt = models.FloatField(blank=True, null=True)
many_to_many = models.ManyToManyField(JustAsSimple)
class Meta:
ordering = ('choice',)
unique_together = ('url', 'sdt', 'amt')
def __unicode__(self):
return '%s' % (self.id,)
|
ZazieTheBeast/oscar | refs/heads/master | src/oscar/core/logging/formatters.py | 30 | import re
from logging import Formatter
class PciFormatter(Formatter):
"""
Strip card numbers out of log messages to avoid leaving sensitive
information in the logs.
"""
def format(self, record):
s = Formatter.format(self, record)
return re.sub(r'\d[ \d-]{15,22}', 'XXXX-XXXX-XXXX-XXXX', s)
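# Usage sketch (assumed wiring, not shipped with this module): register the
# formatter in a Django LOGGING dict so handlers scrub card numbers:
#
#   LOGGING = {
#       'version': 1,
#       'formatters': {
#           'pci': {'()': 'oscar.core.logging.formatters.PciFormatter'},
#       },
#       'handlers': {
#           'file': {'class': 'logging.FileHandler',
#                    'filename': 'app.log', 'formatter': 'pci'},
#       },
#       'root': {'handlers': ['file'], 'level': 'INFO'},
#   }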
|
bckwltn/SickRage | refs/heads/master | sickbeard/clients/deluge.py | 2 | # Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import json
from base64 import b64encode
import sickbeard
from sickbeard import logger
from sickbeard.clients.generic import GenericClient
class DelugeAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(DelugeAPI, self).__init__('Deluge', host, username, password)
self.url = self.host + 'json'
def _get_auth(self):
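        # The Deluge WebUI exposes a JSON-RPC API: log in with auth.login,
        # check web.connected, and if the WebUI has no daemon attached,
        # connect it to the first host returned by web.get_hosts.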
post_data = json.dumps({"method": "auth.login",
"params": [self.password],
"id": 1
})
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), verify=sickbeard.TORRENT_VERIFY_CERT)
        except Exception:
return None
self.auth = self.response.json()["result"]
post_data = json.dumps({"method": "web.connected",
"params": [],
"id": 10
})
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), verify=sickbeard.TORRENT_VERIFY_CERT)
        except Exception:
return None
connected = self.response.json()['result']
if not connected:
post_data = json.dumps({"method": "web.get_hosts",
"params": [],
"id": 11
})
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), verify=sickbeard.TORRENT_VERIFY_CERT)
            except Exception:
return None
hosts = self.response.json()['result']
if len(hosts) == 0:
logger.log(self.name + u': WebUI does not contain daemons', logger.ERROR)
return None
post_data = json.dumps({"method": "web.connect",
"params": [hosts[0][0]],
"id": 11
})
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), verify=sickbeard.TORRENT_VERIFY_CERT)
            except Exception:
return None
post_data = json.dumps({"method": "web.connected",
"params": [],
"id": 10
})
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), verify=sickbeard.TORRENT_VERIFY_CERT)
            except Exception:
return None
connected = self.response.json()['result']
if not connected:
logger.log(self.name + u': WebUI could not connect to daemon', logger.ERROR)
return None
return self.auth
def _add_torrent_uri(self, result):
post_data = json.dumps({"method": "core.add_torrent_magnet",
"params": [result.url, {"move_completed": "true",
"move_completed_path": sickbeard.TV_DOWNLOAD_DIR}],
"id": 2
})
self._request(method='post', data=post_data)
result.hash = self.response.json()['result']
return self.response.json()['result']
def _add_torrent_file(self, result):
post_data = json.dumps({"method": "core.add_torrent_file",
"params": [result.name + '.torrent', b64encode(result.content),
{"move_completed": "true",
"move_completed_path": sickbeard.TV_DOWNLOAD_DIR}],
"id": 2
})
self._request(method='post', data=post_data)
result.hash = self.response.json()['result']
return self.response.json()['result']
def _set_torrent_label(self, result):
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if ' ' in label:
logger.log(self.name + u': Invalid label. Label must not contain a space', logger.ERROR)
return False
if label:
# check if label already exists and create it if not
post_data = json.dumps({"method": 'label.get_labels',
"params": [],
"id": 3
})
self._request(method='post', data=post_data)
labels = self.response.json()['result']
            if labels is not None:
if label not in labels:
logger.log(self.name + ': ' + label + u" label does not exist in Deluge we must add it",
logger.DEBUG)
post_data = json.dumps({"method": 'label.add',
"params": [label],
"id": 4
})
self._request(method='post', data=post_data)
logger.log(self.name + ': ' + label + u" label added to Deluge", logger.DEBUG)
# add label to torrent
post_data = json.dumps({"method": 'label.set_torrent',
"params": [result.hash, label],
"id": 5
})
self._request(method='post', data=post_data)
logger.log(self.name + ': ' + label + u" label added to torrent", logger.DEBUG)
else:
logger.log(self.name + ': ' + u"label plugin not detected", logger.DEBUG)
return False
return not self.response.json()['error']
def _set_torrent_ratio(self, result):
ratio = None
if result.ratio:
ratio = result.ratio
if ratio:
post_data = json.dumps({"method": "core.set_torrent_stop_at_ratio",
"params": [result.hash, True],
"id": 5
})
self._request(method='post', data=post_data)
post_data = json.dumps({"method": "core.set_torrent_stop_ratio",
"params": [result.hash, float(ratio)],
"id": 6
})
self._request(method='post', data=post_data)
return not self.response.json()['error']
return True
def _set_torrent_path(self, result):
if sickbeard.TORRENT_PATH:
post_data = json.dumps({"method": "core.set_torrent_move_completed",
"params": [result.hash, True],
"id": 7
})
self._request(method='post', data=post_data)
post_data = json.dumps({"method": "core.set_torrent_move_completed_path",
"params": [result.hash, sickbeard.TORRENT_PATH],
"id": 8
})
self._request(method='post', data=post_data)
return not self.response.json()['error']
return True
def _set_torrent_pause(self, result):
if sickbeard.TORRENT_PAUSED:
post_data = json.dumps({"method": "core.pause_torrent",
"params": [[result.hash]],
"id": 9
})
self._request(method='post', data=post_data)
return not self.response.json()['error']
return True
api = DelugeAPI() |
egroise/uxtest | refs/heads/master | tests/selftest.sikuli/selftest.py | 1 | # uxtst library
sys.path.append("../../..")
import uxtst
reload(uxtst)
from uxtst import *
startTest("uxtest Selftest")
commentTest("Selftesting takeScreenshot")
takeScreenshot()
takeScreenshot("Named screenshot")
commentTest("Selftesting assertions (3 sucess, 3 failures)")
visible("present.png","Should succeed",2)
visible("not present.png","Should fail",2)
notVisible("present.png","Should fail")
notVisible("not present.png","Should succeed")
reportTestFail("Report custom assertion fail")
reportTestSucceeded("Report custom assertion success")
commentTest("Selftesting clicks (3 fails)")
newClick("present.png",2)
hover(Screen())
type(Key.ESC)
newClick("not present.png",2)
waitingClick("present.png",2)
hover(Screen())
type(Key.ESC)
waitingClick("not present.png",2)
waitingDoubleClick("present.png",2)
hover(Screen())
type(Key.ESC)
waitingDoubleClick("not present.png",2)
commentTest("Selftesting utilities")
found = findFirstFromLeft("present.png")
restrictScanToWindowContaining("present.png")
restrictScanToFocusedWindow()
observeWholeScreen()
AppLaunched()
endTest()
showTestReport()
|
Gybes/pyNES | refs/heads/0.1.x | pynes/tests/sei_test.py | 28 | # -*- coding: utf-8 -*-
import unittest
from pynes.compiler import lexical, syntax, semantic
class SeiTest(unittest.TestCase):
def test_sei_sngl(self):
tokens = list(lexical('SEI'))
self.assertEquals(1, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMPLIED', ast[0]['type'])
code = semantic(ast)
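        # 0x78 is the one-byte 6502 opcode for SEI (implied addressing mode)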
self.assertEquals(code, [0x78])
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source - Copy/Lib/site-packages/idna/__init__.py | 230 | from .package_data import __version__
from .core import *
|
isandlaTech/cohorte-demos | refs/heads/dev | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141209.234423-41-python-distribution/repo/sleekxmpp/xmlstream/filesocket.py | 11 | # -*- coding: utf-8 -*-
"""
sleekxmpp.xmlstream.filesocket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is a shim for correcting deficiencies in the file
socket implementation of Python2.6.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
from socket import _fileobject
import errno
import socket
class FileSocket(_fileobject):
"""Create a file object wrapper for a socket to work around
issues present in Python 2.6 when using sockets as file objects.
The parser for :class:`~xml.etree.cElementTree` requires a file, but
we will be reading from the XMPP connection socket instead.
"""
def read(self, size=4096):
"""Read data from the socket as if it were a file."""
if self._sock is None:
return None
while True:
try:
data = self._sock.recv(size)
break
except socket.error as serr:
if serr.errno != errno.EINTR:
raise
if data is not None:
return data
class Socket26(socket.socket):
"""A custom socket implementation that uses our own FileSocket class
to work around issues in Python 2.6 when using sockets as files.
"""
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return FileSocket(self._sock, mode, bufsize)
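# Usage sketch (illustrative, assuming Python 2.6; host/port are made up):
# drop Socket26 in where a plain socket would be created so makefile()
# returns the patched FileSocket wrapper:
#
#   sock = Socket26(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect(('xmpp.example.com', 5222))
#   reader = sock.makefile('rb')
#   data = reader.read(4096)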
|
cvicentiu/mariadb-10.0 | refs/heads/master | storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py | 39 | # 9/23/2011 Generate blocking row lock tests
import datetime
# generate sql write queries
def mysqlgen_select_for_update(k, kv, c, cv):
print "select * from t where %s=%s for update;" % (k, kv)
def mysqlgen_select_for_update_range(k, c, where):
print "select * from t where %s%s for update;" % (k, where)
def mysqlgen_update(k, kv, c, cv):
print "update t set %s=%s where %s=%s;" % (c, c, k, kv);
def mysqlgen_update_range(k, c, where):
print "update t set %s=%s where %s%s;" % (c, c, k, where);
def mysqlgen_insert_ignore(k, kv, c, cv):
print "insert ignore t values(%s, %s);" % (kv, cv)
def mysqlgen_insert_on_dup_update(k, kv, c, cv):
print "insert t values(%s, %s) on duplicate key update %s=%s;" % (kv, cv, c, c)
def mysqlgen_replace(k, kv, c, cv):
print "replace t values(%s, %s);" % (kv, cv)
# genrate sql read queries
def mysqlgen_select_star():
print "select * from t;"
def mysqlgen_select_where(k, where):
print "select * from t where %s%s;" % (k, where)
# mysql test code generation
def mysqlgen_prepare():
print "# prepare with some common parameters"
print "connect(conn1, localhost, root);"
print "set session transaction isolation level serializable;"
print "connect(conn2, localhost, root);"
print "set session transaction isolation level serializable;"
print "connection conn1;"
print ""
def mysqlgen_reload_table():
print "# drop old table, generate new one. 4 rows"
print "--disable_warnings"
print "drop table if exists t;"
print "--enable_warnings"
print "create table t (a int primary key, b int) engine=tokudb;"
for i in range(1, 7):
mysqlgen_insert_ignore("a", i, "b", i*i)
print ""
def mysqlgen_cleanup():
print "# clean it all up"
print "drop table t;"
print ""
write_point_queries = [
("select for update", mysqlgen_select_for_update),
("update", mysqlgen_update),
("insert", mysqlgen_insert_ignore),
("replace", mysqlgen_replace) ]
write_range_queries = [
("select for update", mysqlgen_select_for_update_range),
("update", mysqlgen_update_range) ]
timeouts = [0, 500]
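# timeout 0 makes a conflicting statement fail immediately; 500 (ms) lets a
# blocked statement wait long enough to observe the early-commit cases below.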
# Here's where all the magic happens
print "source include/have_tokudb.inc;"
print "# Tokutek"
print "# Blocking row lock tests;"
print "# Generated by %s on %s;" % (__file__, datetime.date.today())
print ""
mysqlgen_prepare()
mysqlgen_reload_table()
for timeout in timeouts:
print "# testing with timeout %s" % timeout
print "connection conn1;"
print "set session tokudb_lock_timeout=%s;" % timeout
print "connection conn2;"
print "set session tokudb_lock_timeout=%s;" % timeout
print ""
print "# testing each point query vs each point query"
for ta, qa in write_point_queries:
# point vs point contention
for tb, qb in write_point_queries:
print "# testing conflict \"%s\" vs. \"%s\"" % (ta, tb)
print "connection conn1;"
print "begin;"
print "# about to do qa.."
qa("a", "1", "b", "100")
print "connection conn2;"
for k in range(1, 5):
if k == 1:
print "--error ER_LOCK_WAIT_TIMEOUT"
qb("a", k, "b", "100")
# point write lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=1")
mysqlgen_select_where("a", ">=2")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
qb("a", "1", "b", "100")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
qa("a", "1", "b", "150")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
qb("a", "1", "b", "175")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
# point vs range contention
for rt, rq in write_range_queries:
print "# testing range query \"%s\" vs \"%s\"" % (rt, ta)
print "connection conn1;"
print "begin;"
print ""
qa("a", "1", "b", "100")
print "connection conn2;"
print "--error ER_LOCK_WAIT_TIMEOUT"
rq("a", "b", "<=2")
print "--error ER_LOCK_WAIT_TIMEOUT"
rq("a", "b", ">=0")
rq("a", "b", ">2")
# write range lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=1")
mysqlgen_select_where("a", ">=2")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
rq("a", "b", "<=2")
rq("a", "b", ">=0")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
qa("a", "1", "b", "150")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
rq("a", "b", "<=2")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
for rt, rq in write_range_queries:
for rtb, rqb in write_range_queries:
print "# testing range query \"%s\" vs range query \"%s\"" % (rt, rtb)
print "connection conn1;"
print "begin;"
print ""
rq("a", "b", ">=2 and a<=4")
print "connection conn2;"
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", ">=0 and a<=3")
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", ">=3 and a<=6")
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", "<=2")
rqb("a", "b", ">=5")
# point write lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=2")
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", ">=3 and a<=5")
mysqlgen_select_where("a", ">=5")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
rqb("a", "b", ">=0 and a<=3")
rqb("a", "b", ">=3 and a<=6")
rqb("a", "b", "<=2")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
rq("a", "b", ">=2 and a<=4")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
rqb("a", "b", ">=0 and a<=3")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
mysqlgen_cleanup()
|
proxysh/Safejumper-for-Mac | refs/heads/master | buildlinux/env32/local/lib/python2.7/encodings/gb18030.py | 816 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb18030',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
dex4er/mamba | refs/heads/master | spec/refactoring_goodies_spec.py | 5 | from expects import *
RETURN_VALUE = '42'
with description('Refactoring goodies'):
def a_method(self, return_value=RETURN_VALUE):
return return_value
with it('uses methods defined inside its context'):
expect(self.a_method()).to(equal(RETURN_VALUE))
with context('when using nested contexts'):
with it('uses methods defined inside its parent'):
expect(self.a_method()).to(equal(RETURN_VALUE))
|
johnkeepmoving/oss-ftp | refs/heads/master | python27/win32/Lib/test/test_scope.py | 115 | import unittest
from test.test_support import check_syntax_error, check_py3k_warnings, \
check_warnings, run_unittest
class ScopeTests(unittest.TestCase):
def testSimpleNesting(self):
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testExtraNesting(self):
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testSimpleAndRebinding(self):
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingGlobalNoFree(self):
        def make_adder4(): # XXX add extra level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
self.assertEqual(adder(1), 2)
global_x = 10
self.assertEqual(adder(-2), 8)
def testNestingThroughClass(self):
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingPlusFreeRefToGlobal(self):
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
self.assertEqual(inc(1), 11) # there's only one global
self.assertEqual(plus10(-2), 8)
def testNearestEnclosingScope(self):
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
self.assertEqual(test_func(5), 47)
def testMixedFreevarsAndCellvars(self):
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 39)
def testFreeVarInMethod(self):
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
def testRecursion(self):
def f(x):
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
raise ValueError, "x must be >= 0"
self.assertEqual(f(6), 720)
def testUnoptimizedNamespaces(self):
check_syntax_error(self, """\
def unoptimized_clash1(strip):
def f(s):
from string import *
return strip(s) # ambiguity: free or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from string import *
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from string import *
def g():
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
# XXX could allow this for exec with const argument, but what's the point
check_syntax_error(self, """\
def error(y):
exec "a = 1"
def f(x):
return x + y
return f
""")
check_syntax_error(self, """\
def f(x):
def g():
return x
del x # can't del name
""")
check_syntax_error(self, """\
def f():
def g():
from string import *
return strip # global or local?
""")
# and verify a few cases that should work
exec """
def noproblem1():
from string import *
f = lambda x:x
def noproblem2():
from string import *
def f(x):
return x + 1
def noproblem3():
from string import *
def f(x):
global y
y = x
"""
def testLambdas(self):
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
self.assertEqual(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 18)
def testUnboundLocal(self):
def errorInOuter():
print y
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
self.assertRaises(UnboundLocalError, errorInOuter)
self.assertRaises(NameError, errorInInner)
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
exec """
global_x = 1
def f():
global_x += 1
try:
f()
except UnboundLocalError:
pass
else:
fail('scope of global_x not correctly determined')
""" in {'fail': self.fail}
def testComplexDefinitions(self):
def makeReturner(*lst):
def returner():
return lst
return returner
self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
return kwargs
return returner
self.assertEqual(makeReturner2(a=11)()['a'], 11)
with check_py3k_warnings(("tuple parameter unpacking has been removed",
SyntaxWarning)):
exec """\
def makeAddPair((a, b)):
def addPair((c, d)):
return (a + c, b + d)
return addPair
""" in locals()
self.assertEqual(makeAddPair((1, 2))((100, 200)), (101,202))
def testScopeOfGlobalStmt(self):
# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
exec """\
# I
x = 7
def f():
x = 1
def g():
global x
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 7)
self.assertEqual(x, 7)
# II
x = 7
def f():
x = 1
def g():
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 7)
# III
x = 7
def f():
x = 1
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# IV
x = 7
def f():
x = 3
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# XXX what about global statements in class blocks?
# do they affect methods?
x = 12
class Global:
global x
x = 13
def set(self, val):
x = val
def get(self):
return x
g = Global()
self.assertEqual(g.get(), 13)
g.set(15)
self.assertEqual(g.get(), 13)
"""
def testLeaks(self):
class Foo:
count = 0
def __init__(self):
Foo.count += 1
def __del__(self):
Foo.count -= 1
def f1():
x = Foo()
def f2():
return x
f2()
for i in range(100):
f1()
self.assertEqual(Foo.count, 0)
def testClassAndGlobal(self):
exec """\
def test(x):
class Foo:
global x
def __call__(self, y):
return x + y
return Foo()
x = 0
self.assertEqual(test(6)(2), 8)
x = -1
self.assertEqual(test(3)(2), 5)
looked_up_by_load_name = False
class X:
# Implicit globals inside classes are be looked up by LOAD_NAME, not
# LOAD_GLOBAL.
locals()['looked_up_by_load_name'] = True
passed = looked_up_by_load_name
self.assertTrue(X.passed)
"""
def testLocalsFunction(self):
def f(x):
def g(y):
def h(z):
return y + z
w = x + y
y += 3
return locals()
return g
d = f(2)(4)
self.assertIn('h', d)
del d['h']
self.assertEqual(d, {'x': 2, 'y': 7, 'w': 6})
def testLocalsClass(self):
# This test verifies that calling locals() does not pollute
# the local namespace of the class with free variables. Old
# versions of Python had a bug, where a free variable being
# passed through a class namespace would be inserted into
# locals() by locals() or exec or a trace function.
#
# The real bug lies in frame code that copies variables
# between fast locals and the locals dict, e.g. when executing
# a trace function.
def f(x):
class C:
x = 12
def m(self):
return x
locals()
return C
self.assertEqual(f(1).x, 12)
def f(x):
class C:
y = x
def m(self):
return x
z = list(locals())
return C
varnames = f(1).z
self.assertNotIn("x", varnames)
self.assertIn("y", varnames)
def testLocalsClass_WithTrace(self):
# Issue23728: after the trace function returns, the locals()
# dictionary is used to update all variables, this used to
# include free variables. But in class statements, free
# variables are not inserted...
import sys
sys.settrace(lambda a,b,c:None)
try:
x = 12
class C:
def f(self):
return x
self.assertEqual(x, 12) # Used to raise UnboundLocalError
finally:
sys.settrace(None)
def testBoundAndFree(self):
# var is bound and free in class
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
self.assertEqual(inst.a, inst.m())
def testInteractionWithTraceFunc(self):
import sys
def tracer(a,b,c):
return tracer
def adaptgetter(name, klass, getter):
kind, des = getter
if kind == 1: # AV happens when stepping from this line to next
if des == "":
des = "_%s__%s" % (klass.__name__, name)
return lambda obj: getattr(obj, des)
class TestClass:
pass
sys.settrace(tracer)
adaptgetter("foo", TestClass, (1, ""))
sys.settrace(None)
self.assertRaises(TypeError, sys.settrace)
def testEvalExecFreeVars(self):
def f(x):
return lambda: x + 1
g = f(3)
self.assertRaises(TypeError, eval, g.func_code)
try:
exec g.func_code in {}
except TypeError:
pass
else:
self.fail("exec should have failed, because code contained free vars")
def testListCompLocalVars(self):
try:
print bad
except NameError:
pass
else:
print "bad should not be defined"
def x():
[bad for s in 'a b' for bad in s.split()]
x()
try:
print bad
except NameError:
pass
def testEvalFreeVars(self):
def f(x):
def g():
x
eval("x + 1")
return g
f(4)()
def testFreeingCell(self):
# Test what happens when a finalizer accesses
# the cell where the object was stored.
class Special:
def __del__(self):
nestedcell_get()
def f():
global nestedcell_get
def nestedcell_get():
return c
c = (Special(),)
c = 2
f() # used to crash the interpreter...
def testGlobalInParallelNestedFunctions(self):
# A symbol table bug leaked the global statement from one
# function to other nested functions in the same block.
# This test verifies that a global statement in the first
# function does not affect the second function.
CODE = """def f():
y = 1
def g():
global y
return y
def h():
return y + 1
return g, h
y = 9
g, h = f()
result9 = g()
result2 = h()
"""
local_ns = {}
global_ns = {}
exec CODE in local_ns, global_ns
self.assertEqual(2, global_ns["result2"])
self.assertEqual(9, global_ns["result9"])
def testTopIsNotSignificant(self):
# See #9997.
def top(a):
pass
def b():
global a
def test_main():
with check_warnings(("import \* only allowed at module level",
SyntaxWarning)):
run_unittest(ScopeTests)
if __name__ == '__main__':
test_main()
|
lekanovic/pycoin | refs/heads/master | tests/tx_test.py | 5 | #!/usr/bin/env python
import binascii
import unittest
from pycoin.serialize import b2h, h2b_rev
from pycoin.tx.Tx import Tx
TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX = (
"0100000001a8f57056b016d7d243fc0fc2a73f9146e7e4c7766ec6033b5ac4cb89c"
"64e19d0000000008a4730440220251acb534ba1b8a269260ad3fa80e075cd150d3ff"
"ba76ad20cd2e8178dee98b702202284f9c7eae3adfcf0857a901cd34f0ea338d5744caab88afad5797be643f7b7"
"014104af8385da9dc85aa153f16341a4015bc95e7ff57876b9bde40bd8450a5723a05c1c89ff2d85230d2e62c0c"
"7690b8272cf85868a0a0fc02f99a5b793f22d5c7092ffffffff02bb5b0700000000001976a9145b78716d137e386ae2"
"befc4296d938372559f37888acdd3c71000000000017a914c6572ee1c85a1b9ce1921753871bda0b5ce889ac8700000000")
class TxTest(unittest.TestCase):
def test_tx_api(self):
tx = Tx.tx_from_hex(TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX)
# this transaction is a pay-to-hash transaction
self.assertEqual(tx.id(), "e1a18b843fc420734deeb68ff6df041a2585e1a0d7dbf3b82aab98291a6d9952")
self.assertEqual(tx.txs_out[0].bitcoin_address(), "19LemzJ3XPdUxp113uynqCAivDbXZBdBy3")
self.assertEqual(tx.txs_out[1].bitcoin_address(), "3KmkA7hvqG2wKkWUGz1BySioUywvcmdPLR")
def test_blanked_hash(self):
tx = Tx.tx_from_hex(TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX)
self.assertEqual(tx.id(), "e1a18b843fc420734deeb68ff6df041a2585e1a0d7dbf3b82aab98291a6d9952")
self.assertEqual(
b2h(tx.blanked_hash()), "909579526c4c2c441687c7478d3f96249724d2ff071d2272b44500d6cf70d5d6")
tx.txs_in[0].script = b"foo"
self.assertEqual(
b2h(tx.blanked_hash()), "909579526c4c2c441687c7478d3f96249724d2ff071d2272b44500d6cf70d5d6")
tx.txs_out[0].coin_value += 1
self.assertEqual(
b2h(tx.blanked_hash()), "10d4e87f7bf35f2949e7693e7a4a84189aad8631f0b2b0999e88f7261066cbe5")
tx.txs_in[0].script = b"bar"
self.assertEqual(
b2h(tx.blanked_hash()), "10d4e87f7bf35f2949e7693e7a4a84189aad8631f0b2b0999e88f7261066cbe5")
tx.txs_in[0].script = b""
self.assertEqual(b2h(tx.hash()), "10d4e87f7bf35f2949e7693e7a4a84189aad8631f0b2b0999e88f7261066cbe5")
tx.txs_in[0].script = b"foo"
self.assertEqual(b2h(tx.hash()), "c91910058722f1c0f52fc5c734939053c9b87882a9c72b609f21632e0bd13751")
def test_issue_39(self):
"""
See https://github.com/richardkiss/pycoin/issues/39 and
https://github.com/richardkiss/pycoin/pull/40
There was a problem validating the following transactions:
315ac7d4c26d69668129cc352851d9389b4a6868f1509c6c8b66bead11e2619f
dbf38261224ebff0c455c405e2435cfc69adb6b8a42d7b10674d9a4eb0464dca
de744408e4198c0a39310c8106d1830206e8d8a5392bcf715c9b5ec97d784edd
        This code tests these transactions.
"""
TX_B64_LIST = [
# some encoded transactions (the three listed above and the three
# that they depend upon)
(
"AQAAAALcOOk1m9faO1g4YgThhtlAhoX0J/XlE2ZttzWqimshaQAAAABqRzBE"
"AiBdj+6zEkeORo0LUU5j4ROVjXIU+lcqzYcHmn8MwCb8XAIgD6duoFvyQ69t"
"D5F38kHK9gbQH8/V5i1r77yiTlaeXCcDIQIQChqcosGJMtZXfFjyJVgBhNDg"
"gibUGVmHSslj48Gy/v/////cOOk1m9faO1g4YgThhtlAhoX0J/XlE2ZttzWq"
"imshaQEAAABrSDBFAiAIft44cp5tNeT1FVBQGOZZIiAxJztzZpIPOT7jqxe8"
"HgIhAMpDFkt1fRptEjXxMgDUtfdt2P2k7J/ChUay31sSEejfAyECdZg5E+YA"
"k7dn6FWXypOX+y9Bjlf5mNavu8U2EWCFscv/////AUCJlQAAAAAAGXapFPzJ"
"s204z1XX1bTuTd22ssF2EvSMiKwAAAAA"
),
(
"AQAAAAEtUf3HWib/PGE4Ag4am7QPH6tuOc6W/q4yGMmuA14AqwEAAABrSDBF"
"AiEA5PGlIZB+UPxE0zEy7pjJcVpk350sKGDj4EdMUhq4U34CIDCvjTUGpTUu"
"KwVkRazYVaQtNycOlKYpp7KLIYcOxtdhASEDgIxJPwYZkNK+AB5A8EiuiHAy"
"C3SJXOLZZS88HHPNbyz/////AvCHSwAAAAAAGXapFPzJs204z1XX1bTuTd22"
"ssF2EvSMiKzwh0sAAAAAABl2qRQzzvYXSdEboq3wkaXgRWeBd/46bYisAAAA"
"AA=="
),
(
"AQAAAAJa+fLO2OCiRk98qhSvobvRyPsY3qrl0QEZa1jcIn70rgAAAABqRzBE"
"AiANKFITbLHEu93eBOx29YHRsyockZFIyF+8D9BWXTWK8wIgNvKqF87Ind6w"
"A3aigYv3KMRHmSgLnyBExWkad7Dc2WwDIQIQChqcosGJMtZXfFjyJVgBhNDg"
"gibUGVmHSslj48Gy/v////9a+fLO2OCiRk98qhSvobvRyPsY3qrl0QEZa1jc"
"In70rgEAAABrSDBFAiEA9APIYMTjztPlIyyzWCXnk3It+vCsLwGWGpN4K0kG"
"qWMCIGLdifJz5mvPrW8FqLDNJrp7Bma+/Qw9pF2feVcX2lBKAyECdZg5E+YA"
"k7dn6FWXypOX+y9Bjlf5mNavu8U2EWCFscv/////AaAClAAAAAAAGXapFOUK"
"XY2jOZUbBAutBFPXxAz9dNPciKwAAAAA"
),
(
"AQAAAAGfYeIRrb5mi2ycUPFoaEqbONlRKDXMKYFmaW3C1MdaMQAAAABsSTBG"
"AiEAhIisrGQ/6Sa7DAJtv+pa9nMiHuBTLNAkxlyzDjYvGEQCIQCFH27K+zjJ"
"ItZHnrCORpOhrBnHvPnUX8mqXy1pGB/4ngEhAhAKGpyiwYky1ld8WPIlWAGE"
"0OCCJtQZWYdKyWPjwbL+/////wKgxEoAAAAAABl2qRT8ybNtOM9V19W07k3d"
"trLBdhL0jIisoMRKAAAAAAAZdqkUM872F0nRG6Kt8JGl4EVngXf+Om2IrAAA"
"AAA="
),
(
"AQAAAALCBkSoNGHOnUgtcCy8I87ODdMmW1WL56GNNOIWvaccAAAAAABrSDBF"
"AiAxKffbGKLs4sDhPFwLZvQlHX+Q20uxr0hFzQqtnSQZQAIhAImY0R1z7HrT"
"Tt4hR0R/3n3eS8LXk14G94/O8Pc7LDlmAyECE2UQ39BTBuo0mCvz395yuOSd"
"QyqYBb9kUtOZTnkvnRn/////yRF9O6xy+bn8PWf3KNM1uywKHCYWOL0bgEe1"
"Zd1jGaIAAAAAakcwRAIgRQ7h/BpT6uurhfpEmEE/Xx5OAZdUohj+Euzr3Zg8"
"mbkCIDxIakZ02TMLAtt5OHKyy0VQw7uywxjyis6540zeNZdJAyED78tvrsro"
"6386Jta3YJd/I64guTuYS8oof9K4PDGZeHD/////AeD9HAAAAAAAGXapFB0x"
"6lo758/yr1vtc3EOtvXV9n1wiKwAAAAA"
),
(
"AQAAAAKerCh2TFeXmFaXU1qdQUucoCL5WRFVNZdvNt1FZgp5XQAAAACMSTBG"
"AiEAvLz97Qz/zSlKSDrllLRwj73G2B7RfaiR1ZspOG5Ae3kCIQD5ATZgiNvH"
"X8Tn8Ib8RohgW0HGbPRi00XUcvxCTmybGgFBBCsXId9LDBz91gENMCmVXxRE"
"ZI+E6QOSkToVTtny7tiOJhmHy/jci4KzQmucvUBotsK5r4CiwjhjOkAAXRD6"
"SWD/////6864dM1/4fxjvltUc0HJ1da9agsSw4LV3KYhGR7FJ+MBAAAAi0gw"
"RQIhAJIopjUy7dPOHa+LGTvgM4jfZ8pA522/Jx3+uFC4Lz5IAiBzLNoxejaa"
"dw1CXwOUuzI4rMl0xsuYC5XQaxZNT2TFzwFBBBPpriULEjb9VdVoC8v3E4is"
"RMmfQByPCJYadSwK/ZZg9TTFGyDXUwW+dQ9tScDzhMWfdLK9DyV4iAbnYh/S"
"2cr/////A0BCDwAAAAAAGXapFFzGycfh13x6rrUPhNJNj2ViE7xbiKwACT0A"
"AAAAABl2qRQhQVEH8cwnc3//rGPcfvakBANJxIistBcsAAAAAAAZdqkUMQV+"
"QpfDgBAsCQ+ixaUK5Kgl0kOIrAAAAAA="
),
(
"AQAAAAO1CFlm1mEB3fjCtilQEH+6TbR3UzdJyqafj3mab9Mc6gAAAACKRzBE"
"AiA8rWZ4BB8YYJp3xtx8jAZdrfQ6B0zjYRdgTS7I5LZF7gIgabCjn9iu9L3n"
"YvKrdXFJJygtbg6V8iMTLrPh8ghdGvwBQQQrFyHfSwwc/dYBDTAplV8URGSP"
"hOkDkpE6FU7Z8u7YjiYZh8v43IuCs0JrnL1AaLbCua+AosI4YzpAAF0Q+klg"
"/////8IGRKg0Yc6dSC1wLLwjzs4N0yZbVYvnoY004ha9pxwAAQAAAItIMEUC"
"IDNZYWLuCV0nJL6CCGgUfQfNoh0oAACd2lMZn+zJdJCDAiEAqZafa18G1K1x"
"/6yOvj8h1uAGSM8UjSJJ6479li5sos4BQQTswrqYR5m+x0vFTzgGrrM2k+Gx"
"gX+hDBAvN8Kq9RRuWdqC4jVNGhGdFD63Ev1TQYXMqvp6b9ztbAZ3ED8i6sFo"
"/////0Vf19DzvUs2DvFwlVW9viTF+YlXCNYNMD6yUXK9I9RBAgAAAItIMEUC"
"IQCKbaQY2eH1fsXZFksstrP4B+uxPBwGRe2Wxl7rW5sYGwIgVvVEPdnJNvVj"
"rh0XZdhqnOAA0Sw39Upqkejrm+yXWnwBQQQ1hDJBuzoTc1ZJ8zyVQjEfRcjW"
"o8rq3lE+3x3rYZ3Q/9xBEBtsnkFAzps/N8n6C5cK2QAmRGxeGFmbYaGFT5RP"
"/////wNAQg8AAAAAABl2qRSU70Qwi2d2bI+nKnCP19XGsbSnWoisVEkwAAAA"
"AAAZdqkUgroT7ai54LzKPXVnWJsPoV6lJ0yIrHjrFQAAAAAAGXapFEFyZV9I"
"izJXnWmTivO2n9OKDWCdiKwAAAAA"
),
(
"AQAAAAHBHumhtHyFj2ma501AFchO/RrrfkY1sYTKsJiYe6i5pAEAAADaAEcw"
"RAIgJQsMj5xe4yyGSQOseNBu7zuQNbdwYRpmu4tyOeVrDhoCIHTRJ5lHr5OH"
"JsmDYl4nTEMhT2TeEN8tMNtrt/rFLMaHAUgwRQIhAObKZ2o5NubR2aoXKP7q"
"oNMI3sv4u33Hnxcu1NBCilhoAiAH5OaEGAC5snVQDIWgXXVWICosFmTHHjXg"
"y5fNwAO5gAFHUiECzr9qtYCUjRRrfMdx2OZGl0NJ09exHz4DKH0Jl6R307kh"
"A3umUUhbeiyyIhketkpVkm5iu6v+m17SqUiKrVR7IEKCUq7/////An1KDwAA"
"AAAAGXapFNxnIa33YyARGtMFwzhMdn1LmeGViKxllyYPAAAAABepFNsSg3N8"
"2T68HrEpjWRKeEbFWm2WhwAAAAA="
),
(
"AQAAAAHZI2Rm7Gvz7UMEKi20P7AIT5AOxlhwW29S0uFz9EPz1QEAAADaAEgw"
"RQIhAIX1NZuYzrKUHFAxUNYI6yWMUuzCEapuZOUY6TdCspWaAiAchzgPP6if"
"WNh0cmVkyW1UpygM/eVa1XrtHepCMhvArAFHMEQCIGLJtKtbyJEaH6iQS+hK"
"xUlGrWwmqdJScz/JfSZ1Qln6AiBNRC+gswEEMjTNR5uVetqCGJkNL2m6fDfk"
"DyICU/otoQFHUiECzr9qtYCUjRRrfMdx2OZGl0NJ09exHz4DKH0Jl6R307kh"
"A3umUUhbeiyyIhketkpVkm5iu6v+m17SqUiKrVR7IEKCUq7/////Aux5CAAA"
"AAAAGXapFDIKbLrYWAn/2ZTB7ToisbIaZ5DoiKzL5TUPAAAAABepFNsSg3N8"
"2T68HrEpjWRKeEbFWm2WhwAAAAA="
),
(# 837dea37ddc8b1e3ce646f1a656e79bbd8cc7f558ac56a169626d649ebe2a3ba
"AQAAAAGsp/O0VlTCMOCIalf7mIwwRO9ej385cm0wXGHV6BiQPAAAAAD9XQEAS"
"DBFAiABh6+Sjp0VXEsaycHJEYFTI5q6dndPd118H5w+EG/zPAIhAIgisPZY7e"
"wiJ00LauneEOvy2gaxu9qrpOUOsHjznj14AUcwRAIgeV8PT1lBp3rgMuy54zd"
"TeI1+tcsMeNgFV11rAKHZv+0CID4fStkzLRQWrgHicDjpRbydtZxzJyijg6bx"
"7S+5naekAUzJUkEEkbuiUQkSpb032h+1sWcwEOQ9LG2BLFFOkb+p8usSnhwYM"
"ynbVb2GjiCarC+8Assz2Y/nS/I/DCNdYSax2DNPhkEEhlxAKTpoDLnAIOex4Q"
"bYwZFtPO+ZqkMaVtJT5pJW2sCe8SKxqYaBiny2JFMvBiwdH4ciCEhhxcMpHM/"
"+9OxodEEEjSRV0kA+CHCPwfVWAC8bbNg/mS0IUJf5l0qwiiiDjweJb7qwjzlJ"
"XhX6b61u2/sedU41+hx4RMQfMioYY9RiE1Ou/////wFAQg8AAAAAABl2qRSuV"
"rTbE1VNMhxALbOWEYeu0bvtW4isAAAAAA=="
),
(# 3c9018e8d5615c306d72397f8f5eef44308c98fb576a88e030c25456b4f3a7ac
# input of
# 837dea37ddc8b1e3ce646f1a656e79bbd8cc7f558ac56a169626d649ebe2a3ba
"AQAAAAGJYyhI+ZcikVcnxcddqNstvxlDQqBCmCj2b/iPqyr31gAAAACLSDBFA"
"iEAq7yKc/4gVEgL2j8ygdotDFHihBORq9TAn0+QiiA0wY0CIFvJ5NaOr7kY8+"
"lmIzhkekQZwN4aZQq4mD8dIW4qMdjjAUEEb1XXre/2ARx+rClP5UDFeDC+gOk"
"1XIOGnJJgpLi/R2ema6y9cLgE3GPVvusUGAKSrX87CDNysdAtejfdl/9cnv//"
"//8BQEIPAAAAAAAXqRT4FbA22bu85enyoAq9G/PckelVEIcAAAAA"
)
]
TX_LIST = [Tx.tx_from_hex(b2h(binascii.a2b_base64(b64.encode("utf8")))) for b64 in TX_B64_LIST]
TX_DB = dict((tx.hash(), tx) for tx in TX_LIST)
for h in ["315ac7d4c26d69668129cc352851d9389b4a6868f1509c6c8b66bead11e2619f",
"dbf38261224ebff0c455c405e2435cfc69adb6b8a42d7b10674d9a4eb0464dca",
"de744408e4198c0a39310c8106d1830206e8d8a5392bcf715c9b5ec97d784edd",
"485716e53b422aca0fe5b1ded21360695ce5f49255d80e10db56458ed6962ff3",
"837dea37ddc8b1e3ce646f1a656e79bbd8cc7f558ac56a169626d649ebe2a3ba"]:
tx = TX_DB.get(h2b_rev(h))
self.assertNotEqual(tx, None)
tx.unspents_from_db(TX_DB)
for idx, tx_in in enumerate(tx.txs_in):
self.assertTrue(tx.is_signature_ok(tx_in_idx=idx))
def tx_to_b64(tx_hex):
# use this to dump raw transactions in the data above
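    # For example, tx_to_b64(TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX)
    # prints the transaction as 45-byte base64 chunks, ready to paste into a
    # list like TX_B64_LIST above.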
import io
tx = Tx.tx_from_hex(tx_hex)
f = io.BytesIO()
tx.stream(f)
d = f.getvalue()
for idx in range(0, len(d), 45):
print('"%s"' % binascii.b2a_base64(d[idx:idx+45]).decode("utf8")[:-1])
if __name__ == "__main__":
unittest.main()
|
ameistad/django-template | refs/heads/master | {{cookiecutter.repo_name}}/config/settings/development.py | 3 | # -*- coding: utf-8 -*-
# Development settings
from .base import * # noqa
DEBUG = True
ALLOWED_HOSTS = []
# Development specific django packages.
INSTALLED_APPS += ('django_extensions', )
# Email backend that writes messages to console instead of sending them.
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
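# Note: EMAIL_HOST and EMAIL_PORT above only take effect if EMAIL_BACKEND is
# switched back to an SMTP backend; the console backend ignores them.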
|
codervince/flashingredlight | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py | 1729 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
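        # Walk the pulldom event stream with one event of lookahead so that
        # void elements (emitted as EmptyTag tokens) can swallow their
        # matching END_ELEMENT; ignore_until marks the node whose remaining
        # events should be skipped.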
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
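                # The last argument flags void elements that (illegally) have
                # children: it is true when the following event does not close
                # this node.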
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
|
PuchatekwSzortach/convolutional_network | refs/heads/master | tests/net/test_layers.py | 1 | """
Tests for net.layers module
"""
import pytest
import numpy as np
import net.layers
class TestInput:
"""
Tests for Input layer class
"""
def test_build_sample_shape_is_a_list(self):
input = net.layers.Input(sample_shape=[2, 3, 4])
input.build(input_shape=None)
assert (None, 2, 3, 4) == input.input_shape
assert (None, 2, 3, 4) == input.output_shape
def test_build_sample_shape_is_a_tuple(self):
input = net.layers.Input(sample_shape=(4, 3, 2))
input.build(input_shape=None)
assert (None, 4, 3, 2) == input.input_shape
assert (None, 4, 3, 2) == input.output_shape
def test_forward(self):
input = net.layers.Input([2, 3, 4])
x = np.arange(24).reshape((1, 2, 3, 4))
assert np.all(x == input.forward(x))
def test_forward_incompatible_shape(self):
input = net.layers.Input([2, 3, 4])
x = np.arange(48).reshape((1, 2, 3, 8))
with pytest.raises(ValueError):
input.forward(x)
def test_train_forward_simple(self):
input = net.layers.Input([4])
x = np.array([1, 2, 3, 4]).reshape(1, 4)
expected = x
actual = input.train_forward(x)
assert np.all(expected == actual)
def test_train_backward_simple(self):
input = net.layers.Input([4])
gradients = np.array([1])
assert input.train_backward(gradients) is None
class TestFlatten:
"""
Tests for Flatten layer
"""
def test_build_last_sample_dimension_not_squeezed(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[None, 1, 4])
assert (None, 1, 4) == flatten.input_shape
assert (None, 4) == flatten.output_shape
def test_build_first_sample_dimension_not_squeezed(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[None, 5, 1])
assert (None, 5, 1) == flatten.input_shape
assert (None, 5) == flatten.output_shape
def test_forward_nothing_to_squeeze(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[None, 3, 4])
x = np.arange(24).reshape((2, 3, 4))
expected = x
actual = flatten.forward(x)
assert expected.shape == actual.shape
assert np.all(expected == actual)
def test_forward_invalid_input_shape(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[3, 4])
x = np.arange(4).reshape((2, 2))
with pytest.raises(ValueError):
flatten.forward(x)
def test_forward_with_squeeze(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[None, 1, 2, 2])
x = np.arange(8).reshape((2, 1, 2, 2))
expected = x.reshape((2, 2, 2))
actual = flatten.forward(x)
assert expected.shape == actual.shape
assert np.all(expected == actual)
def test_forward_batch_size_is_one(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[None, 1, 2, 2])
x = np.arange(4).reshape((1, 1, 2, 2))
expected = np.arange(4).reshape((1, 2, 2))
actual = flatten.forward(x)
assert expected.shape == actual.shape
assert np.all(expected == actual)
def test_train_backward_simple(self):
flatten = net.layers.Flatten()
flatten.build(input_shape=[None, 1, 1, 3])
x = np.arange(6).reshape((2, 1, 1, 3))
gradients = np.squeeze(2 * x)
expected = 2 * x
flatten.train_forward(x)
actual = flatten.train_backward(gradients)
assert expected.shape == actual.shape
assert np.all(expected == actual)
class TestSoftmax:
"""
Tests for Softmax layer
"""
def test_build_simple(self):
softmax = net.layers.Softmax()
softmax.build(input_shape=(None, 10))
assert (None, 10) == softmax.input_shape
assert (None, 10) == softmax.output_shape
def test_build_shape_more_than_2D(self):
softmax = net.layers.Softmax()
with pytest.raises(ValueError):
softmax.build(input_shape=(None, 20, 5))
def test_build_label_shape_less_than_two(self):
softmax = net.layers.Softmax()
with pytest.raises(ValueError):
softmax.build(input_shape=(None, 1))
def test_forward_simple(self):
softmax = net.layers.Softmax()
softmax.build(input_shape=(None, 2))
x = np.array(
[
[1, 2],
[1, 4]
])
expected = np.array(
[
[np.exp(1) / (np.exp(1) + np.exp(2)), np.exp(2) / (np.exp(1) + np.exp(2))],
[np.exp(1) / (np.exp(1) + np.exp(4)), np.exp(4) / (np.exp(1) + np.exp(4))]
]
)
actual = softmax.forward(x)
assert np.all(expected == actual)
def test_forward_input_dimension_larger_than_2(self):
softmax = net.layers.Softmax()
softmax.build(input_shape=(None, 2))
x = np.arange(16).reshape(2, 4, 2)
with pytest.raises(ValueError):
softmax.forward(x)
def test_forward_label_dimension_is_1(self):
softmax = net.layers.Softmax()
softmax.build(input_shape=(None, 2))
x = np.arange(10).reshape(10, 1)
with pytest.raises(ValueError):
softmax.forward(x)
def test_forward_very_large_inputs(self):
softmax = net.layers.Softmax()
softmax.build(input_shape=(None, 2))
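        # Inputs this large would overflow a naive exp(x); a numerically
        # stable implementation (e.g. one subtracting the row-wise max before
        # exponentiating) saturates each row to 0/1 instead.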
x = np.array(
[
[1, 2000],
[5000, 4]
])
expected = np.array(
[
[0, 1],
[1, 0]
]
)
actual = softmax.forward(x)
assert np.allclose(expected, actual)
def test_get_output_layer_error_gradients_simple(self):
softmax = net.layers.Softmax()
softmax.build(input_shape=(None, 2))
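        # The hand-computed expectations below match softmax(x) - y, the
        # output-layer error gradient for a cross-entropy loss.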
x = np.array([
[1, 2],
[1, 4],
[2, 3],
])
y = np.array([
[1, 0],
[1, 0],
[0, 1]
])
expected = np.array([
[0.269 - 1, 0.731],
[0.047 - 1, 0.9523],
[0.268, 0.731 - 1]
])
softmax.train_forward(x)
actual = softmax.get_output_layer_error_gradients(y)
assert expected.shape == actual.shape
assert np.allclose(expected, actual, atol=0.01)
class TestConvolution2D:
"""
Tests for Convolution2D layer
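
    The hand-computed backward-pass expectations in these tests follow the
    standard convolution gradients: bias gradients aggregate the output
    gradients per filter, kernel gradients correlate the inputs with the
    output gradients, and input gradients are the full convolution of the
    output gradients with the flipped kernels.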
"""
def test_build_simple(self):
convolution = net.layers.Convolution2D(filters=3, rows=4, columns=5)
convolution.build(input_shape=(None, 10, 10, 8))
assert (None, 10, 10, 8) == convolution.input_shape
assert (None, 7, 6, 3) == convolution.output_shape
assert (3, 4, 5, 8) == convolution.kernels.shape
assert (3,) == convolution.biases.shape
def test_build_input_not_4D(self):
convolution = net.layers.Convolution2D(filters=3, rows=4, columns=5)
with pytest.raises(ValueError):
convolution.build(input_shape=(None, 10, 10))
def test_forward_one_image_4x4x1_one_2x2x1_kernel(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 4, 4, 1))
images = np.array(
[
[1, 1, 0, 0],
[0, 0, 1, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]
]
).reshape((1, 4, 4, 1))
kernel = np.array([
[2, 3],
[1, 2]
]).reshape(1, 2, 2, 1)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([2])
expected = np.array([
[7, 6, 5],
[3, 5, 9],
[6, 5, 6]
]).reshape(1, 3, 3, 1)
actual = convolution.forward(images)
assert expected.shape == actual.shape
assert np.all(expected == actual)
def test_forward_single_4x4x1_image_two_2_2_1_kernels(self):
convolution = net.layers.Convolution2D(filters=2, rows=2, columns=2)
convolution.build(input_shape=(None, 4, 4, 1))
images = np.array(
[
[1, 1, 0, 0],
[0, 0, 1, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]
]
).reshape((1, 4, 4, 1))
first_kernel = np.array([
[2, 3],
[1, 2]
]).reshape(2, 2, 1)
second_kernel = np.array([
[-1, 2],
[4, 0]
]).reshape(2, 2, 1)
# Overwrite kernels with known values
convolution.kernels = np.array([first_kernel, second_kernel])
# Overwrite biases with known values
convolution.biases = np.array([2, -2])
expected_first_channel = np.array([
[7, 6, 5],
[3, 5, 9],
[6, 5, 6]
])
expected_second_channel = np.array([
[0, 0, 2],
[2, 0, 0],
[0, 2, 4]
])
expected = np.dstack([expected_first_channel, expected_second_channel]).reshape((1, 3, 3, 2))
actual = convolution.forward(images)
assert expected.shape == actual.shape
assert np.all(expected == actual)
def test_train_forward_one_4x4x1_image_one_2x2x1_kernel(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 4, 4, 1))
images = np.array(
[
[1, 1, 0, 0],
[0, 0, 1, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]
]
).reshape((1, 4, 4, 1))
kernel = np.array([
[2, 3],
[1, 2]
]).reshape(1, 2, 2, 1)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([2])
expected = np.array([
[7, 6, 5],
[3, 5, 9],
[6, 5, 6]
]).reshape(1, 3, 3, 1)
actual = convolution.train_forward(images)
assert np.all(images == convolution.last_input)
assert np.all(expected == actual)
assert np.all(expected == convolution.last_output)
def test_train_forward_two_2x2x1_images_one_2x2x1_kernel(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 2, 2, 1))
first_image = np.array([
[2, 3],
[-2, 0]
]).reshape(2, 2, 1)
second_image = np.array([
[-1, 1],
[3, -1]
]).reshape(2, 2, 1)
images = np.array([first_image, second_image])
kernel = np.array([
[2, 3],
[1, 2]
]).reshape(1, 2, 2, 1)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([-1])
expected = np.array([10, 1]).reshape(2, 1, 1, 1)
actual = convolution.forward(images)
assert np.all(expected == actual)
def test_train_forward_two_3x3x2_images_two_2x2x2_kernels(self):
convolution = net.layers.Convolution2D(filters=2, rows=2, columns=2)
convolution.build(input_shape=(None, 3, 3, 2))
first_image_first_channel = np.array([
[-1, 2, 3],
[1, 0, 1],
[2, 2, 0]
])
first_image_second_channel = np.array([
[0, 4, 2],
[-1, 1, 1],
[0, 1, 0]
])
first_image = np.dstack([first_image_first_channel, first_image_second_channel])
second_image_first_channel = np.array([
[3, -2, 0],
[1, 1, 2],
[0, 4, -2]
])
second_image_second_channel = np.array([
[1, -1, 2],
[0, 3, 1],
[1, 4, 0]
])
second_image = np.dstack([second_image_first_channel, second_image_second_channel])
images = np.array([first_image, second_image])
first_kernel_first_channel = np.array([
[-2, 0],
[1, 1]
])
first_kernel_second_channel = np.array([
[0, -1],
[2, 2]
])
first_kernel = np.dstack([first_kernel_first_channel, first_kernel_second_channel])
second_kernel_first_channel = np.array([
[1, -2],
[1, 0]
])
second_kernel_second_channel = np.array([
[3, 1],
[0, 1]
])
second_kernel = np.dstack([second_kernel_first_channel, second_kernel_second_channel])
convolution.kernels = np.array([first_kernel, second_kernel])
# Overwrite biases with known values
convolution.biases = np.array([3, -1])
expected_first_image_first_channel = np.array([
[2, 2],
[6, 6]
])
expected_first_image_second_channel = np.array([
[0, 10],
[1, 3]
])
expected_first_image = np.dstack([expected_first_image_first_channel, expected_first_image_second_channel])
expected_second_image_first_channel = np.array([
[6, 16],
[12, 10]
])
expected_second_image_second_channel = np.array([
[12, 0],
[5, 10]
])
expected_second_image = np.dstack([expected_second_image_first_channel, expected_second_image_second_channel])
expected_images = np.array([expected_first_image, expected_second_image])
actual = convolution.forward(images)
assert np.all(expected_images == actual)
def test_train_backward_simple_one_input_channel_and_one_output_channel_single_sample_2x2_image(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 2, 2, 1))
image = np.array([
[2, 3],
[5, 1]
]).reshape((1, 2, 2, 1))
kernel = np.array([
[2, -2],
[0, 1]
], dtype=np.float32).reshape(1, 2, 2, 1)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([2], dtype=np.float32)
expected_activation = np.array([1]).reshape(1, 1, 1, 1)
actual_activation = convolution.train_forward(image)
assert np.all(expected_activation == actual_activation)
gradients = np.array([0.5]).reshape(1, 1, 1, 1)
learning_rate = 1
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([1.5])
expected_kernels = np.array([
[1, -3.5],
[-2.5, 0.5]
]).reshape(1, 2, 2, 1)
assert np.all(expected_biases == convolution.biases)
assert np.all(expected_kernels == convolution.kernels)
expected_image_gradients = np.array([
[1, -1],
[0, 0.5]
]).reshape(1, 2, 2, 1)
assert np.all(expected_image_gradients == actual_image_gradients)
def test_train_backward_simple_one_input_channel_and_one_output_channel_single_sample_3x3_image(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 3, 3, 1))
image = np.array([
[2, 0, -1],
[1, 1, 2],
[3, -2, 0]
]).reshape((1, 3, 3, 1))
kernel = np.array([
[2, -2],
[0, 1]
], dtype=np.float32).reshape(1, 2, 2, 1)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([4], dtype=np.float32)
expected_activation = np.array([
[9, 8],
[2, 2]
]).reshape(1, 2, 2, 1)
actual_activation = convolution.train_forward(image)
assert np.all(expected_activation == actual_activation)
gradients = np.array([
[1, 2],
[-1, -2]
]).reshape(1, 2, 2, 1)
learning_rate = 1
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([4])
expected_kernels = np.array([
[3, 5],
[-4, -6]
]).reshape(1, 2, 2, 1)
assert np.all(expected_biases == convolution.biases)
assert np.all(expected_kernels == convolution.kernels)
expected_image_gradients = np.array([
[2, 2, -4],
[-2, -1, 6],
[0, -1, -2]
]).reshape(1, 3, 3, 1)
assert np.all(expected_image_gradients == actual_image_gradients)
def test_train_backward_2x2x2_image_3_filters(self):
convolution = net.layers.Convolution2D(filters=3, rows=2, columns=2)
convolution.build(input_shape=(None, 2, 2, 2))
first_channel = np.array([
[1, 3],
[4, 0]
])
second_channel = np.array([
[2, 0],
[1, -2]
])
image = np.dstack([first_channel, second_channel]).reshape(1, 2, 2, 2)
first_kernel_first_channel = np.array([
[2, -2],
[0, 1]
], dtype=np.float32)
first_kernel_second_channel = np.array([
[1, 0],
[0, 1]
], dtype=np.float32)
first_kernel = np.dstack([first_kernel_first_channel, first_kernel_second_channel])
second_kernel_first_channel = np.array([
[-1, 3],
[0, 1]
], dtype=np.float32)
second_kernel_second_channel = np.array([
[1, 1],
[0, 0]
], dtype=np.float32)
second_kernel = np.dstack([second_kernel_first_channel, second_kernel_second_channel])
third_kernel_first_channel = np.array([
[-2, 0],
[1, 1]
], dtype=np.float32)
third_kernel_second_channel = np.array([
[0, 1],
[3, 0]
], dtype=np.float32)
third_kernel = np.dstack([third_kernel_first_channel, third_kernel_second_channel])
kernels = np.array([first_kernel, second_kernel, third_kernel])
# Overwrite kernels with known values
convolution.kernels = kernels
# Overwrite biases with known values
convolution.biases = np.array([2, -2, 3], dtype=np.float32)
expected_activation = np.array([
[0, 8, 8],
]).reshape(1, 1, 1, 3)
actual_activation = convolution.train_forward(image)
assert np.all(expected_activation == actual_activation)
gradients = np.array([
[1, 2, -4]
]).reshape(1, 1, 1, 3)
learning_rate = 1
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([2, -4, 7])
assert np.all(expected_biases == convolution.biases)
first_kernel_first_channel_expected = np.array([
[2, -2],
[0, 1]
])
first_kernel_second_channel_expected = np.array([
[1, 0],
[0, 1]
])
first_kernel_expected = np.dstack([first_kernel_first_channel_expected, first_kernel_second_channel_expected])
second_kernel_first_channel_expected = np.array([
[-3, -3],
[-8, 1]
])
second_kernel_second_channel_expected = np.array([
[-3, 1],
[-2, 4]
])
second_kernel_expected = np.dstack([second_kernel_first_channel_expected, second_kernel_second_channel_expected])
third_kernel_first_channel_expected = np.array([
[2, 12],
[17, 1]
])
third_kernel_second_channel_expected = np.array([
[8, 1],
[7, -8]
])
third_kernel_expected = np.dstack(
[third_kernel_first_channel_expected, third_kernel_second_channel_expected])
expected_kernels = np.array([first_kernel_expected, second_kernel_expected, third_kernel_expected])
assert np.all(expected_kernels == convolution.kernels)
expected_image_gradients_first_channel = np.array([
[6, 6],
[-4, -2]
])
expected_image_gradients_second_channel = np.array([
[2, -2],
[-12, 0]
])
expected_image_gradients = np.dstack(
[expected_image_gradients_first_channel, expected_image_gradients_second_channel]).reshape((1, 2, 2, 2))
assert np.all(expected_image_gradients == actual_image_gradients)
def test_train_backward_one_3x3x2_image_2_kernels_2x2x2(self):
convolution = net.layers.Convolution2D(filters=2, rows=2, columns=2)
convolution.build(input_shape=(None, 3, 3, 2))
first_channel = np.array([
[1, 2, 0],
[-1, 0, 3],
[2, 2, 0]
])
second_channel = np.array([
[0, -2, 1],
[3, 1, 1],
[1, 2, 0]
])
image = np.dstack([first_channel, second_channel]).reshape(1, 3, 3, 2)
first_kernel_first_channel = np.array([
[2, 0],
[1, 1]
])
first_kernel_second_channel = np.array([
[1, -1],
[2, 0]
])
first_kernel = np.dstack([first_kernel_first_channel, first_kernel_second_channel])
second_kernel_first_channel = np.array([
[-1, 3],
[1, 0]
])
second_kernel_second_channel = np.array([
[2, -1],
[1, 1]
])
second_kernel = np.dstack([second_kernel_first_channel, second_kernel_second_channel])
kernels = np.array([first_kernel, second_kernel], dtype=np.float32)
# Overwrite kernels with known values
convolution.kernels = kernels
# Overwrite biases with known values
convolution.biases = np.array([-1, 4], dtype=np.float32)
expected_activation_first_channel = np.array([
[8, 5],
[5, 5]
])
expected_activation_second_channel = np.array([
[14, 0],
[15, 18]
])
expected_activation = np.dstack([expected_activation_first_channel, expected_activation_second_channel])\
.reshape(1, 2, 2, 2)
actual_activation = convolution.train_forward(image)
assert np.all(expected_activation == actual_activation)
first_channel_gradients = np.array([
[-1, 2],
[1, 3]
])
second_channel_gradients = np.array([
[1, 0],
[2, -4]
])
gradients = np.dstack([first_channel_gradients, second_channel_gradients]).reshape(1, 2, 2, 2)
learning_rate = 1
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([-6, 5])
assert np.all(expected_biases == convolution.biases)
first_kernel_first_channel_expected = np.array([
[0, -7],
[-8, -7]
])
first_kernel_second_channel_expected = np.array([
[-1, -9],
[-4, -3]
])
first_kernel_expected = np.dstack([first_kernel_first_channel_expected, first_kernel_second_channel_expected])
second_kernel_first_channel_expected = np.array([
[0, 13],
[6, -4]
])
second_kernel_second_channel_expected = np.array([
[0, 3],
[4, -4]
])
second_kernel_expected = np.dstack([second_kernel_first_channel_expected, second_kernel_second_channel_expected])
expected_kernels = np.array([first_kernel_expected, second_kernel_expected])
assert np.all(expected_kernels == convolution.kernels)
expected_image_gradients_first_channel = np.array([
[-3, 7, 0],
[0, 17, -10],
[3, 0, 3]
])
expected_image_gradients_second_channel = np.array([
[1, 2, -2],
[4, -3, 1],
[4, 4, -4]
])
expected_image_gradients = np.dstack(
[expected_image_gradients_first_channel, expected_image_gradients_second_channel])
assert np.all(expected_image_gradients == actual_image_gradients)
def test_train_backward_two_2x2x1_images_one_2x2x1_kernel(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 2, 2, 1))
first_image = np.array([
[1, -2],
[4, 3]
]).reshape(2, 2, 1)
second_image = np.array([
[2, 1],
[3, 2]
]).reshape(2, 2, 1)
images = np.array([first_image, second_image]).astype(np.float32)
kernel = np.array([
[1, 2],
[4, -1]
]).reshape(1, 2, 2, 1).astype(np.float32)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([2], dtype=np.float32)
expected_activation = np.array([12, 16]).reshape(2, 1, 1, 1)
actual_activation = convolution.train_forward(images)
assert np.all(expected_activation == actual_activation)
gradients = np.array([1, 2]).reshape(2, 1, 1, 1).astype(np.float32)
learning_rate = 1
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([0.5])
assert np.all(expected_biases == convolution.biases)
expected_kernels = np.array([
[-1.5, 2],
[-1, -4.5]
]).reshape(1, 2, 2, 1)
assert np.all(expected_kernels == convolution.kernels)
expected_first_image_gradients = np.array([
[1, 2],
[4, -1]
]).reshape((2, 2, 1))
expected_second_image_gradients = np.array([
[2, 4],
[8, -2]
]).reshape((2, 2, 1))
expected_image_gradients = np.array([expected_first_image_gradients, expected_second_image_gradients])
assert np.all(expected_image_gradients == actual_image_gradients)
def test_train_backward_two_2x2x2_images_one_2x2x2_kernel(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 2, 2, 2))
first_image_first_channel = np.array([
[1, 0],
[-2, 3]
])
first_image_second_channel = np.array([
[1, 1],
[0, 2]
])
first_image = np.dstack([first_image_first_channel, first_image_second_channel])
second_image_first_channel = np.array([
[3, -1],
[0, 1]
])
second_image_second_channel = np.array([
[0, 3],
[-1, 2]
])
second_image = np.dstack([second_image_first_channel, second_image_second_channel])
images = np.array([first_image, second_image])
kernel_first_channel = np.array([
[3, 2],
[-1, 0]
])
kernel_second_channel = np.array([
[0, -2],
[1, 0]
])
kernel = np.dstack([kernel_first_channel, kernel_second_channel]).reshape(1, 2, 2, 2).astype(np.float32)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([3], dtype=np.float32)
expected_first_activation = np.array([6]).reshape(1, 1, 1)
expected_second_activation = np.array([3]).reshape(1, 1, 1)
expected_activation = np.array([expected_first_activation, expected_second_activation])
actual_activation = convolution.train_forward(images)
assert np.all(expected_activation == actual_activation)
gradients = np.array([2, 4]).reshape(2, 1, 1, 1).astype(np.float32)
learning_rate = 1
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([0])
assert np.all(expected_biases == convolution.biases)
expected_kernels_first_channel = np.array([
[-4, 4],
[1, -5]
])
expected_kernels_second_channel = np.array([
[-1, -9],
[3, -6]
])
expected_kernels = np.dstack(
[expected_kernels_first_channel, expected_kernels_second_channel]).reshape(1, 2, 2, 2)
assert np.all(expected_kernels == convolution.kernels)
expected_first_image_gradients_first_channel = np.array([
[6, 4],
[-2, 0]
])
expected_first_image_gradients_second_channel = np.array([
[0, -4],
[2, 0]
])
expected_first_image_gradients = np.dstack(
[expected_first_image_gradients_first_channel, expected_first_image_gradients_second_channel])
expected_second_image_gradients_first_channel = np.array([
[12, 8],
[-4, 0]
])
expected_second_image_gradients_second_channel = np.array([
[0, -8],
[4, 0]
])
expected_second_image_gradients = np.dstack(
[expected_second_image_gradients_first_channel, expected_second_image_gradients_second_channel])
expected_image_gradients = np.array([expected_first_image_gradients, expected_second_image_gradients])
assert np.all(actual_image_gradients == expected_image_gradients)
def test_train_backward_two_3x3x2_images_one_2x2x2_kernel(self):
convolution = net.layers.Convolution2D(filters=1, rows=2, columns=2)
convolution.build(input_shape=(None, 3, 3, 2))
first_image_first_channel = np.array([
[1, 0, 2],
[1, -2, 3],
[-2, 0, 3]
])
first_image_second_channel = np.array([
[1, 1, -4],
[3, 0, 2],
[-1, -1, 2]
])
first_image = np.dstack([first_image_first_channel, first_image_second_channel])
second_image_first_channel = np.array([
[-1, 2, 0],
[1, 1, 1],
[0, 0, 3]
])
second_image_second_channel = np.array([
[2, 0, -1],
[1, 1, 0],
[-1, -1, 1]
])
second_image = np.dstack([second_image_first_channel, second_image_second_channel])
images = np.array([first_image, second_image])
kernel_first_channel = np.array([
[1, 2],
[-1, 0]
])
kernel_second_channel = np.array([
[0, 6],
[-3, 0]
])
kernel = np.dstack([kernel_first_channel, kernel_second_channel]).reshape(1, 2, 2, 2).astype(np.float32)
# Overwrite kernels with known values
convolution.kernels = kernel
# Overwrite biases with known values
convolution.biases = np.array([2], dtype=np.float32)
expected_first_activation = np.array([
[0, 0],
[4, 21]
]).reshape(2, 2, 1)
expected_second_activation = np.array([
[1, 0],
[14, 8]
]).reshape(2, 2, 1)
expected_activation = np.array([expected_first_activation, expected_second_activation])
actual_activation = convolution.train_forward(images)
assert np.all(expected_activation == actual_activation)
first_image_gradients = np.array([
[2, -2],
[1, 1]
]).reshape(2, 2, 1)
second_image_gradients = np.array([
[3, 1],
[-1, 2]
]).reshape(2, 2, 1)
gradients = np.array([first_image_gradients, second_image_gradients])
learning_rate = 2
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([-4])
assert np.all(expected_biases == convolution.biases)
expected_kernels_first_channel = np.array([
[4, -6],
[-2, -12]
])
expected_kernels_second_channel = np.array([
[-10, 5],
[-3, -7]
])
expected_kernels = np.dstack(
[expected_kernels_first_channel, expected_kernels_second_channel]).reshape(1, 2, 2, 2)
assert np.all(expected_kernels == convolution.kernels)
expected_first_image_gradients_first_channel = np.array([
[0, 0, 0],
[1, 3, 2],
[-1, -1, 0]
])
expected_first_image_gradients_second_channel = np.array([
[0, 0, 0],
[0, 6, 6],
[-3, -3, 0]
])
expected_first_image_gradients = np.dstack(
[expected_first_image_gradients_first_channel, expected_first_image_gradients_second_channel])
expected_second_image_gradients_first_channel = np.array([
[3, 6, 0],
[-4, 0, 4],
[1, -2, 0]
])
expected_second_image_gradients_second_channel = np.array([
[0, 18, 0],
[-9, -6, 12],
[3, -6, 0]
])
expected_second_image_gradients = np.dstack(
[expected_second_image_gradients_first_channel, expected_second_image_gradients_second_channel])
expected_image_gradients = np.array([expected_first_image_gradients, expected_second_image_gradients])
assert np.all(expected_image_gradients == actual_image_gradients)
def test_train_backward_two_3x3x2_images_two_2x2x2_kernels(self):
convolution = net.layers.Convolution2D(filters=2, rows=2, columns=2)
convolution.build(input_shape=(None, 3, 3, 2))
first_image_first_channel = np.array([
[3, 0, 1],
[1, 1, 0],
[-1, -1, 0]
])
first_image_second_channel = np.array([
[2, 0, -1],
[1, 1, 2],
[0, -2, 1]
])
first_image = np.dstack([first_image_first_channel, first_image_second_channel])
second_image_first_channel = np.array([
[0, 1, 1],
[0, 1, 0],
[-3, -1, 1]
])
second_image_second_channel = np.array([
[0, 2, -2],
[4, 0, 1],
[-1, -1, 0]
])
second_image = np.dstack([second_image_first_channel, second_image_second_channel])
images = np.array([first_image, second_image])
first_kernel_first_channel = np.array([
[1, 1],
[0, -2]
])
first_kernel_second_channel = np.array([
[0, 1],
[2, 0]
])
first_kernel = np.dstack([first_kernel_first_channel, first_kernel_second_channel])
second_kernel_first_channel = np.array([
[-1, 0],
[1, 0]
])
second_kernel_second_channel = np.array([
[2, -4],
[3, 0]
])
second_kernel = np.dstack([second_kernel_first_channel, second_kernel_second_channel])
kernels = np.array([first_kernel, second_kernel], dtype=np.float32)
# Overwrite kernels with known values
convolution.kernels = kernels
# Overwrite biases with known values
convolution.biases = np.array([1, 4], dtype=np.float32)
expected_first_activation_first_channel = np.array([
[4, 3],
[6, 0]
]).reshape(2, 2, 1)
expected_first_activation_second_channel = np.array([
[9, 12],
[0, 0]
]).reshape(2, 2, 1)
expected_first_activation = np.dstack(
[expected_first_activation_first_channel, expected_first_activation_second_channel])
expected_second_activation_first_channel = np.array([
[10, 1],
[2, 0]
]).reshape(2, 2, 1)
expected_second_activation_second_channel = np.array([
[8, 16],
[6, 0]
]).reshape(2, 2, 1)
expected_second_activation = np.dstack(
[expected_second_activation_first_channel, expected_second_activation_second_channel])
expected_activations = np.array([expected_first_activation, expected_second_activation])
actual_activations = convolution.train_forward(images)
assert np.all(expected_activations == actual_activations)
first_image_gradients_first_channel = np.array([
[1, -1],
[0, 3]
]).reshape(2, 2, 1)
first_image_gradients_second_channel = np.array([
[3, 2],
[0, 1]
]).reshape(2, 2, 1)
first_image_gradients = np.dstack(
[first_image_gradients_first_channel, first_image_gradients_second_channel])
second_image_gradients_first_channel = np.array([
[2, 1],
[0, 2]
]).reshape(2, 2, 1)
second_image_gradients_second_channel = np.array([
[0, 3],
[1, 1]
]).reshape(2, 2, 1)
second_image_gradients = np.dstack(
[second_image_gradients_first_channel, second_image_gradients_second_channel])
gradients = np.array([first_image_gradients, second_image_gradients])
learning_rate = 2
actual_image_gradients = convolution.train_backward(gradients, learning_rate)
expected_biases = np.array([-2, -5])
assert np.all(expected_biases == convolution.biases)
expected_first_kernel_first_channel = np.array([
[-3, -1],
[-1, -5]
])
expected_first_kernel_second_channel = np.array([
[-4, -2],
[-6, 0]
])
expected_first_kernel = np.dstack([expected_first_kernel_first_channel, expected_first_kernel_second_channel])
expected_second_kernel_first_channel = np.array([
[-13, -6],
[-4, -2]
])
expected_second_kernel_second_channel = np.array([
[-14, 4],
[-1, -9]
])
expected_second_kernel = np.dstack([expected_second_kernel_first_channel, expected_second_kernel_second_channel])
expected_kernels = np.array([expected_first_kernel, expected_second_kernel])
assert np.all(expected_kernels == convolution.kernels)
expected_first_image_gradients_first_channel = np.array([
[-2, -2, -1],
[3, 0, 2],
[0, 0, 0]
])
expected_first_image_gradients_second_channel = np.array([
[6, -7, -9],
[11, 4, 0],
[0, 0, 0]
])
expected_first_image_gradients = np.dstack(
[expected_first_image_gradients_first_channel, expected_first_image_gradients_second_channel])
expected_second_image_gradients_first_channel = np.array([
[2, 0, 1],
[-1, -1, -2],
[1, 0, 0]
])
expected_second_image_gradients_second_channel = np.array([
[0, 8, -11],
[6, 7, 0],
[3, 0, 0]
])
expected_second_image_gradients = np.dstack(
[expected_second_image_gradients_first_channel, expected_second_image_gradients_second_channel])
expected_image_gradients = np.array([expected_first_image_gradients, expected_second_image_gradients])
assert np.all(expected_image_gradients == actual_image_gradients)
|
GPCsolutions/mod-webui | refs/heads/master | module/plugins/eue/test.py | 3 |
from pymongo.connection import Connection
id="GLPI"
records = []
con = Connection('localhost')
db = con.shinken
app = None # app is not used otherwise
eueid = ""
parts = eueid.split(".")
parts.pop(0)
id = ".".join(parts)
for feature in db.eue.find({'key': { '$regex': id } }).sort("start_time",1).limit(100):
date = feature["start_time"]
failed = 0
succeed = 0
total = 0
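    # A scenario status of 0 means success; any other status counts as a failure.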
    for scenario, scenario_data in feature["scenarios"].items():
if scenario_data["status"] == 0:
succeed += 1
else:
failed += 1
total = succeed + failed
records.append({
"date" : int(date),
"succeed" : succeed,
"failed" : failed,
"total" : total
})
print records
|
gavioto/tapiriik | refs/heads/master | tapiriik/services/RideWithGPS/rwgps.py | 9 | import os
from datetime import datetime, timedelta
import dateutil.parser
import pytz
from dateutil.tz import tzutc
import requests
from django.core.urlresolvers import reverse
from tapiriik.settings import WEB_ROOT, RWGPS_APIKEY
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.database import cachedb
from tapiriik.services.interchange import UploadedActivity, ActivityType, Waypoint, WaypointType, Location, ActivityStatistic, ActivityStatisticUnit
from tapiriik.services.api import APIException, APIWarning, APIExcludeActivity, UserException, UserExceptionType
from tapiriik.services.fit import FITIO
from tapiriik.services.tcx import TCXIO
from tapiriik.services.sessioncache import SessionCache
import logging
logger = logging.getLogger(__name__)
class RideWithGPSService(ServiceBase):
ID = "rwgps"
DisplayName = "Ride With GPS"
DisplayAbbreviation = "RWG"
AuthenticationType = ServiceAuthenticationType.UsernamePassword
RequiresExtendedAuthorizationDetails = True
    # RWGPS does have a "recreation_types" list, but it is not actually used anywhere (yet)
# (This is a subset of the things returned by that list for future reference...)
_activityMappings = {
"running": ActivityType.Running,
"cycling": ActivityType.Cycling,
"mountain biking": ActivityType.MountainBiking,
"Hiking": ActivityType.Hiking,
"all": ActivityType.Other # everything will eventually resolve to this
}
SupportedActivities = [ActivityType.Cycling, ActivityType.MountainBiking]
SupportsHR = SupportsCadence = True
_sessionCache = SessionCache("rwgps", lifetime=timedelta(minutes=30), freshen_on_get=True)
def _add_auth_params(self, params=None, record=None):
"""
Adds apikey and authorization (email/password) to the passed-in params,
returns modified params dict.
"""
from tapiriik.auth.credential_storage import CredentialStore
if params is None:
params = {}
params['apikey'] = RWGPS_APIKEY
if record:
cached = self._sessionCache.Get(record.ExternalID)
if cached:
return cached
password = CredentialStore.Decrypt(record.ExtendedAuthorization["Password"])
email = CredentialStore.Decrypt(record.ExtendedAuthorization["Email"])
params['email'] = email
params['password'] = password
return params
def WebInit(self):
self.UserAuthorizationURL = WEB_ROOT + reverse("auth_simple", kwargs={"service": self.ID})
def Authorize(self, email, password):
from tapiriik.auth.credential_storage import CredentialStore
res = requests.get("https://ridewithgps.com/users/current.json",
params={'email': email, 'password': password, 'apikey': RWGPS_APIKEY})
if res.status_code == 401:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
res.raise_for_status()
res = res.json()
if res["user"] is None:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
member_id = res["user"]["id"]
if not member_id:
raise APIException("Unable to retrieve id", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
return (member_id, {}, {"Email": CredentialStore.Encrypt(email), "Password": CredentialStore.Encrypt(password)})
def _duration_to_seconds(self, s):
"""
Converts a duration in form HH:MM:SS to number of seconds for use in timedelta construction.
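        E.g. "1:02:03" -> 3723; shorter forms such as "02:03" are zero-padded.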
"""
hours, minutes, seconds = (["0", "0"] + s.split(":"))[-3:]
hours = int(hours)
minutes = int(minutes)
seconds = float(seconds)
        total_seconds = int(hours * 3600 + minutes * 60 + seconds)
return total_seconds
def DownloadActivityList(self, serviceRecord, exhaustive=False):
def mapStatTriple(act, stats_obj, key, units):
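            # RWGPS reports summary stats as <key>_max / <key>_min / <key>_avg
            # fields; copy whichever of the three are present onto the
            # corresponding ActivityStatistic.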
if "%s_max" % key in act and act["%s_max" % key]:
stats_obj.update(ActivityStatistic(units, max=float(act["%s_max" % key])))
if "%s_min" % key in act and act["%s_min" % key]:
stats_obj.update(ActivityStatistic(units, min=float(act["%s_min" % key])))
if "%s_avg" % key in act and act["%s_avg" % key]:
stats_obj.update(ActivityStatistic(units, avg=float(act["%s_avg" % key])))
# http://ridewithgps.com/users/1/trips.json?limit=200&order_by=created_at&order_dir=asc
# offset also supported
activities = []
exclusions = []
# They don't actually support paging right now, for whatever reason
params = self._add_auth_params({}, record=serviceRecord)
res = requests.get("http://ridewithgps.com/users/{}/trips.json".format(serviceRecord.ExternalID), params=params)
res = res.json()
# Apparently some API users are seeing this new result format - I'm not
if type(res) is dict:
res = res.get("results", [])
if res == []:
return [], [] # No activities
for act in res:
if "distance" not in act:
exclusions.append(APIExcludeActivity("No distance", activity_id=act["id"], user_exception=UserException(UserExceptionType.Corrupt)))
continue
if "duration" not in act or not act["duration"]:
exclusions.append(APIExcludeActivity("No duration", activity_id=act["id"], user_exception=UserException(UserExceptionType.Corrupt)))
continue
activity = UploadedActivity()
logger.debug("Name " + act["name"] + ":")
if len(act["name"].strip()):
activity.Name = act["name"]
if len(act["description"].strip()):
activity.Notes = act["description"]
activity.GPS = act["is_gps"]
activity.Stationary = not activity.GPS # I think
# 0 = public, 1 = private, 2 = friends
activity.Private = act["visibility"] == 1
activity.StartTime = dateutil.parser.parse(act["departed_at"])
try:
activity.TZ = pytz.timezone(act["time_zone"])
except pytz.exceptions.UnknownTimeZoneError:
                # Sometimes the time_zone returned isn't quite what we'd like it to be,
                # so just pull the offset from the datetime instead
if isinstance(activity.StartTime.tzinfo, tzutc):
activity.TZ = pytz.utc # The dateutil tzutc doesn't have an _offset value.
else:
activity.TZ = pytz.FixedOffset(activity.StartTime.tzinfo.utcoffset(activity.StartTime).total_seconds() / 60)
            activity.StartTime = activity.StartTime.replace(tzinfo=activity.TZ) # Overwrite dateutil's silliness
activity.EndTime = activity.StartTime + timedelta(seconds=self._duration_to_seconds(act["duration"]))
logger.debug("Activity s/t " + str(activity.StartTime))
activity.AdjustTZ()
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, float(act["distance"]))
mapStatTriple(act, activity.Stats.Power, "watts", ActivityStatisticUnit.Watts)
mapStatTriple(act, activity.Stats.Speed, "speed", ActivityStatisticUnit.KilometersPerHour)
mapStatTriple(act, activity.Stats.Cadence, "cad", ActivityStatisticUnit.RevolutionsPerMinute)
mapStatTriple(act, activity.Stats.HR, "hr", ActivityStatisticUnit.BeatsPerMinute)
if "elevation_gain" in act and act["elevation_gain"]:
activity.Stats.Elevation.update(ActivityStatistic(ActivityStatisticUnit.Meters, gain=float(act["elevation_gain"])))
if "elevation_loss" in act and act["elevation_loss"]:
activity.Stats.Elevation.update(ActivityStatistic(ActivityStatisticUnit.Meters, loss=float(act["elevation_loss"])))
            # Activity type is not implemented yet in RWGPS results; we will assume cycling, though perhaps "OTHER" would be correct
activity.Type = ActivityType.Cycling
activity.CalculateUID()
activity.ServiceData = {"ActivityID": act["id"]}
activities.append(activity)
return activities, exclusions
def DownloadActivity(self, serviceRecord, activity):
if activity.Stationary:
            return activity # Nothing more to download - it doesn't serve these files for manually entered activities
# https://ridewithgps.com/trips/??????.tcx
activityID = activity.ServiceData["ActivityID"]
res = requests.get("https://ridewithgps.com/trips/{}.tcx".format(activityID),
params=self._add_auth_params({'sub_format': 'history'}, record=serviceRecord))
try:
TCXIO.Parse(res.content, activity)
except ValueError as e:
raise APIExcludeActivity("TCX parse error " + str(e), user_exception=UserException(UserExceptionType.Corrupt))
return activity
def UploadActivity(self, serviceRecord, activity):
# https://ridewithgps.com/trips.json
fit_file = FITIO.Dump(activity)
files = {"data_file": ("tap-sync-" + str(os.getpid()) + "-" + activity.UID + ".fit", fit_file)}
params = {}
params['trip[name]'] = activity.Name
params['trip[description]'] = activity.Notes
if activity.Private:
params['trip[visibility]'] = 1 # Yes, this logic seems backwards but it's how it works
res = requests.post("https://ridewithgps.com/trips.json", files=files,
params=self._add_auth_params(params, record=serviceRecord))
        if res.status_code // 100 == 4:
raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
res.raise_for_status()
res = res.json()
if res["success"] != 1:
raise APIException("Unable to upload activity")
def RevokeAuthorization(self, serviceRecord):
# nothing to do here...
pass
def DeleteCachedData(self, serviceRecord):
# nothing cached...
pass
|
biospi/mzmlb | refs/heads/master | pwiz/libraries/boost-build/tools/types/asm.py | 26 | # Copyright Craig Rodrigues 2005.
# Copyright (c) 2008 Steven Watanabe
#
# Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from b2.build import type as type_
from b2.manager import get_manager
from b2.tools.cast import cast
from b2.util import bjam_signature
MANAGER = get_manager()
PROJECT_REGISTRY = MANAGER.projects()
# maps project.name() + type to type
_project_types = {}
type_.register_type('ASM', ['s', 'S', 'asm'])
@bjam_signature((['type_'], ['sources', '*'], ['name', '?']))
def set_asm_type(type_, sources, name=''):
project = PROJECT_REGISTRY.current()
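    # Each call appends one more '_' for this (project, type) pair, so repeated
    # set-asm-type calls get unique default target names.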
_project_types[project.name() + type_] = _project_types.get(
project.name() + type_, type_) + '_'
name = name if name else _project_types[project.name() + type_]
type_ += '.asm'
cast(name, type_.upper(), sources, [], [], [])
PROJECT_REGISTRY.add_rule("set-asm-type", set_asm_type)
|
kcompher/thunder | refs/heads/master | thunder/factorization/svd.py | 8 | """
Class for performing Singular Value Decomposition
"""
from numpy import zeros, shape
from thunder.utils.common import checkParams
from thunder.rdds.series import Series
from thunder.rdds.matrices import RowMatrix
class SVD(object):
"""
Singular value decomposition on a distributed matrix.
Parameters
----------
k : int, optional, default = 3
Number of singular vectors to estimate
method : string, optional, default = "auto"
Whether to use a direct or iterative method.
If set to 'direct', will compute the SVD with direct gramian matrix estimation and eigenvector decomposition.
If set to 'em', will approximate the SVD using iterative expectation-maximization algorithm.
If set to 'auto', will use 'em' if number of columns in input data exceeds 750, otherwise will use 'direct'.
maxIter : int, optional, default = 20
Maximum number of iterations if using an iterative method
tol : float, optional, default = 0.00001
Tolerance for convergence of iterative algorithm
Attributes
----------
`u` : RowMatrix, nrows, each of shape (k,)
Left singular vectors
`s` : array, shape(nrows,)
Singular values
`v` : array, shape (k, ncols)
Right singular vectors
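
    Example
    -------
    A minimal sketch (assumes `mat` is an existing Series or RowMatrix):

    >>> model = SVD(k=5, method='em').calc(mat)
    >>> u, s, v = model.u, model.s, model.v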
"""
def __init__(self, k=3, method="auto", maxIter=20, tol=0.00001):
self.k = k
self.method = method
self.maxIter = maxIter
self.tol = tol
self.u = None
self.s = None
self.v = None
def calc(self, mat):
"""
        Calculate singular vectors
Parameters
----------
mat : Series or a subclass (e.g. RowMatrix)
Matrix to compute singular vectors from
Returns
----------
self : returns an instance of self.
"""
from numpy import argsort, dot, outer, random, sqrt, sum
from scipy.linalg import inv, orth
from numpy.linalg import eigh
if not (isinstance(mat, Series)):
raise Exception('Input must be Series or a subclass (e.g. RowMatrix)')
if not (isinstance(mat, RowMatrix)):
mat = mat.toRowMatrix()
checkParams(self.method, ['auto', 'direct', 'em'])
if self.method == 'auto':
if len(mat.index) < 750:
method = 'direct'
else:
method = 'em'
else:
method = self.method
if method == 'direct':
# get the normalized gramian matrix
cov = mat.gramian() / mat.nrows
# do a local eigendecomposition
eigw, eigv = eigh(cov)
inds = argsort(eigw)[::-1]
s = sqrt(eigw[inds[0:self.k]]) * sqrt(mat.nrows)
v = eigv[:, inds[0:self.k]].T
# project back into data, normalize by singular values
u = mat.times(v.T / s)
self.u = u
self.s = s
self.v = v
if method == 'em':
# initialize random matrix
c = random.rand(self.k, mat.ncols)
niter = 0
error = 100
# define an accumulator
from pyspark.accumulators import AccumulatorParam
class MatrixAccumulatorParam(AccumulatorParam):
def zero(self, value):
return zeros(shape(value))
def addInPlace(self, val1, val2):
val1 += val2
return val1
# define an accumulator function
global runSum
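            # Each row x contributes outer(x, dot(x, preMult2)), so runSum
            # accumulates mat' * (mat * preMult2) without collecting the
            # matrix on the driver.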
def outerSumOther(x, y):
global runSum
runSum += outer(x, dot(x, y))
# iterative update subspace using expectation maximization
# e-step: x = (c'c)^-1 c' y
# m-step: c = y x' (xx')^-1
while (niter < self.maxIter) & (error > self.tol):
cOld = c
# pre compute (c'c)^-1 c'
cInv = dot(c.T, inv(dot(c, c.T)))
# compute (xx')^-1 through a map reduce
xx = mat.times(cInv).gramian()
xxInv = inv(xx)
# pre compute (c'c)^-1 c' (xx')^-1
preMult2 = mat.rdd.context.broadcast(dot(cInv, xxInv))
# compute the new c using an accumulator
            # direct approach: c = mat.rows().map(lambda x: outer(x, dot(x, preMult2.value))).sum()
runSum = mat.rdd.context.accumulator(zeros((mat.ncols, self.k)), MatrixAccumulatorParam())
mat.rows().foreach(lambda x: outerSumOther(x, preMult2.value))
c = runSum.value
# transpose result
c = c.T
error = sum(sum((c - cOld) ** 2))
niter += 1
# project data into subspace spanned by columns of c
# use standard eigendecomposition to recover an orthonormal basis
c = orth(c.T)
cov = mat.times(c).gramian() / mat.nrows
eigw, eigv = eigh(cov)
inds = argsort(eigw)[::-1]
s = sqrt(eigw[inds[0:self.k]]) * sqrt(mat.nrows)
v = dot(eigv[:, inds[0:self.k]].T, c.T)
u = mat.times(v.T / s)
self.u = u
self.s = s
self.v = v
return self
|
gangadhar-kadam/sms-erpnext | refs/heads/master | selling/doctype/sales_order_item/sales_order_item.py | 483 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl |
Marco57/gsb | refs/heads/master | vendor/doctrine/orm/docs/en/conf.py | 2448 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
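# linkcode_resolve above is effectively a stub: every object in the custom
# 'dcorm' domain is linked to a bare 'http://'. For comparison, a hedged
# sketch of a fuller resolver in the style of sphinx.ext.linkcode; the
# GitHub URL and the 'py'-domain info keys below are illustrative
# assumptions, not taken from this project.
def linkcode_resolve_example(domain, info):
    # map a documented Python object to its source file on GitHub
    if domain != 'py' or not info.get('module'):
        return None
    path = info['module'].replace('.', '/')
    return "https://github.com/example/project/blob/master/%s.py" % path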
|
amir343/ansible | refs/heads/devel | lib/ansible/inventory/script.py | 80 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import subprocess
import sys
from collections import Mapping
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. '''
def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
self._loader = loader
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
# directory when '.' is not in PATH.
self.filename = os.path.abspath(filename)
cmd = [ self.filename, "--list" ]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
self.host_vars_from_top = None
self.groups = self._parse(stderr)
def _parse(self, err):
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
try:
self.raw = self._loader.load(self.data)
except Exception as e:
sys.stderr.write(err + "\n")
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e)))
if not isinstance(self.raw, Mapping):
sys.stderr.write(err + "\n")
raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename))
self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all')
groups = dict(all=all)
group = None
for (group_name, data) in self.raw.items():
# in Ansible 1.3 and later, a "_meta" subelement may contain
# a variable "hostvars" which contains a hash for each host
# if this "hostvars" exists at all then do not call --host for each
# host. This is for efficiency and scripts should still return data
# if called with --host for backwards compat with 1.2 and earlier.
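            # for example (hostname and vars illustrative):
            #   "_meta": {
            #       "hostvars": {
            #           "web1.example.com": {"ansible_ssh_host": "10.0.0.1"}
            #       }
            #   }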
if group_name == '_meta':
if 'hostvars' in data:
self.host_vars_from_top = data['hostvars']
continue
if group_name != all.name:
group = groups[group_name] = Group(group_name)
else:
group = all
host = None
if not isinstance(data, dict):
data = {'hosts': data}
            # if data has neither of those subkeys, this is the simplified
            # syntax: a single host named after the group, with the dict as its vars
elif not any(k in data for k in ('hosts','vars')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
raise AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
                    if hostname not in all_hosts:
all_hosts[hostname] = Host(hostname)
host = all_hosts[hostname]
group.add_host(host)
if 'vars' in data:
if not isinstance(data['vars'], dict):
raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems():
if group.name == all.name:
all.set_variable(k, v)
else:
group.set_variable(k, v)
# Separate loop to ensure all groups are defined
for (group_name, data) in self.raw.items():
if group_name == '_meta':
continue
if isinstance(data, dict) and 'children' in data:
for child_name in data['children']:
if child_name in groups:
groups[group_name].add_child_group(groups[child_name])
for group in groups.values():
if group.depth == 0 and group.name != 'all':
all.add_child_group(group)
return groups
def get_host_variables(self, host):
""" Runs <script> --host <hostname> to determine additional host variables """
if self.host_vars_from_top is not None:
got = self.host_vars_from_top.get(host.name, {})
return got
cmd = [self.filename, "--host", host.name]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError:
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|