repo_name: string, lengths 6 to 100
path: string, lengths 4 to 294
copies: string, lengths 1 to 5
size: string, lengths 4 to 6
content: string, lengths 606 to 896k
license: string, 15 classes
var_hash: int64, -9,223,186,179,200,150,000 to 9,223,291,175B
doc_hash: int64, -9,223,304,365,658,930,000 to 9,223,309,051B
line_mean: float64, 3.5 to 99.8
line_max: int64, 13 to 999
alpha_frac: float64, 0.25 to 0.97
autogenerated: bool, 1 class
repo_name: gormanb/mongo-python-driver
path: bson/__init__.py
copies: 14
size: 32058
# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BSON (Binary JSON) encoding and decoding. """ import calendar import collections import datetime import itertools import re import struct import sys import uuid from codecs import (utf_8_decode as _utf_8_decode, utf_8_encode as _utf_8_encode) from bson.binary import (Binary, OLD_UUID_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY, UUIDLegacy) from bson.code import Code from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS from bson.dbref import DBRef from bson.errors import (InvalidBSON, InvalidDocument, InvalidStringData) from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId from bson.py3compat import (b, PY3, iteritems, text_type, string_type, reraise) from bson.regex import Regex from bson.son import SON, RE_TYPE from bson.timestamp import Timestamp from bson.tz_util import utc try: from bson import _cbson _USE_C = True except ImportError: _USE_C = False EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) BSONNUM = b"\x01" # Floating point BSONSTR = b"\x02" # UTF-8 string BSONOBJ = b"\x03" # Embedded document BSONARR = b"\x04" # Array BSONBIN = b"\x05" # Binary BSONUND = b"\x06" # Undefined BSONOID = b"\x07" # ObjectId BSONBOO = b"\x08" # Boolean BSONDAT = b"\x09" # UTC Datetime BSONNUL = b"\x0A" # Null BSONRGX = b"\x0B" # Regex BSONREF = b"\x0C" # DBRef BSONCOD = b"\x0D" # Javascript code BSONSYM = b"\x0E" # Symbol BSONCWS = b"\x0F" # Javascript code with scope BSONINT = b"\x10" # 32bit int BSONTIM = b"\x11" # Timestamp BSONLON = b"\x12" # 64bit int BSONMIN = b"\xFF" # Min key BSONMAX = b"\x7F" # Max key _UNPACK_FLOAT = struct.Struct("<d").unpack _UNPACK_INT = struct.Struct("<i").unpack _UNPACK_LENGTH_SUBTYPE = struct.Struct("<iB").unpack _UNPACK_LONG = struct.Struct("<q").unpack _UNPACK_TIMESTAMP = struct.Struct("<II").unpack def _get_int(data, position, dummy0, dummy1): """Decode a BSON int32 to python int.""" end = position + 4 return _UNPACK_INT(data[position:end])[0], end def _get_c_string(data, position, opts): """Decode a BSON 'C' string to python unicode string.""" end = data.index(b"\x00", position) return _utf_8_decode(data[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 def _get_float(data, position, dummy0, dummy1): """Decode a BSON double to python float.""" end = position + 8 return _UNPACK_FLOAT(data[position:end])[0], end def _get_string(data, position, obj_end, opts): """Decode a BSON string to python unicode string.""" length = _UNPACK_INT(data[position:position + 4])[0] position += 4 if length < 1 or obj_end - position < length: raise InvalidBSON("invalid string length") end = position + length - 1 if data[end:end + 1] != b"\x00": raise InvalidBSON("invalid end of string") return _utf_8_decode(data[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 def _get_object(data, position, obj_end, opts): """Decode a BSON subdocument to 
opts.document_class or bson.dbref.DBRef.""" obj_size = _UNPACK_INT(data[position:position + 4])[0] end = position + obj_size - 1 if data[end:position + obj_size] != b"\x00": raise InvalidBSON("bad eoo") if end >= obj_end: raise InvalidBSON("invalid object length") obj = _elements_to_dict(data, position + 4, end, opts) position += obj_size if "$ref" in obj: return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position def _get_array(data, position, obj_end, opts): """Decode a BSON array to python list.""" size = _UNPACK_INT(data[position:position + 4])[0] end = position + size - 1 if data[end:end + 1] != b"\x00": raise InvalidBSON("bad eoo") position += 4 end -= 1 result = [] # Avoid doing global and attibute lookups in the loop. append = result.append index = data.index getter = _ELEMENT_GETTER while position < end: element_type = data[position:position + 1] # Just skip the keys. position = index(b'\x00', position) + 1 value, position = getter[element_type](data, position, obj_end, opts) append(value) return result, position + 1 def _get_binary(data, position, dummy, opts): """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5]) position += 5 if subtype == 2: length2 = _UNPACK_INT(data[position:position + 4])[0] position += 4 if length2 != length - 4: raise InvalidBSON("invalid binary (st 2) - lengths don't match!") length = length2 end = position + length if subtype in (3, 4): # Java Legacy uuid_representation = opts.uuid_representation if uuid_representation == JAVA_LEGACY: java = data[position:end] value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) # C# legacy elif uuid_representation == CSHARP_LEGACY: value = uuid.UUID(bytes_le=data[position:end]) # Python else: value = uuid.UUID(bytes=data[position:end]) return value, end # Python3 special case. Decode subtype 0 to 'bytes'. 
if PY3 and subtype == 0: value = data[position:end] else: value = Binary(data[position:end], subtype) return value, end def _get_oid(data, position, dummy0, dummy1): """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 return ObjectId(data[position:end]), end def _get_boolean(data, position, dummy0, dummy1): """Decode a BSON true/false to python True/False.""" end = position + 1 return data[position:end] == b"\x01", end def _get_date(data, position, dummy, opts): """Decode a BSON datetime to python datetime.datetime.""" end = position + 8 millis = _UNPACK_LONG(data[position:end])[0] diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) / 1000 micros = diff * 1000 if opts.tz_aware: dt = EPOCH_AWARE + datetime.timedelta( seconds=seconds, microseconds=micros) if opts.tzinfo: dt = dt.astimezone(opts.tzinfo) else: dt = EPOCH_NAIVE + datetime.timedelta( seconds=seconds, microseconds=micros) return dt, end def _get_code(data, position, obj_end, opts): """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, position, obj_end, opts) return Code(code), position def _get_code_w_scope(data, position, obj_end, opts): """Decode a BSON code_w_scope to bson.code.Code.""" code, position = _get_string(data, position + 4, obj_end, opts) scope, position = _get_object(data, position, obj_end, opts) return Code(code, scope), position def _get_regex(data, position, dummy0, opts): """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, position, opts) bson_flags, position = _get_c_string(data, position, opts) bson_re = Regex(pattern, bson_flags) return bson_re, position def _get_ref(data, position, obj_end, opts): """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" collection, position = _get_string(data, position, obj_end, opts) oid, position = _get_oid(data, position, obj_end, opts) return DBRef(collection, oid), position def _get_timestamp(data, position, dummy0, dummy1): """Decode a BSON timestamp to bson.timestamp.Timestamp.""" end = position + 8 inc, timestamp = _UNPACK_TIMESTAMP(data[position:end]) return Timestamp(timestamp, inc), end def _get_int64(data, position, dummy0, dummy1): """Decode a BSON int64 to bson.int64.Int64.""" end = position + 8 return Int64(_UNPACK_LONG(data[position:end])[0]), end # Each decoder function's signature is: # - data: bytes # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions _ELEMENT_GETTER = { BSONNUM: _get_float, BSONSTR: _get_string, BSONOBJ: _get_object, BSONARR: _get_array, BSONBIN: _get_binary, BSONUND: lambda w, x, y, z: (None, x), # Deprecated undefined BSONOID: _get_oid, BSONBOO: _get_boolean, BSONDAT: _get_date, BSONNUL: lambda w, x, y, z: (None, x), BSONRGX: _get_regex, BSONREF: _get_ref, # Deprecated DBPointer BSONCOD: _get_code, BSONSYM: _get_string, # Deprecated symbol BSONCWS: _get_code_w_scope, BSONINT: _get_int, BSONTIM: _get_timestamp, BSONLON: _get_int64, BSONMIN: lambda w, x, y, z: (MinKey(), x), BSONMAX: lambda w, x, y, z: (MaxKey(), x)} def _element_to_dict(data, position, obj_end, opts): """Decode a single key, value pair.""" element_type = data[position:position + 1] position += 1 element_name, position = _get_c_string(data, position, opts) value, position = _ELEMENT_GETTER[element_type](data, position, obj_end, opts) return element_name, value, position def _elements_to_dict(data, position, obj_end, opts): """Decode a 
BSON document.""" result = opts.document_class() end = obj_end - 1 while position < end: (key, value, position) = _element_to_dict(data, position, obj_end, opts) result[key] = value return result def _bson_to_dict(data, opts): """Decode a BSON string to document_class.""" try: obj_size = _UNPACK_INT(data[:4])[0] except struct.error as exc: raise InvalidBSON(str(exc)) if obj_size != len(data): raise InvalidBSON("invalid object size") if data[obj_size - 1:obj_size] != b"\x00": raise InvalidBSON("bad eoo") try: return _elements_to_dict(data, 4, obj_size - 1, opts) except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() reraise(InvalidBSON, exc_value, exc_tb) if _USE_C: _bson_to_dict = _cbson._bson_to_dict _PACK_FLOAT = struct.Struct("<d").pack _PACK_INT = struct.Struct("<i").pack _PACK_LENGTH_SUBTYPE = struct.Struct("<iB").pack _PACK_LONG = struct.Struct("<q").pack _PACK_TIMESTAMP = struct.Struct("<II").pack _LIST_NAMES = tuple(b(str(i)) + b"\x00" for i in range(1000)) def gen_list_name(): """Generate "keys" for encoded lists in the sequence b"0\x00", b"1\x00", b"2\x00", ... The first 1000 keys are returned from a pre-built cache. All subsequent keys are generated on the fly. """ for name in _LIST_NAMES: yield name counter = itertools.count(1000) while True: yield b(str(next(counter))) + b"\x00" def _make_c_string_check(string): """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") return _utf_8_encode(string)[0] + b"\x00" def _make_c_string(string): """Make a 'C' string.""" if isinstance(string, bytes): try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: return _utf_8_encode(string)[0] + b"\x00" if PY3: def _make_name(string): """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") return _utf_8_encode(string)[0] + b"\x00" else: # Keys can be unicode or bytes in python 2. _make_name = _make_c_string_check def _encode_float(name, value, dummy0, dummy1): """Encode a float.""" return b"\x01" + name + _PACK_FLOAT(value) if PY3: def _encode_bytes(name, value, dummy0, dummy1): """Encode a python bytes.""" # Python3 special case. Store 'bytes' as BSON binary subtype 0. 
return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value else: def _encode_bytes(name, value, dummy0, dummy1): """Encode a python str (python 2.x).""" try: _utf_8_decode(value, None, True) except UnicodeError: raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % (value,)) return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" def _encode_mapping(name, value, check_keys, opts): """Encode a mapping type.""" data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in iteritems(value)]) return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" def _encode_dbref(name, value, check_keys, opts): """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 buf += _name_value_to_bson(b"$ref\x00", value.collection, check_keys, opts) buf += _name_value_to_bson(b"$id\x00", value.id, check_keys, opts) if value.database is not None: buf += _name_value_to_bson( b"$db\x00", value.database, check_keys, opts) for key, val in iteritems(value._DBRef__kwargs): buf += _element_to_bson(key, val, check_keys, opts) buf += b"\x00" buf[begin:begin + 4] = _PACK_INT(len(buf) - begin) return bytes(buf) def _encode_list(name, value, check_keys, opts): """Encode a list/tuple.""" lname = gen_list_name() data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" def _encode_text(name, value, dummy0, dummy1): """Encode a python unicode (python 2.x) / str (python 3.x).""" value = _utf_8_encode(value)[0] return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" def _encode_binary(name, value, dummy0, dummy1): """Encode bson.binary.Binary.""" subtype = value.subtype if subtype == 2: value = _PACK_INT(len(value)) + value return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value def _encode_uuid(name, value, dummy, opts): """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation # Python Legacy Common Case if uuid_representation == OLD_UUID_SUBTYPE: return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes # Java Legacy elif uuid_representation == JAVA_LEGACY: from_uuid = value.bytes data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data # C# legacy elif uuid_representation == CSHARP_LEGACY: # Microsoft GUID representation. 
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le # New else: return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes def _encode_objectid(name, value, dummy0, dummy1): """Encode bson.objectid.ObjectId.""" return b"\x07" + name + value.binary def _encode_bool(name, value, dummy0, dummy1): """Encode a python boolean (True/False).""" return b"\x08" + name + (value and b"\x01" or b"\x00") def _encode_datetime(name, value, dummy0, dummy1): """Encode datetime.datetime.""" if value.utcoffset() is not None: value = value - value.utcoffset() millis = int(calendar.timegm(value.timetuple()) * 1000 + value.microsecond / 1000) return b"\x09" + name + _PACK_LONG(millis) def _encode_none(name, dummy0, dummy1, dummy2): """Encode python None.""" return b"\x0A" + name def _encode_regex(name, value, dummy0, dummy1): """Encode a python regex or bson.regex.Regex.""" flags = value.flags # Python 2 common case if flags == 0: return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00" # Python 3 common case elif flags == re.UNICODE: return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00" else: sflags = b"" if flags & re.IGNORECASE: sflags += b"i" if flags & re.LOCALE: sflags += b"l" if flags & re.MULTILINE: sflags += b"m" if flags & re.DOTALL: sflags += b"s" if flags & re.UNICODE: sflags += b"u" if flags & re.VERBOSE: sflags += b"x" sflags += b"\x00" return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags def _encode_code(name, value, dummy, opts): """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) if not value.scope: return b"\x0D" + name + _PACK_INT(cstrlen) + cstring scope = _dict_to_bson(value.scope, False, opts, False) full_length = _PACK_INT(8 + cstrlen + len(scope)) return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope def _encode_int(name, value, dummy0, dummy1): """Encode a python int.""" if -2147483648 <= value <= 2147483647: return b"\x10" + name + _PACK_INT(value) else: try: return b"\x12" + name + _PACK_LONG(value) except struct.error: raise OverflowError("BSON can only handle up to 8-byte ints") def _encode_timestamp(name, value, dummy0, dummy1): """Encode bson.timestamp.Timestamp.""" return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time) def _encode_long(name, value, dummy0, dummy1): """Encode a python long (python 2.x)""" try: return b"\x12" + name + _PACK_LONG(value) except struct.error: raise OverflowError("BSON can only handle up to 8-byte ints") def _encode_minkey(name, dummy0, dummy1, dummy2): """Encode bson.min_key.MinKey.""" return b"\xFF" + name def _encode_maxkey(name, dummy0, dummy1, dummy2): """Encode bson.max_key.MaxKey.""" return b"\x7F" + name # Each encoder function's signature is: # - name: utf-8 bytes # - value: a Python data type, e.g. a Python int for _encode_int # - check_keys: bool, whether to check for invalid names # - opts: a CodecOptions _ENCODERS = { bool: _encode_bool, bytes: _encode_bytes, datetime.datetime: _encode_datetime, dict: _encode_mapping, float: _encode_float, int: _encode_int, list: _encode_list, # unicode in py2, str in py3 text_type: _encode_text, tuple: _encode_list, type(None): _encode_none, uuid.UUID: _encode_uuid, Binary: _encode_binary, Int64: _encode_long, Code: _encode_code, DBRef: _encode_dbref, MaxKey: _encode_maxkey, MinKey: _encode_minkey, ObjectId: _encode_objectid, Regex: _encode_regex, RE_TYPE: _encode_regex, SON: _encode_mapping, Timestamp: _encode_timestamp, UUIDLegacy: _encode_binary, # Special case. 
This will never be looked up directly. collections.Mapping: _encode_mapping, } _MARKERS = { 5: _encode_binary, 7: _encode_objectid, 11: _encode_regex, 13: _encode_code, 17: _encode_timestamp, 18: _encode_long, 100: _encode_dbref, 127: _encode_maxkey, 255: _encode_minkey, } if not PY3: _ENCODERS[long] = _encode_long def _name_value_to_bson(name, value, check_keys, opts): """Encode a single name, value pair.""" # First see if the type is already cached. KeyError will only ever # happen once per subtype. try: return _ENCODERS[type(value)](name, value, check_keys, opts) except KeyError: pass # Second, fall back to trying _type_marker. This has to be done # before the loop below since users could subclass one of our # custom types that subclasses a python built-in (e.g. Binary) marker = getattr(value, "_type_marker", None) if isinstance(marker, int) and marker in _MARKERS: func = _MARKERS[marker] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func return func(name, value, check_keys, opts) # If all else fails test each base type. This will only happen once for # a subtype of a supported base type. for base in _ENCODERS: if isinstance(value, base): func = _ENCODERS[base] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func return func(name, value, check_keys, opts) raise InvalidDocument("cannot convert value of type %s to bson" % type(value)) def _element_to_bson(key, value, check_keys, opts): """Encode a single key, value pair.""" if not isinstance(key, string_type): raise InvalidDocument("documents must have only string keys, " "key was %r" % (key,)) if check_keys: if key.startswith("$"): raise InvalidDocument("key %r must not start with '$'" % (key,)) if "." in key: raise InvalidDocument("key %r must not contain '.'" % (key,)) name = _make_name(key) return _name_value_to_bson(name, value, check_keys, opts) def _dict_to_bson(doc, check_keys, opts, top_level=True): """Encode a document to BSON.""" try: elements = [] if top_level and "_id" in doc: elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) for (key, value) in iteritems(doc): if not top_level or key != "_id": elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" if _USE_C: _dict_to_bson = _cbson._dict_to_bson _CODEC_OPTIONS_TYPE_ERROR = TypeError( "codec_options must be an instance of CodecOptions") def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): """Decode BSON data to multiple documents. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Removed `compile_re` option: PyMongo now always represents BSON regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular expression object. Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionchanged:: 2.7 Added `compile_re` option. If set to False, PyMongo represented BSON regular expressions as :class:`~bson.regex.Regex` objects instead of attempting to compile BSON regular expressions as Python native regular expressions, thus preventing errors for some incompatible patterns, see `PYTHON-500`_. .. 
_PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR docs = [] position = 0 end = len(data) - 1 try: while position < end: obj_size = _UNPACK_INT(data[position:position + 4])[0] if len(data) - position < obj_size: raise InvalidBSON("invalid object size") obj_end = position + obj_size - 1 if data[obj_end:position + obj_size] != b"\x00": raise InvalidBSON("bad eoo") docs.append(_elements_to_dict(data, position + 4, obj_end, codec_options)) position += obj_size return docs except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() reraise(InvalidBSON, exc_value, exc_tb) if _USE_C: decode_all = _cbson.decode_all def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a time. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8 """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 end = len(data) - 1 while position < end: obj_size = _UNPACK_INT(data[position:position + 4])[0] elements = data[position:position + obj_size] position += obj_size yield _bson_to_dict(elements, codec_options) def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object in chunks and parses bson in chunks, yielding one document at a time. :Parameters: - `file_obj`: A file object containing BSON data. - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8 """ while True: # Read size of next object. size_data = file_obj.read(4) if len(size_data) == 0: break # Finished with file normaly. elif len(size_data) != 4: raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT(size_data)[0] - 4 elements = size_data + file_obj.read(obj_size) yield _bson_to_dict(elements, codec_options) def is_valid(bson): """Check that the given string represents valid :class:`BSON` data. Raises :class:`TypeError` if `bson` is not an instance of :class:`str` (:class:`bytes` in python 3). Returns ``True`` if `bson` is valid :class:`BSON`, ``False`` otherwise. :Parameters: - `bson`: the data to be validated """ if not isinstance(bson, bytes): raise TypeError("BSON data must be an instance of a subclass of bytes") try: _bson_to_dict(bson, DEFAULT_CODEC_OPTIONS) return True except Exception: return False class BSON(bytes): """BSON (Binary JSON) data. """ @classmethod def encode(cls, document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). Raises :class:`TypeError` if `document` is not a mapping type, or contains keys that are not instances of :class:`basestring` (:class:`str` in python 3). Raises :class:`~bson.errors.InvalidDocument` if `document` cannot be converted to :class:`BSON`. 
:Parameters: - `document`: mapping type representing a document - `check_keys` (optional): check if keys start with '$' or contain '.', raising :class:`~bson.errors.InvalidDocument` in either case - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `uuid_subtype` option with `codec_options`. """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR return cls(_dict_to_bson(document, check_keys, codec_options)) def decode(self, codec_options=DEFAULT_CODEC_OPTIONS): """Decode this BSON data. By default, returns a BSON document represented as a Python :class:`dict`. To use a different :class:`MutableMapping` class, configure a :class:`~bson.codec_options.CodecOptions`:: >>> import collections # From Python standard library. >>> import bson >>> from bson.codec_options import CodecOptions >>> data = bson.BSON.encode({'a': 1}) >>> decoded_doc = bson.BSON.decode(data) <type 'dict'> >>> options = CodecOptions(document_class=collections.OrderedDict) >>> decoded_doc = bson.BSON.decode(data, codec_options=options) >>> type(decoded_doc) <class 'collections.OrderedDict'> :Parameters: - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Removed `compile_re` option: PyMongo now always represents BSON regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular expression object. Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionchanged:: 2.7 Added `compile_re` option. If set to False, PyMongo represented BSON regular expressions as :class:`~bson.regex.Regex` objects instead of attempting to compile BSON regular expressions as Python native regular expressions, thus preventing errors for some incompatible patterns, see `PYTHON-500`_. .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR return _bson_to_dict(self, codec_options) def has_c(): """Is the C extension installed? """ return _USE_C
license: apache-2.0
var_hash: 3,775,936,825,650,669,000
doc_hash: 7,483,008,196,307,441,000
line_mean: 32.959746
line_max: 80
alpha_frac: 0.607617
autogenerated: false
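The docstrings in the bson module above (decode_all, decode_iter, BSON.encode/BSON.decode) describe its public entry points. A minimal round-trip sketch based on those docstrings; the OrderedDict document class is only an illustrative choice:

    import collections
    import bson
    from bson.codec_options import CodecOptions

    # Encode a mapping to BSON bytes, then decode it back to a dict.
    data = bson.BSON.encode({'a': 1})
    decoded = bson.BSON.decode(data)
    assert decoded == {'a': 1}

    # Decode into an OrderedDict instead of the default dict.
    opts = CodecOptions(document_class=collections.OrderedDict)
    ordered = bson.BSON.decode(data, codec_options=opts)

    # decode_all accepts a byte string of concatenated documents.
    assert len(bson.decode_all(data + data)) == 2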
repo_name: kived/plyer
path: plyer/platforms/macosx/wifi.py
copies: 2
size: 4929
from plyer.facades import Wifi from pyobjus.dylib_manager import load_framework, INCLUDE from pyobjus import autoclass load_framework(INCLUDE.Foundation) load_framework(INCLUDE.CoreWLAN) CWInterface = autoclass('CWInterface') CWNetwork = autoclass('CWNetwork') CWWiFiClient = autoclass('CWWiFiClient') NSArray = autoclass('NSArray') NSDictionary = autoclass('NSDictionary') NSString = autoclass('NSString') class OSXWifi(Wifi): names = {} def _is_enabled(self): ''' Returns `True` if the Wifi is enabled else returns `False`. ''' return CWWiFiClient.sharedWiFiClient().interface().powerOn() def _get_network_info(self, name): ''' Returns all the network information. ''' def ns(x): NSString.alloc().initWithUTF8String_(x) accessNetworkType = self.names[name].accessNetworkType aggregateRSSI = self.names[name].aggregateRSSI beaconInterval = self.names[name].beaconInterval bssid = self.names[name].bssid.UTF8String() countryCode = self.names[name].countryCode hasInternet = self.names[name].hasInternet hasInterworkingIE = self.names[name].hasInterworkingIE hessid = self.names[name].hessid ibss = self.names[name].ibss isAdditionalStepRequiredForAccess = \ self.names[name].isAdditionalStepRequiredForAccess isCarPlayNetwork = self.names[name].isCarPlayNetwork isEmergencyServicesReachable = \ self.names[name].isEmergencyServicesReachable isPasspoint = self.names[name].isPasspoint isPersonalHotspot = self.names[name].isPersonalHotspot isUnauthenticatedEmergencyServiceAccessible = \ self.names[name].isUnauthenticatedEmergencyServiceAccessible noiseMeasurement = self.names[name].noiseMeasurement physicalLayerMode = self.names[name].physicalLayerMode rssiValue = self.names[name].rssiValue securityType = self.names[name].securityType ssid = self.names[name].ssid.UTF8String() supportsEasyConnect = self.names[name].supportsEasyConnect supportsWPS = self.names[name].supportsWPS venueGroup = self.names[name].venueGroup venueType = self.names[name].venueType return {'accessNetworkType': accessNetworkType, 'aggregateRSSI': aggregateRSSI, 'beaconInterval': beaconInterval, 'bssid': bssid, 'countryCode': countryCode, 'hasInternet': hasInternet, 'hasInternet': hasInternet, 'hasInterworkingIE': hasInterworkingIE, 'hessid': hessid, 'ibss': ibss, 'isAdditionalStepRequiredForAccess': isAdditionalStepRequiredForAccess, 'isCarPlayNetwork': isCarPlayNetwork, 'isEmergencyServicesReachable': isEmergencyServicesReachable, 'isPasspoint': isPasspoint, 'isPersonalHotspot': isPersonalHotspot, 'isUnauthenticatedEmergencyServiceAccessible': isUnauthenticatedEmergencyServiceAccessible, 'noiseMeasurement': noiseMeasurement, 'physicalLayerMode': physicalLayerMode, 'rssiValue': rssiValue, 'securityType': securityType, 'ssid': ssid, 'supportsEasyConnect': supportsEasyConnect, 'supportsWPS': supportsWPS, 'venueGroup': venueGroup, 'venueType': venueType} def _start_scanning(self): ''' Starts scanning for available Wi-Fi networks. ''' if self._is_enabled(): self.names = {} c = CWInterface.interface() scan = c.scanForNetworksWithName_error_(None, None) cnt = scan.allObjects().count() for i in range(cnt): self.names[ scan.allObjects().objectAtIndex_(i).ssid.UTF8String()] \ = scan.allObjects().objectAtIndex_(i) else: raise Exception("Wifi not enabled.") def _get_available_wifi(self): ''' Returns the name of available networks. ''' return self.names.keys() def _connect(self, network, parameters): ''' Expects 2 parameters: - name/ssid of the network. 
- password: dict type ''' password = parameters['password'] network_object = self.names[network] CWInterface.interface().associateToNetwork_password_error_( network_object, password, None) return def _disconnect(self): ''' Disconnect from network. ''' CWInterface.interface().disassociate() return def instance(): return OSXWifi()
license: mit
var_hash: 4,307,981,552,051,220,000
doc_hash: 3,893,941,304,114,250,000
line_mean: 36.06015
line_max: 77
alpha_frac: 0.612092
autogenerated: false
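plyer resolves the platform implementation above (OSXWifi) behind its Wifi facade. A usage sketch, assuming the top-level plyer.wifi proxy and the usual public facade wrappers for the underscored methods; the network name and password are placeholders:

    from plyer import wifi

    if wifi.is_enabled():
        wifi.start_scanning()
        for name in wifi.get_available_wifi():
            print(name)
        # _connect() above expects the password wrapped in a dict.
        wifi.connect('ExampleNetwork', {'password': 'example-password'})
        wifi.disconnect()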
repo_name: evensonbryan/yocto-autobuilder
path: lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/test_sslverify.py
copies: 5
size: 21662
# Copyright 2005 Divmod, Inc. See LICENSE file for details # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.internet._sslverify}. """ import itertools try: from OpenSSL import SSL from OpenSSL.crypto import PKey, X509, X509Req from OpenSSL.crypto import TYPE_RSA from twisted.internet import _sslverify as sslverify except ImportError: pass from twisted.trial import unittest from twisted.internet import protocol, defer, reactor from twisted.python.reflect import objgrep, isSame from twisted.python import log from twisted.internet.error import CertificateError, ConnectionLost from twisted.internet import interfaces # A couple of static PEM-format certificates to be used by various tests. A_HOST_CERTIFICATE_PEM = """ -----BEGIN CERTIFICATE----- MIIC2jCCAkMCAjA5MA0GCSqGSIb3DQEBBAUAMIG0MQswCQYDVQQGEwJVUzEiMCAG A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo dXNldHRzMScwJQYJKoZIhvcNAQkBFhhub2JvZHlAdHdpc3RlZG1hdHJpeC5jb20x ETAPBgNVBAsTCFNlY3VyaXR5MB4XDTA2MDgxNjAxMDEwOFoXDTA3MDgxNjAxMDEw OFowgbQxCzAJBgNVBAYTAlVTMSIwIAYDVQQDExlleGFtcGxlLnR3aXN0ZWRtYXRy aXguY29tMQ8wDQYDVQQHEwZCb3N0b24xHDAaBgNVBAoTE1R3aXN0ZWQgTWF0cml4 IExhYnMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxJzAlBgkqhkiG9w0BCQEWGG5v Ym9keUB0d2lzdGVkbWF0cml4LmNvbTERMA8GA1UECxMIU2VjdXJpdHkwgZ8wDQYJ KoZIhvcNAQEBBQADgY0AMIGJAoGBAMzH8CDF/U91y/bdbdbJKnLgnyvQ9Ig9ZNZp 8hpsu4huil60zF03+Lexg2l1FIfURScjBuaJMR6HiMYTMjhzLuByRZ17KW4wYkGi KXstz03VIKy4Tjc+v4aXFI4XdRw10gGMGQlGGscXF/RSoN84VoDKBfOMWdXeConJ VyC4w3iJAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAviMT4lBoxOgQy32LIgZ4lVCj JNOiZYg8GMQ6y0ugp86X80UjOvkGtNf/R7YgED/giKRN/q/XJiLJDEhzknkocwmO S+4b2XpiaZYxRyKWwL221O7CGmtWYyZl2+92YYmmCiNzWQPfP6BOMlfax0AGLHls fXzCWdG0O/3Lk2SRM0I= -----END CERTIFICATE----- """ A_PEER_CERTIFICATE_PEM = """ -----BEGIN CERTIFICATE----- MIIC3jCCAkcCAjA6MA0GCSqGSIb3DQEBBAUAMIG2MQswCQYDVQQGEwJVUzEiMCAG A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo dXNldHRzMSkwJwYJKoZIhvcNAQkBFhpzb21lYm9keUB0d2lzdGVkbWF0cml4LmNv bTERMA8GA1UECxMIU2VjdXJpdHkwHhcNMDYwODE2MDEwMTU2WhcNMDcwODE2MDEw MTU2WjCBtjELMAkGA1UEBhMCVVMxIjAgBgNVBAMTGWV4YW1wbGUudHdpc3RlZG1h dHJpeC5jb20xDzANBgNVBAcTBkJvc3RvbjEcMBoGA1UEChMTVHdpc3RlZCBNYXRy aXggTGFiczEWMBQGA1UECBMNTWFzc2FjaHVzZXR0czEpMCcGCSqGSIb3DQEJARYa c29tZWJvZHlAdHdpc3RlZG1hdHJpeC5jb20xETAPBgNVBAsTCFNlY3VyaXR5MIGf MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnm+WBlgFNbMlHehib9ePGGDXF+Nz4 CjGuUmVBaXCRCiVjg3kSDecwqfb0fqTksBZ+oQ1UBjMcSh7OcvFXJZnUesBikGWE JE4V8Bjh+RmbJ1ZAlUPZ40bAkww0OpyIRAGMvKG+4yLFTO4WDxKmfDcrOb6ID8WJ e1u+i3XGkIf/5QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAD4Oukm3YYkhedUepBEA vvXIQhVDqL7mk6OqYdXmNj6R7ZMC8WWvGZxrzDI1bZuB+4aIxxd1FXC3UOHiR/xg i9cDl1y8P/qRp4aEBNF6rI0D4AxTbfnHQx4ERDAOShJdYZs/2zifPJ6va6YvrEyr yqDtGhklsWW3ZwBzEh5VEOUp -----END CERTIFICATE----- """ counter = itertools.count().next def makeCertificate(**kw): keypair = PKey() keypair.generate_key(TYPE_RSA, 512) certificate = X509() certificate.gmtime_adj_notBefore(0) certificate.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year for xname in certificate.get_issuer(), certificate.get_subject(): for (k, v) in kw.items(): setattr(xname, k, v) certificate.set_serial_number(counter()) certificate.set_pubkey(keypair) certificate.sign(keypair, "md5") return keypair, certificate class DataCallbackProtocol(protocol.Protocol): def dataReceived(self, data): d, self.factory.onData = self.factory.onData, None if d is not None: d.callback(data) 
def connectionLost(self, reason): d, self.factory.onLost = self.factory.onLost, None if d is not None: d.errback(reason) class WritingProtocol(protocol.Protocol): byte = 'x' def connectionMade(self): self.transport.write(self.byte) def connectionLost(self, reason): self.factory.onLost.errback(reason) class OpenSSLOptions(unittest.TestCase): serverPort = clientConn = None onServerLost = onClientLost = None sKey = None sCert = None cKey = None cCert = None def setUp(self): """ Create class variables of client and server certificates. """ self.sKey, self.sCert = makeCertificate( O="Server Test Certificate", CN="server") self.cKey, self.cCert = makeCertificate( O="Client Test Certificate", CN="client") def tearDown(self): if self.serverPort is not None: self.serverPort.stopListening() if self.clientConn is not None: self.clientConn.disconnect() L = [] if self.onServerLost is not None: L.append(self.onServerLost) if self.onClientLost is not None: L.append(self.onClientLost) return defer.DeferredList(L, consumeErrors=True) def loopback(self, serverCertOpts, clientCertOpts, onServerLost=None, onClientLost=None, onData=None): if onServerLost is None: self.onServerLost = onServerLost = defer.Deferred() if onClientLost is None: self.onClientLost = onClientLost = defer.Deferred() if onData is None: onData = defer.Deferred() serverFactory = protocol.ServerFactory() serverFactory.protocol = DataCallbackProtocol serverFactory.onLost = onServerLost serverFactory.onData = onData clientFactory = protocol.ClientFactory() clientFactory.protocol = WritingProtocol clientFactory.onLost = onClientLost self.serverPort = reactor.listenSSL(0, serverFactory, serverCertOpts) self.clientConn = reactor.connectSSL('127.0.0.1', self.serverPort.getHost().port, clientFactory, clientCertOpts) def test_abbreviatingDistinguishedNames(self): """ Check that abbreviations used in certificates correctly map to complete names. """ self.assertEqual( sslverify.DN(CN='a', OU='hello'), sslverify.DistinguishedName(commonName='a', organizationalUnitName='hello')) self.assertNotEquals( sslverify.DN(CN='a', OU='hello'), sslverify.DN(CN='a', OU='hello', emailAddress='xxx')) dn = sslverify.DN(CN='abcdefg') self.assertRaises(AttributeError, setattr, dn, 'Cn', 'x') self.assertEqual(dn.CN, dn.commonName) dn.CN = 'bcdefga' self.assertEqual(dn.CN, dn.commonName) def testInspectDistinguishedName(self): n = sslverify.DN(commonName='common name', organizationName='organization name', organizationalUnitName='organizational unit name', localityName='locality name', stateOrProvinceName='state or province name', countryName='country name', emailAddress='email address') s = n.inspect() for k in [ 'common name', 'organization name', 'organizational unit name', 'locality name', 'state or province name', 'country name', 'email address']: self.assertIn(k, s, "%r was not in inspect output." % (k,)) self.assertIn(k.title(), s, "%r was not in inspect output." % (k,)) def testInspectDistinguishedNameWithoutAllFields(self): n = sslverify.DN(localityName='locality name') s = n.inspect() for k in [ 'common name', 'organization name', 'organizational unit name', 'state or province name', 'country name', 'email address']: self.assertNotIn(k, s, "%r was in inspect output." % (k,)) self.assertNotIn(k.title(), s, "%r was in inspect output." 
% (k,)) self.assertIn('locality name', s) self.assertIn('Locality Name', s) def test_inspectCertificate(self): """ Test that the C{inspect} method of L{sslverify.Certificate} returns a human-readable string containing some basic information about the certificate. """ c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM) self.assertEqual( c.inspect().split('\n'), ["Certificate For Subject:", " Common Name: example.twistedmatrix.com", " Country Name: US", " Email Address: nobody@twistedmatrix.com", " Locality Name: Boston", " Organization Name: Twisted Matrix Labs", " Organizational Unit Name: Security", " State Or Province Name: Massachusetts", "", "Issuer:", " Common Name: example.twistedmatrix.com", " Country Name: US", " Email Address: nobody@twistedmatrix.com", " Locality Name: Boston", " Organization Name: Twisted Matrix Labs", " Organizational Unit Name: Security", " State Or Province Name: Massachusetts", "", "Serial Number: 12345", "Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18", "Public Key with Hash: ff33994c80812aa95a79cdb85362d054"]) def test_certificateOptionsSerialization(self): """ Test that __setstate__(__getstate__()) round-trips properly. """ firstOpts = sslverify.OpenSSLCertificateOptions( privateKey=self.sKey, certificate=self.sCert, method=SSL.SSLv3_METHOD, verify=True, caCerts=[self.sCert], verifyDepth=2, requireCertificate=False, verifyOnce=False, enableSingleUseKeys=False, enableSessions=False, fixBrokenPeers=True, enableSessionTickets=True) context = firstOpts.getContext() state = firstOpts.__getstate__() # The context shouldn't be in the state to serialize self.failIf(objgrep(state, context, isSame), objgrep(state, context, isSame)) opts = sslverify.OpenSSLCertificateOptions() opts.__setstate__(state) self.assertEqual(opts.privateKey, self.sKey) self.assertEqual(opts.certificate, self.sCert) self.assertEqual(opts.method, SSL.SSLv3_METHOD) self.assertEqual(opts.verify, True) self.assertEqual(opts.caCerts, [self.sCert]) self.assertEqual(opts.verifyDepth, 2) self.assertEqual(opts.requireCertificate, False) self.assertEqual(opts.verifyOnce, False) self.assertEqual(opts.enableSingleUseKeys, False) self.assertEqual(opts.enableSessions, False) self.assertEqual(opts.fixBrokenPeers, True) self.assertEqual(opts.enableSessionTickets, True) def test_certificateOptionsSessionTickets(self): """ Enabling session tickets should not set the OP_NO_TICKET option. """ opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=True) ctx = opts.getContext() self.assertEqual(0, ctx.set_options(0) & 0x00004000) def test_certificateOptionsSessionTicketsDisabled(self): """ Enabling session tickets should set the OP_NO_TICKET option. """ opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=False) ctx = opts.getContext() self.assertEqual(0x00004000, ctx.set_options(0) & 0x00004000) def test_allowedAnonymousClientConnection(self): """ Check that anonymous connections are allowed when certificates aren't required on the server. """ onData = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, requireCertificate=False), sslverify.OpenSSLCertificateOptions( requireCertificate=False), onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_refusedAnonymousClientConnection(self): """ Check that anonymous connections are refused when certificates are required on the server. 
""" onServerLost = defer.Deferred() onClientLost = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, caCerts=[self.sCert], requireCertificate=True), sslverify.OpenSSLCertificateOptions( requireCertificate=False), onServerLost=onServerLost, onClientLost=onClientLost) d = defer.DeferredList([onClientLost, onServerLost], consumeErrors=True) def afterLost(((cSuccess, cResult), (sSuccess, sResult))): self.failIf(cSuccess) self.failIf(sSuccess) # Win32 fails to report the SSL Error, and report a connection lost # instead: there is a race condition so that's not totally # surprising (see ticket #2877 in the tracker) self.assertIsInstance(cResult.value, (SSL.Error, ConnectionLost)) self.assertIsInstance(sResult.value, SSL.Error) return d.addCallback(afterLost) def test_failedCertificateVerification(self): """ Check that connecting with a certificate not accepted by the server CA fails. """ onServerLost = defer.Deferred() onClientLost = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=False, requireCertificate=False), sslverify.OpenSSLCertificateOptions(verify=True, requireCertificate=False, caCerts=[self.cCert]), onServerLost=onServerLost, onClientLost=onClientLost) d = defer.DeferredList([onClientLost, onServerLost], consumeErrors=True) def afterLost(((cSuccess, cResult), (sSuccess, sResult))): self.failIf(cSuccess) self.failIf(sSuccess) return d.addCallback(afterLost) def test_successfulCertificateVerification(self): """ Test a successful connection with client certificate validation on server side. """ onData = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=False, requireCertificate=False), sslverify.OpenSSLCertificateOptions(verify=True, requireCertificate=True, caCerts=[self.sCert]), onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_successfulSymmetricSelfSignedCertificateVerification(self): """ Test a successful connection with validation on both server and client sides. """ onData = defer.Deferred() self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, requireCertificate=True, caCerts=[self.cCert]), sslverify.OpenSSLCertificateOptions(privateKey=self.cKey, certificate=self.cCert, verify=True, requireCertificate=True, caCerts=[self.sCert]), onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) def test_verification(self): """ Check certificates verification building custom certificates data. 
""" clientDN = sslverify.DistinguishedName(commonName='client') clientKey = sslverify.KeyPair.generate() clientCertReq = clientKey.certificateRequest(clientDN) serverDN = sslverify.DistinguishedName(commonName='server') serverKey = sslverify.KeyPair.generate() serverCertReq = serverKey.certificateRequest(serverDN) clientSelfCertReq = clientKey.certificateRequest(clientDN) clientSelfCertData = clientKey.signCertificateRequest( clientDN, clientSelfCertReq, lambda dn: True, 132) clientSelfCert = clientKey.newCertificate(clientSelfCertData) serverSelfCertReq = serverKey.certificateRequest(serverDN) serverSelfCertData = serverKey.signCertificateRequest( serverDN, serverSelfCertReq, lambda dn: True, 516) serverSelfCert = serverKey.newCertificate(serverSelfCertData) clientCertData = serverKey.signCertificateRequest( serverDN, clientCertReq, lambda dn: True, 7) clientCert = clientKey.newCertificate(clientCertData) serverCertData = clientKey.signCertificateRequest( clientDN, serverCertReq, lambda dn: True, 42) serverCert = serverKey.newCertificate(serverCertData) onData = defer.Deferred() serverOpts = serverCert.options(serverSelfCert) clientOpts = clientCert.options(clientSelfCert) self.loopback(serverOpts, clientOpts, onData=onData) return onData.addCallback( lambda result: self.assertEqual(result, WritingProtocol.byte)) if interfaces.IReactorSSL(reactor, None) is None: OpenSSLOptions.skip = "Reactor does not support SSL, cannot run SSL tests" class _NotSSLTransport: def getHandle(self): return self class _MaybeSSLTransport: def getHandle(self): return self def get_peer_certificate(self): return None def get_host_certificate(self): return None class _ActualSSLTransport: def getHandle(self): return self def get_host_certificate(self): return sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM).original def get_peer_certificate(self): return sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM).original class Constructors(unittest.TestCase): def test_peerFromNonSSLTransport(self): """ Verify that peerFromTransport raises an exception if the transport passed is not actually an SSL transport. """ x = self.assertRaises(CertificateError, sslverify.Certificate.peerFromTransport, _NotSSLTransport()) self.failUnless(str(x).startswith("non-TLS")) def test_peerFromBlankSSLTransport(self): """ Verify that peerFromTransport raises an exception if the transport passed is an SSL transport, but doesn't have a peer certificate. """ x = self.assertRaises(CertificateError, sslverify.Certificate.peerFromTransport, _MaybeSSLTransport()) self.failUnless(str(x).startswith("TLS")) def test_hostFromNonSSLTransport(self): """ Verify that hostFromTransport raises an exception if the transport passed is not actually an SSL transport. """ x = self.assertRaises(CertificateError, sslverify.Certificate.hostFromTransport, _NotSSLTransport()) self.failUnless(str(x).startswith("non-TLS")) def test_hostFromBlankSSLTransport(self): """ Verify that hostFromTransport raises an exception if the transport passed is an SSL transport, but doesn't have a host certificate. """ x = self.assertRaises(CertificateError, sslverify.Certificate.hostFromTransport, _MaybeSSLTransport()) self.failUnless(str(x).startswith("TLS")) def test_hostFromSSLTransport(self): """ Verify that hostFromTransport successfully creates the correct certificate if passed a valid SSL transport. 
""" self.assertEqual( sslverify.Certificate.hostFromTransport( _ActualSSLTransport()).serialNumber(), 12345) def test_peerFromSSLTransport(self): """ Verify that peerFromTransport successfully creates the correct certificate if passed a valid SSL transport. """ self.assertEqual( sslverify.Certificate.peerFromTransport( _ActualSSLTransport()).serialNumber(), 12346) if interfaces.IReactorSSL(reactor, None) is None: Constructors.skip = "Reactor does not support SSL, cannot run SSL tests"
license: gpl-2.0
var_hash: 5,390,947,377,193,116,000
doc_hash: -1,885,280,378,485,382,400
line_mean: 37.820789
line_max: 79
alpha_frac: 0.639138
autogenerated: false
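Beyond the loopback tests, the sslverify.Certificate helpers exercised above can be used directly. A small sketch mirroring test_inspectCertificate, assuming the A_HOST_CERTIFICATE_PEM constant is imported from the test module itself:

    from twisted.internet import _sslverify as sslverify
    from twisted.test.test_sslverify import A_HOST_CERTIFICATE_PEM

    # Load the static host certificate and print a human-readable summary.
    cert = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
    print(cert.inspect())
    print(cert.serialNumber())  # 12345, per test_inspectCertificate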
repo_name: hcseob/py_spectre
path: py_spectre/psf.py
copies: 1
size: 50756
# -*- coding: latin-1 -*- """ Copyright (c) 2008 Pycircuit Development Team All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: a. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. b. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. c. Neither the name of the Pycircuit nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import unittest import struct, os, re import operator import numpy # import psfasc from copy import copy from struct import unpack, pack class PSFInvalid(Exception): pass def warning(str): print "Warning: "+str def indent(str, n=2): return "\n".join([' '*n+s for s in str.split("\n")]) class PSFData(object): @classmethod def fromFile(cls, file): obj = cls() obj.deSerializeFile(file) return obj size=None def __init__(self, value=None, extarg=None): self.value = value self.extarg = extarg def setValue(self, value): self.value = value def __eq__(self, a): return self.value == a def __cmp__(self, a): return cmp(self.value, a) def __hash__(self): return hash(self.value) def deSerializeFile(self, file): pass def getSize(self): self.size def getValue(self): return self.value def __str__(self): return str(self.value) def toPSFasc(self, prec=None): return str(self) def __repr__(self): return self.value.__repr__() class PSFNumber(PSFData): def __int__(self): return self.value def __add__(self, a): return UInt32(self.value+int(a)) def __mul__(self, a): return UInt32(self.value*int(a)) def __radd__(self, a): return UInt32(self.value+int(a)) def __sub__(self, a): return UInt32(self.value-int(a)) def __rsub__(self, a): return UInt32(int(a)-self.value) def __div__(self, a): return UInt32(self.value/int(a)) def __rdiv__(self, a): return UInt32(int(a)/self.value) def __floordiv__(self, a): return UInt32(self.value//int(a)) def __rfloordiv__(self, a): return UInt32(int(a)//self.value) def __mod__(self, a): return UInt32(self.value%int(a)) class Int8(PSFNumber): size=4 def deSerializeFile(self, file, size=None): data=file.read(self.size) self.value = unpack("b",data[3])[0] class UInt8(PSFNumber): size=4 def deSerializeFile(self, file, size=None): data=file.read(self.size) self.value = unpack("B",data[3])[0] class Int32(PSFNumber): size=4 def deSerializeFile(self, file, size=None): self.value = unpack(">i",file.read(self.size))[0] class UInt32(PSFNumber): size=4 def deSerializeFile(self, file, size=None): self.value = 
unpack(">I",file.read(self.size))[0] class Int64(PSFNumber): size=8 def __int__(self): return self.value def deSerializeFile(self, file, size=None): self.value = unpack(">q",file.read(self.size))[0] class UInt64(PSFNumber): size=8 def __int__(self): return self.value def deSerializeFile(self, file, size=None): self.value = unpack(">Q",file.read(self.size))[0] class Float64(PSFNumber): size=8 def __float__(self): return float(self.value) def toPSFasc(self, prec=6): if prec: fmt=('%%#%dg'%prec) else: fmt='%#g' return fmt%self.value def deSerializeFile(self, file, size=None): self.value = unpack(">d",file.read(self.size))[0] class Float32(PSFNumber): size=4 def __float__(self): return float(self.value) def deSerializeFile(self, file, size=None): self.value = unpack(">f",file.read(self.size))[0] class ComplexFloat64(PSFNumber): size=16 def toPSFasc(self, prec=6): if prec: fmt=('%%#%dg'%prec) else: fmt='%#g' return "(" + fmt%self.value.real + " " + fmt%self.value.imag + ")" def deSerializeFile(self, file, size=None): re,im = unpack(">dd",file.read(self.size)) self.value = complex(re,im) class String(PSFData): def __str__(self): return self.value def deSerializeFile(self, file, size=None): self.len = unpack(">I",file.read(4))[0] if self.len < 0x100: self.value = file.read(self.len) # Pad to 32-bit boundary file.read((4-self.len)%4) else: raise Exception("String too long %d"%self.len) def toPSFasc(self, prec=None): return "\""+str(self.value)+"\"" class Struct(PSFData): def __init__(self, structdef, value=None): self.structdef = structdef self.value = {} if value: self.setValue(value) def __getitem__(self, key): return self.value[key] def getValue(self): return dict([(k,v.getValue()) for k,v in self.value.items()]) def setValue(self, value): assert(value != None and len(value) == len(self.structdef.children)) for element, val in zip(self.structdef.children, value): valueobj = element.getDataObj() valueobj.setValue(val) self.value[element.name] = valueobj def deSerializeFile(self, file): for element in self.structdef.children: value = element.getDataObj() value.deSerializeFile(file) self.value[element.name] = value def toPSFasc(self, prec=None): s="(\n" for element in self.structdef.children: s+=self.value[element.name].toPSFasc(prec)+"\n" s+=")" return s def __repr__(self): return "\n".join([indent(s) for s in map(repr,self.value.items())]) + "\n" class Array(PSFData): def setValue(self, value): dataclass, length = self.extarg if value != None: self.children = [dataclass(value=val) for val in value] else: self.children = [dataclass(value=None) for val in range(length)] def getValue(self): return [v.getValue() for v in self.children] def __iter__(self): return self.children.__iter__() def __tuple__(self): return tuple(self.children) def __repr__(self): return "\n".join([indent(s) for s in map(str,self.children)]) + "\n" class Chunk: """Base class for chunk""" def __init__(self, psf=None, type=None): self.psf = psf self.fileoffset=None if not hasattr(self.__class__, 'type'): self.type = type self.verbose = False self.name = "" def deSerializeFile(self, file): self.fileoffset = file.tell() type = UInt32.fromFile(file) if (self.type != None) and self.type != type: file.seek(-UInt32.size, 1) raise IncorrectChunk(type, self.type) def __repr__(self): return self.__class__.__name__ class NextSectionType(Chunk): type=1 class NextSectionSweep(Chunk): type=2 class NextSectionTrace(Chunk): type=3 class NextSectionValues(Chunk): type=4 class EndOfStructDef(Chunk): type=18 NextSectionClasses = 
[NextSectionType, NextSectionSweep, NextSectionTrace, NextSectionValues] class Property(Chunk): type=None valueclass=None def __init__(self, name=None, value=None): Chunk.__init__(self) self.name = String(name) self.value = self.valueclass(value) def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.name = String.fromFile(file) self.value = self.valueclass.fromFile(file) def toPSFasc(self, prec=9): return self.name.toPSFasc() + " " + self.value.toPSFasc(prec=prec) def __repr__(self): return self.__class__.__name__+"("+str(self.name)+","+str(self.value)+")" class PropertyString(Property): type=33 valueclass=String class PropertyUInt(Property): type=34 valueclass=UInt32 class PropertyFloat64(Property): type=35 valueclass=Float64 PropertyClasses = [PropertyString, PropertyUInt, PropertyFloat64] TYPEFLOATDOUBLE = 11 TYPEINTBYTE = 1 TYPECOMPLEXDOUBLE = 12 TYPESTRUCT = 16 TYPESTRING = 2 ## Incorrect number TYPEARRAY = 3 ## Incorrect number TYPEINTLONG = 5 class DataTypeDef(Chunk): """Class representing data type of waveform data""" type=16 ClassDict = { TYPEFLOATDOUBLE: Float64, TYPEINTBYTE: Int8, TYPECOMPLEXDOUBLE: ComplexFloat64, TYPESTRING: String, TYPEARRAY: Array, TYPEINTLONG: Int32 } PSFASCDict = { TYPEFLOATDOUBLE: "FLOAT DOUBLE", TYPEINTBYTE: "INT BYTE", TYPECOMPLEXDOUBLE: "COMPLEX DOUBLE", TYPESTRING: "STRING *", TYPEINTLONG: "INT LONG" } def __init__(self, psf, id=0, name=None, datatypeid=0, structdef=None): Chunk.__init__(self, psf, type) self.id = id self.name = name self.datatypeid = datatypeid self.structdef = structdef self.properties = [] def getDataObj(self): """Get a data object described by the DataType""" if self.datatypeid == TYPESTRUCT: return self.structdef.getDataObj() elif self.datatypeid == TYPEARRAY: return Array(extarg=(self.ClassDict[self.structdef[0]], self.structdef[1])) else: return self.ClassDict[self.datatypeid](extarg=self.structdef) def toPSFasc(self, prec=None): r=self.name.toPSFasc(prec) + " " if self.datatypeid == TYPESTRUCT: r+=self.structdef.toPSFasc(prec) elif self.datatypeid == TYPEARRAY: r+="ARRAY ( %s ) "%str(self.structdef[1])+self.PSFASCDict[self.structdef[0]] else: r+= self.PSFASCDict[self.datatypeid] if len(self.properties)>0: r+=" PROP(\n" r+="\n".join([prop.toPSFasc(prec) for prop in self.properties]) r+="\n)" return r def getDataSize(self): if self.datatypeid == TYPESTRUCT: return self.structdef.getDataSize() else: return self.ClassDict[self.datatypeid].size def deSerializeFile(self, file): start = file.tell() Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) arraytype = UInt32.fromFile(file) self.datatypeid = UInt32.fromFile(file) if arraytype != 0: self.datatypeid, self.structdef = TYPEARRAY, (UInt32.fromFile(file), self.datatypeid) if self.datatypeid == 16: self.structdef = StructDef.fromFile(file, self.psf) # Read possible property objects that belongs to the type by peeking ahead while True: oldpos = file.tell() try: prop = readChunk(self.psf, file, expectedclasses=PropertyClasses) self.properties.append(prop) except ValueError: file.seek(oldpos) break def __repr__(self): return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid, "properties":self.properties})+")" class DataTypeRef(Chunk): type=16 """Class representing link to data type""" def __init__(self, psf, type=None): Chunk.__init__(self, psf, type) self.id = None self.name = None self.datatypeid = 0 self.properties = [] def getDataObj(self): """Get a data object 
described by the DataType""" return self.psf.types.idMap[self.datatypeid].getDataObj() def toPSFasc(self, prec=None): r=self.name.toPSFasc(prec) + " " r+=self.psf.types.idMap[self.datatypeid].name.toPSFasc() if len(self.properties)>0: r+=" PROP(\n" r+="\n".join([prop.toPSFasc(prec) for prop in self.properties]) r+="\n)" return r def getDataSize(self): return self.psf.types.idMap[self.datatypeid].getDataSize() def deSerializeFile(self, file): start = file.tell() Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) self.datatypeid = UInt32.fromFile(file) assert(self.datatypeid != 0) # Read possible property objects that belongs to the type by peeking ahead while True: oldpos = file.tell() try: prop = readChunk(self.psf, file, expectedclasses=PropertyClasses) self.properties.append(prop) except ValueError: file.seek(oldpos) break def __repr__(self): return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid, "properties":self.properties})+")" class StructDef(PSFData): """Class representing struct definition""" @classmethod def fromFile(cls, file, psf): obj = cls() obj.deSerializeFile(file, psf) return obj def __init__(self): self.children = [] def getDataObj(self): return Struct(self) def getDataSize(self): return sum([child.getDataSize() for child in self.children]) def toPSFasc(self, prec=None): s="STRUCT(\n" for child in self.children: s+=child.toPSFasc(prec)+"\n" s+=")" return s def deSerializeFile(self, file, psf): while True: chunk = readChunk(psf, file, expectedclasses=[DataTypeDef, EndOfStructDef]) if isinstance(chunk, EndOfStructDef): break else: self.children.append(chunk) def __repr__(self): return self.__class__.__name__ + "(\n"+\ "\n".join(map(str,self.children))+\ ")\n" class SimpleContainer(Chunk): type = 21 def __init__(self, psf, type=None, childrenclslist=None, childrenclsignore=None): Chunk.__init__(self, psf, type) self.section = None self.children = [] self.childrenclslist = childrenclslist self.childrenclsignore = childrenclsignore self.endpos = None def getChunks(self): return self.children def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value self.children = [] while file.tell() < self.endpos: chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist+self.childrenclsignore) if chunk.__class__ in self.childrenclslist: self.children.append(chunk) # Read trailing bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def __repr__(self): s="" if self.fileoffset: s+= "0x%x"%self.fileoffset+ ":" s+= self.__class__.__name__ + "(" + str(self.type) +")" if self.endpos and self.fileoffset: s+= "size="+str(self.endpos-self.fileoffset) s+= "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n" return s class Container22(Chunk): type=22 def __init__(self, psf, type=None, n=None, childrenclslist=None): Chunk.__init__(self, psf, 22) self.section = None self.children = [] self.childrenclslist = childrenclslist self.endpos = None def getChunks(self): return self.children def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value # Save end position of Container self.children = [] while file.tell() < self.endpos: chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist) self.children.append(chunk) # Read trailing 
bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def __repr__(self): return "0x%x"%self.fileoffset +":" + self.__class__.__name__ +\ "(" + str(self.type) +")" + "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n" class ZeroPad(Chunk): type = 20 def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) size = UInt32.fromFile(file).value self.endpos = file.tell() + size file.seek(self.endpos) class HashTable(Chunk): type = 19 """Class representing offset of trace data""" def __init__(self, psf, n=None): Chunk.__init__(self, psf, type) self.children = [] self.extra=[] def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) startpos = file.tell() size = UInt32.fromFile(file) for i in range(0, size/8): id = UInt32.fromFile(file) offset = UInt32.fromFile(file) self.children.append((id, offset)) def __repr__(self): return self.__class__.__name__+"\n"+ "\n".join([" 0x%x: 0x%x"%(k,v.value) for k,v in self.children])+")" class HashTableTrace(Chunk): type = 19 """Class representing offset of trace data""" def __init__(self, psf): Chunk.__init__(self, psf, type) self.children = [] def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.size = UInt32.fromFile(file) for i in range(0, self.size.value/16): id = UInt32.fromFile(file) offset = UInt32.fromFile(file) data1 = UInt32.fromFile(file).value data2 = UInt32.fromFile(file).value self.children.append((id,offset,data1,data2)) def __repr__(self): return self.__class__.__name__+"\n"+ "\n".join([" %s: 0x%x 0x%x 0x%x"%(pack(">I",k.value),v.value,d1,d2) for k,v,d1,d2 in self.children])+")" class HashContainer(Chunk): type=21 hashclass = HashTable def __init__(self, psf, childrenclslist=None, childrenclsignore=None): Chunk.__init__(self, psf, type) self.section = None self.children = [] self.childrenclslist = childrenclslist self.childrenclsignore = childrenclsignore self.endpos = None self.hashtable = None def __len__(self): return len(self.children) def getChunks(self): return self.children def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value self.children = [] self.data = Container22(self.psf, childrenclslist=self.childrenclslist) self.data.deSerializeFile(file) self.hashtable = self.hashclass(self.psf) self.hashtable.deSerializeFile(file) # Copy children reference from data self.children = self.data.children self.section = UInt32.fromFile(file) # Read trailing bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def __repr__(self): s="" if self.fileoffset: s += "0x%x"%self.fileoffset +":" s += self.__class__.__name__ + "(" + str(self.type) +")" if self.endpos: s+=" size="+str(self.endpos-self.fileoffset) + "\n" s += "\n".join([indent(s) for s in map(str,(self.children, self.hashtable))]) + "\n" return s class HeaderSection(SimpleContainer): type=21 def __init__(self, psf, n=None): SimpleContainer.__init__(self,psf, childrenclslist=PropertyClasses, childrenclsignore=NextSectionClasses) self.properties = {} def addProperty(self, prop): """Add property to header""" self.children.append(prop) self.properties[prop.name] = prop.value def deSerializeFile(self, file): SimpleContainer.deSerializeFile(self, file) # Read header properties self.properties = {} for prop in 
self.children: self.properties[prop.name] = prop.value def toPSFasc(self, prec=None): r="HEADER\n" r+='"PSFversion" "1.00"\n' r+="\n".join([child.toPSFasc(prec) for child in self.children \ if not child.name.value[0:3].upper() == 'PSF']) return r class SweepSection(SimpleContainer): type=21 def __init__(self, psf): SimpleContainer.__init__(self, psf, childrenclslist=[DataTypeRef], childrenclsignore=NextSectionClasses) def deSerializeFile(self, file): SimpleContainer.deSerializeFile(self, file) # Read header properties self.idMap = {} for chunk in self.children: self.idMap[chunk.id] = chunk def getSweep(self, id): return self.idMap[id] def getNames(self): return tuple([str(child.name) for child in self.children]) def toPSFasc(self, prec=None): r="SWEEP\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class TypeSection(HashContainer): def __init__(self, psf): HashContainer.__init__(self, psf, childrenclslist=[DataTypeDef], childrenclsignore=NextSectionClasses) self.idMap = {} self.nameMap = {} def addType(self, type): type.id = self.psf.allocId() self.children.append(type) self.idMap[type.id] = type self.nameMap[type.name] = type def getType(self, id): return self.idMap[id] def getTypeByName(self, name): return self.nameMap[name] def deSerializeFile(self, file): HashContainer.deSerializeFile(self, file) # Read header properties self.idMap = {} for chunk in self.children: self.idMap[chunk.id] = chunk self.nameMap[chunk.name] = type def toPSFasc(self, prec=None): r="TYPE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class TraceSection(HashContainer): hashclass = HashTableTrace def __init__(self, psf): HashContainer.__init__(self, psf, childrenclslist=[GroupDef, DataTypeRef]) self.idMap = {} self.nameIndex = {} def deSerializeFile(self, file): HashContainer.deSerializeFile(self, file) self.idMap = {} for index, chunk in enumerate(self.children): self.idMap[chunk.id] = chunk if isinstance(chunk, GroupDef): self.nameIndex.update(dict([(par, (index,)+value) for par,value in chunk.getNameIndex().items()])) else: self.nameIndex[chunk.name] = (index,) def getNameIndex(self): return self.nameIndex def toPSFasc(self, prec=None): r="TRACE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r def getTraceNames(self): result = [] for trace in self.children: if isinstance(trace,GroupDef): result += trace.getNames() else: result.append(trace.name) return tuple(map(str, result)) def getTraceIndexByName(self, name): """Returns an index to the given trace name The index is hierarchical so if if the traces are divided into 2 groups the index (0,1) means child 1 of group 0 >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.traces.getTraceIndexByName("VIN") (0, 1) >>> psf=PSFReader('./test/resultdirs/parsweep2/C=1e-12,R=1e-12/psf/ac.ac') >>> psf.open() >>> psf.traces.getTraceIndexByName("net3") (0,) """ return self.nameIndex[name] class ValuesSectionNonSweep(HashContainer): type=21 def __init__(self, psf): HashContainer.__init__(self, psf, childrenclslist=[NonSweepValue]) self.idMap={} self.nameMap={} def addValue(self, value): value.id = self.psf.allocId() if not isinstance(value, NonSweepValue): raise ValueError("Value should be a NonSweepValue") self.idMap[value.id] = value self.nameMap[value.name] = value self.children.append(value) def deSerializeFile(self, file): HashContainer.deSerializeFile(self, file) for child in self.children: self.nameMap[child.name] = child def getValuePropertiesByName(self, name): return 
dict([(prop.name, prop.value) for prop in self.nameMap[name].properties]) def getValueByName(self, name): return self.nameMap[name].getValue() def getValueNames(self): return tuple([child.name for child in self.children]) def toPSFasc(self, prec=None): r="VALUE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class ValuesSectionSweep(SimpleContainer): type=21 def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.endpos = UInt32.fromFile(file).value windowedsweep = self.psf.header.properties.has_key('PSF window size') if windowedsweep: el = ZeroPad(self.psf) el.deSerializeFile(file) isweep=0 while isweep < self.psf.header.properties['PSF sweep points']: if windowedsweep: value = SweepValueWindowed(self.psf) else: value = SweepValueSimple(self.psf) isweep += value.deSerializeFile(file, n=self.psf.header.properties['PSF sweep points']-isweep) self.children.append(value) self.section = UInt32.fromFile(file) # Read trailing bytes if self.endpos-file.tell() != 0: warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__)) self.tail = file.read(self.endpos-file.tell()) file.seek(self.endpos) def getSweepParamValues(self): return reduce(operator.__add__, [child.getSweepParamValues() for child in self.children]) def getValueNames(self): return self.psf.traces.getTraceNames() def __len__(self): return len(self.psf.traces) def getValueByName(self, name): windowedsweep = self.psf.header.properties.has_key('PSF window size') index = self.psf.traces.getTraceIndexByName(name) result = [] for child in self.children: obj=child for i in index: obj = obj.children[i] # If windowed sweep, each child will be a list of values in the window if windowedsweep: result += [v.getValue() for v in obj] else: result.append(obj.getValue()) return numpy.array(result) def toPSFasc(self, prec=None): r="VALUE\n" r+="\n".join([child.toPSFasc(prec) for child in self.children]) return r class NonSweepValue(Chunk): type=16 def __init__(self, psf, id=None, typeid=None, name=None, value=None): Chunk.__init__(self, psf, type) self.id = id self.name = name self.typeid = typeid if typeid: self.valuetype = self.psf.types.idMap[self.typeid] else: self.valuetype = None if value: self.value = value elif self.valuetype: self.value = self.valuetype.getDataObj() else: self.value = None self.properties = [] def getValue(self): return self.value.getValue() def setValue(self, value): self.value.setValue(value) def deSerializeFile(self, file): startpos = file.tell() Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) self.typeid = UInt32.fromFile(file) assert(self.typeid != 0) self.valuetype = self.psf.types.idMap[self.typeid] self.value = self.valuetype.getDataObj() self.value.deSerializeFile(file) # Read possible property objects that belongs to the type by peeking ahead while True: oldpos = file.tell() try: prop = readChunk(self.psf, file, expectedclasses=PropertyClasses) self.properties.append(prop) except ValueError: file.seek(oldpos) break def toPSFasc(self, prec=None): r = self.name.toPSFasc(prec) + " " + self.valuetype.name.toPSFasc(prec) + " " + self.value.toPSFasc(prec) if len(self.properties)>0: r+=" PROP(\n" r+="\n".join([prop.toPSFasc(prec) for prop in self.properties]) r+="\n)" return r def __repr__(self): return self.__class__.__name__+"("+str({"name":self.name, "id":"0x%x"%self.id, "typeid":"0x%x"%self.typeid, "properties":self.properties,"value":self.value})+")" class SweepValue(Chunk): """Class representing 
waveform data""" type = 16 def __init__(self, psf, type=None): Chunk.__init__(self, psf, type) self.id = None self.linktypeid = UInt32() self.datatypeid = UInt32() self.paramtype = None self.paramvalue = None self.children = [] self.properties = [] def deSerializeFile(self, file, n=None): pass def getSweepParamValues(self): pass def __len__(self): return len(self.children) def __repr__(self): return self.__class__.__name__ + "(" + str(self.paramtype.name) + "=" + str(self.paramvalue) +","+ \ "children="+str(self.children) +")\n" class SweepValueSimple(SweepValue): def deSerializeFile(self, file, n=None): Chunk.deSerializeFile(self, file) self.paramtypeid = UInt32.fromFile(file) self.paramtype = self.psf.sweeps.getSweep(self.paramtypeid) self.paramvalue = self.paramtype.getDataObj() self.paramvalue.deSerializeFile(file) for datatype in self.psf.traces.children: datatypeid = UInt32.fromFile(file) if datatypeid in (17,16): valuetypeid = UInt32.fromFile(file) if valuetypeid != datatype.id: ## Unexpected value type id found ## This is probably because of missing trace values ## Undo read of datatypeid, valuetypeid and break out of loop and file.seek(-2*UInt32.size, 1) break value = datatype.getDataObj() value.deSerializeFile(file) self.children.append(value) elif datatypeid == 15: ## End of section file.seek(-UInt32.size, 1) break else: raise Exception("Datatypeid unknown 0x%x" % datatypeid) return 1 def getSweepParamValues(self): return [self.paramvalue.getValue()] def toPSFasc(self, prec=None): r=self.paramtype.name.toPSFasc(prec) + " " +self.paramvalue.toPSFasc(prec)+"\n" r+="\n".join([valuetype.name.toPSFasc(prec) + " " + value.toPSFasc(prec) \ for valuetype, value in zip(self.psf.traces.children, self.children)]) return r class SweepValueWindowed(SweepValue): def deSerializeFile(self, file, n=None): bufferstart = file.tell() Chunk.deSerializeFile(self, file) self.paramtypeid = UInt32.fromFile(file) assert(len(self.psf.sweeps.children) == 1) self.paramtype=self.psf.sweeps.children[0] self.paramvalue = [] # Get sweep parameter values paramvaluesize = self.paramtype.getDataSize() windowsize = self.psf.header.properties['PSF window size'].value leftinwindow = (file.tell()//windowsize + 1)*windowsize - file.tell() windowlen = leftinwindow//paramvaluesize; if n > windowlen: n = windowlen for j in xrange(n): paramvalue = self.paramtype.getDataObj() paramvalue.deSerializeFile(file) if j < n: self.paramvalue.append(paramvalue) # Get trace values for trace in self.psf.traces.children: value = trace.getDataObj() value.deSerializeFile(file, count=n, windowsize=self.psf.header.properties['PSF window size'].value) self.children.append(value) # Skip trailing padding bytes padsize = int((self.psf.header.properties['PSF buffer size'] - (file.tell()-bufferstart))% \ self.psf.header.properties['PSF buffer size']) file.seek(padsize, 1) return n def getSweepParamValues(self): return [v.getValue() for v in self.paramvalue] def toPSFasc(self, prec=None): r='' for i, paramvalue in enumerate(self.paramvalue): r+=self.paramtype.name.toPSFasc(prec) + " " + paramvalue.toPSFasc(prec) + "\n" r+="\n".join([trace.name.toPSFasc(prec) + " " + value.toPSFasc(prec=prec, index=i) \ for trace,value in zip(self.psf.traces.children, self.children)]) if i < len(self.paramvalue)-1: r+="\n" return r class GroupData(PSFData): def __init__(self, groupdef): PSFData.__init__(self) self.groupdef = groupdef self.children = [] def deSerializeFile(self, file, count=None, windowsize=None): for element in self.groupdef.children: if 
count==None: value = element.getDataObj() value.deSerializeFile(file) self.children.append(value) else: valuearray=[] # If a window is used in the PSF file, the entire window is stored # and the data is aligned to the end of the window. So we need # to skip window size - data size file.seek(int(windowsize - count*element.getDataSize()), 1) for i in xrange(0,count): value = element.getDataObj() value.deSerializeFile(file) valuearray.append(value) self.children.append(valuearray) def toPSFasc(self, prec=None, index=None): if index != None: return "\n".join([v[index].toPSFasc(prec) for v in self.children]) else: return "\n".join([v.toPSFasc(prec) for v in self.children]) def getSize(self): return self.groupdef.getDataSize() def __repr__(self): return "GroupData" + "\n" + "\n".join([indent(s) for s in map(repr,self.children)]) + "\n" class GroupDef(Chunk): type=17 """Class representing group of traces""" def __init__(self, psf): Chunk.__init__(self, psf) self.children=[] self.datasize=None def getDataObj(self): return GroupData(self) def deSerializeFile(self, file): Chunk.deSerializeFile(self, file) self.id = UInt32.fromFile(file) self.name = String.fromFile(file) self.nchildren = UInt32.fromFile(file) # Read children self.children = [] self.datasize = 0 for i in range(0, self.nchildren): child = DataTypeRef(self.psf) child.deSerializeFile(file) self.children.append(child) self.datasize += child.getDataSize() def getNameIndex(self): return dict([(v.name, (i,)) for i,v in enumerate(self.children)]) def toPSFasc(self, prec=None): s=self.name.toPSFasc(prec) + " GROUP %d\n"%len(self.children) s+="\n".join([child.toPSFasc(prec) for child in self.children]) return s def getDataSize(self): return self.datasize def getNames(self): return [str(child.name) for child in self.children] def __repr__(self): return "0x%x"%self.fileoffset +":" + self.__class__.__name__+ "(id=0x%x"%self.id+", nchildren=%d"%self.nchildren+")\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n" class UnknownChunk(Exception): def __init__(self, chunktype): self.type = chunktype def __str__(self): return "Unknown chunk of type: %d"%self.type class InvalidChunk(Exception): def __init__(self, chunk): self.chunk = chunk def __str__(self): return "Invalid %s"%(self.chunk.__class__.__name__) class IncorrectChunk(Exception): def __init__(self, type, expectedtype): self.type = type self.expectedtype = expectedtype def __str__(self): return "Incorrect chunk type %d (should be %d)"%(self.type, self.expectedtype) class LastValue(Exception): pass def readChunk(psf, file, expectedclasses=None): type = UInt32.fromFile(file) file.seek(-4, 1) # Rewind one word since the type will be read again by the deSerializeFile function if expectedclasses: if not type in [cls.type for cls in expectedclasses]: raise ValueError("Unexpected type %d, not in "%type + str([cls.type for cls in expectedclasses])) for cls in expectedclasses: if type == cls.type: chunk = cls(psf) else: raise Exception("Use expectedclasses!") if type == 21: chunk = Section(psf) elif type == 20: chunk = ZeroPad(psf) elif type == 22: chunk = Container22(psf, type, n=n) elif type == 33: chunk = PropertyString(psf) elif type == 34: chunk = PropertyUInt(psf) elif type == 35: chunk = PropertyFloat64(psf) elif type == 16: chunk = DataTypeDef(psf,type) elif type == 17: chunk = GroupDef(psf) elif type == 19: chunk = HashTable(psf, n=n) elif type in (1,2,3,4): file.seek(4,1) return None else: warning("Unknown chunk %d"%type) raise UnknownChunk(type) chunk.deSerializeFile(file) 
return chunk class PSFReader(object): def __init__(self, filename=None, asc=None): self.header = None self.types = TypeSection(self) self.sweeps = None self.traces = None self.lastid = 0x1000 self.verbose = False self.filename = filename self.file = None self.values = None self.asc = asc def open(self): """Open a PSF file and read its headers. Example: Trying to open a valid psf file >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() """ if self.asc == None: self.asc = False if not self.asc: self.file = open(self.filename, "rb") if self.validate(): self.deSerializeFile(self.file) else: raise PSFInvalid("Invalid PSF file") else: newpsfobj = psfasc.parse("psfasc", open(self.filename).read()) self.header = newpsfobj.header self.types = newpsfobj.types self.sweeps = newpsfobj.sweeps self.traces = newpsfobj.traces self.values = newpsfobj.values self.lastid = newpsfobj.lastid self.verbose = newpsfobj.verbose def validate(self): """Check if the PSF file is valid. Returns True if valid, False otherwise >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.validate() True >>> psf=PSFReader('./test/psfasc/srcSweep.asc') >>> psf.validate() False """ if self.file == None: file = open(self.filename, "rb") else: file = self.file # Read Clarissa signature file.seek(-4-8,2) clarissa = file.read(8) return clarissa == "Clarissa" def getNSweepPoints(self): """Returns number of sweeps. 0 if not swept. >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getNSweepPoints() 4 """ if self.file == None: ValueError("Please open the PSF file first") return self.header.properties['PSF sweep points'] def getNSweeps(self): """Returns the number of nested sweeps >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getNSweeps() 1 """ if self.file == None: ValueError("Please open the PSF file first") return self.header.properties['PSF sweeps'] def __len__(self): return len(self.values) def getValueNames(self): """Returns a tuple of the names of the traces >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.getValueNames() >>> psf.open() >>> psf.getValueNames() ('VOUT', 'VIN', 'R0') >>> psf=PSFReader('./test/resultdirs/simple/opBegin') >>> psf.open() >>> psf.getValueNames() ('R0', 'V1', 'V0', 'E0', 'VIN', 'NET9', 'VOUT') """ if self.values: return self.values.getValueNames() def getSweepParamNames(self): return self.sweeps.getNames() def getSweepParamValues(self, dim=0): """Returns a numpy.array of sweep parameter values for sweep dimension dim. >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getSweepParamValues(0) array([ 1., 2., 3., 4.]) windowed result >>> psf=PSFReader('./test/psf/timeSweep') >>> psf.open() >>> psf.getSweepParamValues(0)[:3] array([ 0.00000000e+00, 2.00000000e-11, 5.33333333e-11]) """ return numpy.array(self.values.getSweepParamValues()) def getValuePropertiesByName(self, name): """Returns the properties associated with value >>> psf=PSFReader('./test/psf/opBegin') >>> psf.open() >>> psf.getValuePropertiesByName("XIRXRFMIXTRIM0.XM1PDAC1.XMN.MAIN")["Region"] 'subthreshold' """ return self.values.getValuePropertiesByName(name) def getValuesByName(self, name): """Returns a numpy.array of trace values for swept results and a scalar for non swept. 
Example: swept psf file >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.getValuesByName("VOUT") array([-6., -4., -2., 0.]) >>> psf.getValuesByName("VIN") array([ 1., 2., 3., 4.]) swept psf with complex numbers >>> psf=PSFReader('./test/psf/frequencySweep') >>> psf.open() >>> res = psf.getValuesByName("ANT_CM") >>> len(res) 123 >>> res[:3] array([ 0.6+0.j, 0. +0.j, 0. +0.j]) swept windowed psf file >>> psf=PSFReader('./test/psf/timeSweep') >>> psf.open() >>> psf.getValuesByName("INP")[0:3] array([ 0.6 , 0.62486899, 0.66211478]) non-swept psf file >>> psf=PSFReader('./test/psf/dcOpInfo.info') >>> psf.open() >>> psf.getValuesByName("IREG21U_0.MP5.b1")['betadc'] 4.7957014499434756 swept psf file withouth groups >>> psf=PSFReader('./test/resultdirs/parsweep/C=1e-12,R=1e-12/psf/ac.ac') >>> psf.open() >>> psf.getValuesByName("net3") array([ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]) """ return self.values.getValueByName(name) def nTraces(self): """Returns number of traces >>> psf=PSFReader('./test/psf/srcSweep') >>> psf.open() >>> psf.nTraces() 3 """ if self.file == None: ValueError("Please open the PSF file first") return self.header.properties['PSF traces'] def allocId(self): self.lastid+=1 return self.lastid-1 def info(self): s="Number of sweeps: %d\n"%self.getNSweeps() if self.getNSweeps() > 0: s+="Number of sweep points: %d\n"%self.getNSweepPoints() s+="Number of traces: %d"%self.nTraces() return s def updateHeader(self): if self.sweeps: sweeps = len(self.sweeps.children) else: sweeps=0 self.header.addProperty(PropertyUInt("PSF sweeps", sweeps)) def deSerializeFile(self, file): # Find filesize file.seek(0,2) filesize = file.tell() # Last word contains the size of the data file.seek(-4,2) datasize = UInt32.fromFile(file).value if self.verbose: print "Total data size: ",datasize # Read Clarissa signature file.seek(-4-8,2) clarissa = file.read(8) if not clarissa == "Clarissa": raise ValueError("Clarissa signature not found") # Read section index table sectionoffsets = {} file.seek(-4-8-8,2) pos = file.tell() sectionnums = [] while file.tell() >= datasize: sectionnum = UInt32.fromFile(file) sectionnums.insert(0,sectionnum.value) offset = UInt32.fromFile(file) sectionoffsets[sectionnum] = offset pos -= 8 file.seek(pos) offsets = [sectionoffsets[secnum] for secnum in sectionnums] sizes = map(operator.sub, offsets[1:]+[datasize], offsets) sectionsizes = dict(zip(sectionnums, sizes)) if self.verbose: print sectionoffsets, sectionsizes file.seek(0) self.unk1 = UInt32.fromFile(file) if self.verbose: print "First word: 0x%x"%self.unk1 # Load headers file.seek(int(sectionoffsets[0])) self.header = HeaderSection(self) self.header.deSerializeFile(file) if self.verbose: print "HEADER" print self.header if sectionoffsets.has_key(1): file.seek(int(sectionoffsets[1])) self.types.deSerializeFile(file) if self.verbose: print "TYPE" print self.types if sectionoffsets.has_key(2): file.seek(int(sectionoffsets[2])) self.sweeps = SweepSection(self) self.sweeps.deSerializeFile(file) if self.verbose: print "SWEEPS" print self.sweeps if sectionoffsets.has_key(3): file.seek(int(sectionoffsets[3])) self.traces = TraceSection(self) 
self.traces.deSerializeFile(file) if sectionoffsets.has_key(4): file.seek(int(sectionoffsets[4])) # Load data if self.sweeps: self.values = ValuesSectionSweep(self) else: self.values = ValuesSectionNonSweep(self) self.values.deSerializeFile(file) def printme(self): print "HEADER" print self.header print "TYPES" print self.types if self.sweeps: print "SWEEP" print self.sweeps if self.traces: print "TRACE" print self.traces print "VALUES" print self.values def toPSFasc(self, prec=None): """Export to PSF ascii""" sections = [self.header.toPSFasc(prec), self.types.toPSFasc(prec)] if self.sweeps: sections.append(self.sweeps.toPSFasc(prec)) if self.traces: sections.append(self.traces.toPSFasc(prec)) if self.values: sections.append(self.values.toPSFasc(prec)) r="\n".join(sections) + "\n" r+="END\n" return r def __repr__(self): return "\n".join(map(str, (self.header, self.types, self.sweeps, self.traces, self.values))) if __name__ == "__main__": import doctest doctest.testmod()
mit
6,933,756,850,009,863,000
-319,156,171,653,495,000
31.022713
190
0.569805
false
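The doctests embedded in the PSFReader methods above already describe the intended call sequence; gathered into one place, a minimal session looks roughly like the sketch below. The './test/psf/srcSweep' path is the sample file those doctests reference, the sketch assumes PSFReader is importable from the module above, and the expected outputs in the comments are copied from the doctests rather than verified independently.

# Minimal usage sketch assembled from the doctests above (assumes the module
# defining PSFReader is importable and the doctest sample file is present).
psf = PSFReader('./test/psf/srcSweep')
psf.open()                          # parse header, type, sweep, trace and value sections
print(psf.getValueNames())          # ('VOUT', 'VIN', 'R0')
print(psf.getNSweepPoints())        # 4
print(psf.getSweepParamValues(0))   # array([ 1.,  2.,  3.,  4.])
print(psf.getValuesByName("VOUT"))  # array([-6., -4., -2.,  0.])
print(psf.toPSFasc(prec=6))         # export the whole result set as PSF ascii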
jlegendary/orange
Orange/OrangeWidgets/Classify/OWKNN.py
6
6326
""" <name>k Nearest Neighbours</name> <description>K-nearest neighbours learner/classifier.</description> <icon>icons/kNearestNeighbours.svg</icon> <contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact> <priority>25</priority> """ from OWWidget import * import OWGUI from exceptions import Exception from orngWrap import PreprocessedLearner NAME = "k Nearest Neighbours" ID = "orange.widgets.classify.knn" DESCRIPTION = "K-nearest neighbours learner/classifier." ICON = "icons/kNearestNeighbours.svg" AUTHOR = "Janez Demsar" PRIORITY = 25 HELP_REF = "k-Nearest Neighbours" KEYWORDS = ["knn"] INPUTS = ( InputSignal(name="Data", type=ExampleTable, handler="setData", doc="Training data set", id="train-data"), InputSignal(name="Preprocess", type=PreprocessedLearner, handler="setPreprocessor", id="preprocessor") ) OUTPUTS = ( OutputSignal(name="Learner", type=orange.Learner, doc="The kNN learner with settings as specified in " "the dialog", id="learner"), OutputSignal(name="kNN Classifier", type=orange.kNNClassifier, doc="A kNN classifier trained on 'Data'.", id="knn-classifier") ) WIDGET_CLASS = "OWKNN" class OWKNN(OWWidget): settingsList = ["name", "k", "metrics", "ranks", "normalize", "ignoreUnknowns"] def __init__(self, parent=None, signalManager = None, name='kNN'): OWWidget.__init__(self, parent, signalManager, name, wantMainArea = 0, resizingEnabled = 0) self.callbackDeposit = [] self.inputs = [("Data", ExampleTable, self.setData), ("Preprocess", PreprocessedLearner, self.setPreprocessor)] self.outputs = [("Learner", orange.Learner),("kNN Classifier", orange.kNNClassifier)] self.metricsList = [("Euclidean", orange.ExamplesDistanceConstructor_Euclidean), ("Hamming", orange.ExamplesDistanceConstructor_Hamming), ("Manhattan", orange.ExamplesDistanceConstructor_Manhattan), ("Maximal", orange.ExamplesDistanceConstructor_Maximal), # ("Dynamic time warp", orange.ExamplesDistanceConstructor_DTW) ] # Settings self.name = 'kNN' self.k = 5; self.metrics = 0; self.ranks = 0 self.ignoreUnknowns = 0 self.normalize = self.oldNormalize = 1 self.loadSettings() self.data = None # input data set self.preprocessor = None # no preprocessing as default self.setLearner() # this just sets the learner, no data # has come to the input yet OWGUI.lineEdit(self.controlArea, self, 'name', box='Learner/Classifier Name', \ tooltip='Name to be used by other widgets to identify your learner/classifier.') OWGUI.separator(self.controlArea) wbN = OWGUI.widgetBox(self.controlArea, "Neighbours") OWGUI.spin(wbN, self, "k", 1, 100, 1, None, "Number of neighbours ", orientation="horizontal") OWGUI.checkBox(wbN, self, "ranks", "Weighting by ranks, not distances") OWGUI.separator(self.controlArea) wbM = OWGUI.widgetBox(self.controlArea, "Metrics") OWGUI.comboBox(wbM, self, "metrics", items = [x[0] for x in self.metricsList], valueType = int, callback = self.metricsChanged) self.cbNormalize = OWGUI.checkBox(wbM, self, "normalize", "Normalize continuous attributes") OWGUI.checkBox(wbM, self, "ignoreUnknowns", "Ignore unknown values") self.metricsChanged() OWGUI.separator(self.controlArea) OWGUI.button(self.controlArea, self, "&Apply", callback=self.setLearner, disabled=0, default=True) OWGUI.rubber(self.controlArea) self.resize(100,250) def sendReport(self): self.reportSettings("Learning parameters", [("Metrics", self.metricsList[self.metrics][0]), not self.metrics and ("Continuous attributes", ["Raw", "Normalized"][self.normalize]), ("Unknown values ignored", OWGUI.YesNo[self.ignoreUnknowns]), ("Number of neighbours", self.k), 
("Weighting", ["By distances", "By ranked distances"][self.ranks])]) self.reportData(self.data) def metricsChanged(self): if not self.metrics and not self.cbNormalize.isEnabled(): self.normalize = self.oldNormalize self.cbNormalize.setEnabled(True) elif self.metrics and self.cbNormalize.isEnabled(): self.oldNormalize = self.normalize self.normalize = False self.cbNormalize.setEnabled(False) def setData(self,data): self.data = self.isDataWithClass(data, orange.VarTypes.Discrete, checkMissing=True) and data or None self.setLearner() def setPreprocessor(self, pp): self.preprocessor = pp self.setLearner() def setLearner(self): distconst = self.metricsList[self.metrics][1]() distconst.ignoreUnknowns = self.ignoreUnknowns distconst.normalize = self.normalize self.learner = orange.kNNLearner(k = self.k, rankWeight = self.ranks, distanceConstructor = distconst) if self.preprocessor: self.learner = self.preprocessor.wrapLearner(self.learner) self.learner.name = self.name self.send("Learner", self.learner) self.learn() def learn(self): self.classifier = None if self.data and self.learner: try: self.classifier = self.learner(self.data) self.classifier.name = self.name except Exception, (errValue): self.classifier = None self.error(str(errValue)) self.send("kNN Classifier", self.classifier) if __name__ == "__main__": a = QApplication(sys.argv) ow = OWKNN() dataset = orange.ExampleTable('adult_sample') ow.setData(dataset) ow.show() a.exec_() ow.saveSettings()
gpl-3.0
8,613,750,084,434,552,000
5,431,000,977,329,990,000
35.356322
135
0.605596
false
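Stripped of the GUI plumbing, OWKNN.setLearner() and learn() boil down to a few calls into the legacy Orange 2.x API. The sketch below mirrors those calls under the assumption that the 'orange' module and the 'adult_sample' table used in the widget's own __main__ block are available; calling the trained classifier on a single example follows the usual Orange 2 convention and is not shown in the widget itself.

# What setLearner()/learn() do, without the widget machinery (assumes Orange 2.x).
import orange

data = orange.ExampleTable('adult_sample')          # same sample table as the widget's __main__

distconst = orange.ExamplesDistanceConstructor_Euclidean()
distconst.ignoreUnknowns = False                    # widget default ignoreUnknowns=0
distconst.normalize = True                          # widget default normalize=1

learner = orange.kNNLearner(k=5, rankWeight=0, distanceConstructor=distconst)
learner.name = 'kNN'

classifier = learner(data)                          # train on the whole table
print(classifier(data[0]))                          # predicted class of the first example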
TresysTechnology/setools
tests/nodeconquery.py
1
10617
# Copyright 2014, Tresys Technology, LLC # Copyright 2017, Chris PeBenito <pebenito@ieee.org> # # This file is part of SETools. # # SETools is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # SETools is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SETools. If not, see <http://www.gnu.org/licenses/>. # import sys import unittest from socket import AF_INET6 from ipaddress import IPv4Network, IPv6Network from setools import SELinuxPolicy, NodeconQuery class NodeconQueryTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.p = SELinuxPolicy("tests/nodeconquery.conf") def test_000_unset(self): """Nodecon query with no criteria""" # query with no parameters gets all nodecons. nodecons = sorted(self.p.nodecons()) q = NodeconQuery(self.p) q_nodecons = sorted(q.results()) self.assertListEqual(nodecons, q_nodecons) def test_001_ip_version(self): """Nodecon query with IP version match.""" q = NodeconQuery(self.p, ip_version=AF_INET6) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv6Network("1100::/16"), IPv6Network("1110::/16")], nodecons) def test_020_user_exact(self): """Nodecon query with context user exact match""" q = NodeconQuery(self.p, user="user20", user_regex=False) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.20.1/32")], nodecons) def test_021_user_regex(self): """Nodecon query with context user regex match""" q = NodeconQuery(self.p, user="user21(a|b)", user_regex=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.21.1/32"), IPv4Network("10.1.21.2/32")], nodecons) def test_030_role_exact(self): """Nodecon query with context role exact match""" q = NodeconQuery(self.p, role="role30_r", role_regex=False) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.30.1/32")], nodecons) def test_031_role_regex(self): """Nodecon query with context role regex match""" q = NodeconQuery(self.p, role="role31(a|c)_r", role_regex=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.31.1/32"), IPv4Network("10.1.31.3/32")], nodecons) def test_040_type_exact(self): """Nodecon query with context type exact match""" q = NodeconQuery(self.p, type_="type40", type_regex=False) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.40.1/32")], nodecons) def test_041_type_regex(self): """Nodecon query with context type regex match""" q = NodeconQuery(self.p, type_="type41(b|c)", type_regex=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.41.2/32"), IPv4Network("10.1.41.3/32")], nodecons) def test_050_range_exact(self): """Nodecon query with context range exact match""" q = NodeconQuery(self.p, range_="s0:c1 - s0:c0.c4") nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.50.1/32")], nodecons) def test_051_range_overlap1(self): """Nodecon query with context range overlap match (equal)""" q = NodeconQuery(self.p, range_="s1:c1 - s1:c0.c4", range_overlap=True) 
nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons) def test_051_range_overlap2(self): """Nodecon query with context range overlap match (subset)""" q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c3", range_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons) def test_051_range_overlap3(self): """Nodecon query with context range overlap match (superset)""" q = NodeconQuery(self.p, range_="s1 - s1:c0.c4", range_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons) def test_051_range_overlap4(self): """Nodecon query with context range overlap match (overlap low level)""" q = NodeconQuery(self.p, range_="s1 - s1:c1,c2", range_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons) def test_051_range_overlap5(self): """Nodecon query with context range overlap match (overlap high level)""" q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c4", range_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons) def test_052_range_subset1(self): """Nodecon query with context range subset match""" q = NodeconQuery(self.p, range_="s2:c1,c2 - s2:c0.c3", range_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons) def test_052_range_subset2(self): """Nodecon query with context range subset match (equal)""" q = NodeconQuery(self.p, range_="s2:c1 - s2:c1.c3", range_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons) def test_053_range_superset1(self): """Nodecon query with context range superset match""" q = NodeconQuery(self.p, range_="s3 - s3:c0.c4", range_superset=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons) def test_053_range_superset2(self): """Nodecon query with context range superset match (equal)""" q = NodeconQuery(self.p, range_="s3:c1 - s3:c1.c3", range_superset=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons) def test_054_range_proper_subset1(self): """Nodecon query with context range proper subset match""" q = NodeconQuery(self.p, range_="s4:c1,c2", range_subset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons) def test_054_range_proper_subset2(self): """Nodecon query with context range proper subset match (equal)""" q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c3", range_subset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([], nodecons) def test_054_range_proper_subset3(self): """Nodecon query with context range proper subset match (equal low only)""" q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c2", range_subset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons) def test_054_range_proper_subset4(self): """Nodecon query with context range proper subset match (equal high only)""" q = NodeconQuery(self.p, range_="s4:c1,c2 - s4:c1.c3", range_subset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) 
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons) def test_055_range_proper_superset1(self): """Nodecon query with context range proper superset match""" q = NodeconQuery(self.p, range_="s5 - s5:c0.c4", range_superset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons) def test_055_range_proper_superset2(self): """Nodecon query with context range proper superset match (equal)""" q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c3", range_superset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([], nodecons) def test_055_range_proper_superset3(self): """Nodecon query with context range proper superset match (equal low)""" q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c4", range_superset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons) def test_055_range_proper_superset4(self): """Nodecon query with context range proper superset match (equal high)""" q = NodeconQuery(self.p, range_="s5 - s5:c1.c3", range_superset=True, range_proper=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons) def test_100_v4network_equal(self): """Nodecon query with IPv4 equal network""" q = NodeconQuery(self.p, network="192.168.1.0/24", network_overlap=False) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("192.168.1.0/24")], nodecons) def test_101_v4network_overlap(self): """Nodecon query with IPv4 network overlap""" q = NodeconQuery(self.p, network="192.168.201.0/24", network_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv4Network("192.168.200.0/22")], nodecons) def test_110_v6network_equal(self): """Nodecon query with IPv6 equal network""" q = NodeconQuery(self.p, network="1100::/16", network_overlap=False) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv6Network("1100::/16")], nodecons) def test_111_v6network_overlap(self): """Nodecon query with IPv6 network overlap""" q = NodeconQuery(self.p, network="1110:8000::/17", network_overlap=True) nodecons = sorted(n.network for n in q.results()) self.assertListEqual([IPv6Network("1110::/16")], nodecons)
lgpl-2.1
8,283,404,454,474,569,000
-4,775,434,163,045,903,000
42.512295
100
0.655081
false
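Outside of unittest the same queries can be run directly; the sketch below mirrors test_021_user_regex above and assumes the same tests/nodeconquery.conf sample policy that the test class loads in setUpClass.

# Stand-alone version of the user-regex query exercised in the tests above.
from setools import SELinuxPolicy, NodeconQuery

p = SELinuxPolicy("tests/nodeconquery.conf")

q = NodeconQuery(p, user="user21(a|b)", user_regex=True)
for nodecon in sorted(q.results()):
    print(nodecon.network)      # expected: 10.1.21.1/32 and 10.1.21.2/32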
zq317157782/Narukami
external/googletest/googlemock/scripts/generator/cpp/ast.py
16
62772
#!/usr/bin/env python # # Copyright 2007 Neal Norwitz # Portions Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generate an Abstract Syntax Tree (AST) for C++.""" __author__ = 'nnorwitz@google.com (Neal Norwitz)' # TODO: # * Tokens should never be exported, need to convert to Nodes # (return types, parameters, etc.) # * Handle static class data for templatized classes # * Handle casts (both C++ and C-style) # * Handle conditions and loops (if/else, switch, for, while/do) # # TODO much, much later: # * Handle #define # * exceptions try: # Python 3.x import builtins except ImportError: # Python 2.x import __builtin__ as builtins import sys import traceback from cpp import keywords from cpp import tokenize from cpp import utils if not hasattr(builtins, 'reversed'): # Support Python 2.3 and earlier. def reversed(seq): for i in range(len(seq)-1, -1, -1): yield seq[i] if not hasattr(builtins, 'next'): # Support Python 2.5 and earlier. def next(obj): return obj.next() VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3) FUNCTION_NONE = 0x00 FUNCTION_CONST = 0x01 FUNCTION_VIRTUAL = 0x02 FUNCTION_PURE_VIRTUAL = 0x04 FUNCTION_CTOR = 0x08 FUNCTION_DTOR = 0x10 FUNCTION_ATTRIBUTE = 0x20 FUNCTION_UNKNOWN_ANNOTATION = 0x40 FUNCTION_THROW = 0x80 FUNCTION_OVERRIDE = 0x100 """ These are currently unused. Should really handle these properly at some point. TYPE_MODIFIER_INLINE = 0x010000 TYPE_MODIFIER_EXTERN = 0x020000 TYPE_MODIFIER_STATIC = 0x040000 TYPE_MODIFIER_CONST = 0x080000 TYPE_MODIFIER_REGISTER = 0x100000 TYPE_MODIFIER_VOLATILE = 0x200000 TYPE_MODIFIER_MUTABLE = 0x400000 TYPE_MODIFIER_MAP = { 'inline': TYPE_MODIFIER_INLINE, 'extern': TYPE_MODIFIER_EXTERN, 'static': TYPE_MODIFIER_STATIC, 'const': TYPE_MODIFIER_CONST, 'register': TYPE_MODIFIER_REGISTER, 'volatile': TYPE_MODIFIER_VOLATILE, 'mutable': TYPE_MODIFIER_MUTABLE, } """ _INTERNAL_TOKEN = 'internal' _NAMESPACE_POP = 'ns-pop' # TODO(nnorwitz): use this as a singleton for templated_types, etc # where we don't want to create a new empty dict each time. It is also const. class _NullDict(object): __contains__ = lambda self: False keys = values = items = iterkeys = itervalues = iteritems = lambda self: () # TODO(nnorwitz): move AST nodes into a separate module. 
class Node(object): """Base AST node.""" def __init__(self, start, end): self.start = start self.end = end def IsDeclaration(self): """Returns bool if this node is a declaration.""" return False def IsDefinition(self): """Returns bool if this node is a definition.""" return False def IsExportable(self): """Returns bool if this node exportable from a header file.""" return False def Requires(self, node): """Does this AST node require the definition of the node passed in?""" return False def XXX__str__(self): return self._StringHelper(self.__class__.__name__, '') def _StringHelper(self, name, suffix): if not utils.DEBUG: return '%s(%s)' % (name, suffix) return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix) def __repr__(self): return str(self) class Define(Node): def __init__(self, start, end, name, definition): Node.__init__(self, start, end) self.name = name self.definition = definition def __str__(self): value = '%s %s' % (self.name, self.definition) return self._StringHelper(self.__class__.__name__, value) class Include(Node): def __init__(self, start, end, filename, system): Node.__init__(self, start, end) self.filename = filename self.system = system def __str__(self): fmt = '"%s"' if self.system: fmt = '<%s>' return self._StringHelper(self.__class__.__name__, fmt % self.filename) class Goto(Node): def __init__(self, start, end, label): Node.__init__(self, start, end) self.label = label def __str__(self): return self._StringHelper(self.__class__.__name__, str(self.label)) class Expr(Node): def __init__(self, start, end, expr): Node.__init__(self, start, end) self.expr = expr def Requires(self, node): # TODO(nnorwitz): impl. return False def __str__(self): return self._StringHelper(self.__class__.__name__, str(self.expr)) class Return(Expr): pass class Delete(Expr): pass class Friend(Expr): def __init__(self, start, end, expr, namespace): Expr.__init__(self, start, end, expr) self.namespace = namespace[:] class Using(Node): def __init__(self, start, end, names): Node.__init__(self, start, end) self.names = names def __str__(self): return self._StringHelper(self.__class__.__name__, str(self.names)) class Parameter(Node): def __init__(self, start, end, name, parameter_type, default): Node.__init__(self, start, end) self.name = name self.type = parameter_type self.default = default def Requires(self, node): # TODO(nnorwitz): handle namespaces, etc. return self.type.name == node.name def __str__(self): name = str(self.type) suffix = '%s %s' % (name, self.name) if self.default: suffix += ' = ' + ''.join([d.name for d in self.default]) return self._StringHelper(self.__class__.__name__, suffix) class _GenericDeclaration(Node): def __init__(self, start, end, name, namespace): Node.__init__(self, start, end) self.name = name self.namespace = namespace[:] def FullName(self): prefix = '' if self.namespace and self.namespace[-1]: prefix = '::'.join(self.namespace) + '::' return prefix + self.name def _TypeStringHelper(self, suffix): if self.namespace: names = [n or '<anonymous>' for n in self.namespace] suffix += ' in ' + '::'.join(names) return self._StringHelper(self.__class__.__name__, suffix) # TODO(nnorwitz): merge with Parameter in some way? class VariableDeclaration(_GenericDeclaration): def __init__(self, start, end, name, var_type, initial_value, namespace): _GenericDeclaration.__init__(self, start, end, name, namespace) self.type = var_type self.initial_value = initial_value def Requires(self, node): # TODO(nnorwitz): handle namespaces, etc. 
return self.type.name == node.name def ToString(self): """Return a string that tries to reconstitute the variable decl.""" suffix = '%s %s' % (self.type, self.name) if self.initial_value: suffix += ' = ' + self.initial_value return suffix def __str__(self): return self._StringHelper(self.__class__.__name__, self.ToString()) class Typedef(_GenericDeclaration): def __init__(self, start, end, name, alias, namespace): _GenericDeclaration.__init__(self, start, end, name, namespace) self.alias = alias def IsDefinition(self): return True def IsExportable(self): return True def Requires(self, node): # TODO(nnorwitz): handle namespaces, etc. name = node.name for token in self.alias: if token is not None and name == token.name: return True return False def __str__(self): suffix = '%s, %s' % (self.name, self.alias) return self._TypeStringHelper(suffix) class _NestedType(_GenericDeclaration): def __init__(self, start, end, name, fields, namespace): _GenericDeclaration.__init__(self, start, end, name, namespace) self.fields = fields def IsDefinition(self): return True def IsExportable(self): return True def __str__(self): suffix = '%s, {%s}' % (self.name, self.fields) return self._TypeStringHelper(suffix) class Union(_NestedType): pass class Enum(_NestedType): pass class Class(_GenericDeclaration): def __init__(self, start, end, name, bases, templated_types, body, namespace): _GenericDeclaration.__init__(self, start, end, name, namespace) self.bases = bases self.body = body self.templated_types = templated_types def IsDeclaration(self): return self.bases is None and self.body is None def IsDefinition(self): return not self.IsDeclaration() def IsExportable(self): return not self.IsDeclaration() def Requires(self, node): # TODO(nnorwitz): handle namespaces, etc. if self.bases: for token_list in self.bases: # TODO(nnorwitz): bases are tokens, do name comparison. for token in token_list: if token.name == node.name: return True # TODO(nnorwitz): search in body too. return False def __str__(self): name = self.name if self.templated_types: name += '<%s>' % self.templated_types suffix = '%s, %s, %s' % (name, self.bases, self.body) return self._TypeStringHelper(suffix) class Struct(Class): pass class Function(_GenericDeclaration): def __init__(self, start, end, name, return_type, parameters, modifiers, templated_types, body, namespace): _GenericDeclaration.__init__(self, start, end, name, namespace) converter = TypeConverter(namespace) self.return_type = converter.CreateReturnType(return_type) self.parameters = converter.ToParameters(parameters) self.modifiers = modifiers self.body = body self.templated_types = templated_types def IsDeclaration(self): return self.body is None def IsDefinition(self): return self.body is not None def IsExportable(self): if self.return_type and 'static' in self.return_type.modifiers: return False return None not in self.namespace def Requires(self, node): if self.parameters: # TODO(nnorwitz): parameters are tokens, do name comparison. for p in self.parameters: if p.name == node.name: return True # TODO(nnorwitz): search in body too. return False def __str__(self): # TODO(nnorwitz): add templated_types. 
suffix = ('%s %s(%s), 0x%02x, %s' % (self.return_type, self.name, self.parameters, self.modifiers, self.body)) return self._TypeStringHelper(suffix) class Method(Function): def __init__(self, start, end, name, in_class, return_type, parameters, modifiers, templated_types, body, namespace): Function.__init__(self, start, end, name, return_type, parameters, modifiers, templated_types, body, namespace) # TODO(nnorwitz): in_class could also be a namespace which can # mess up finding functions properly. self.in_class = in_class class Type(_GenericDeclaration): """Type used for any variable (eg class, primitive, struct, etc).""" def __init__(self, start, end, name, templated_types, modifiers, reference, pointer, array): """ Args: name: str name of main type templated_types: [Class (Type?)] template type info between <> modifiers: [str] type modifiers (keywords) eg, const, mutable, etc. reference, pointer, array: bools """ _GenericDeclaration.__init__(self, start, end, name, []) self.templated_types = templated_types if not name and modifiers: self.name = modifiers.pop() self.modifiers = modifiers self.reference = reference self.pointer = pointer self.array = array def __str__(self): prefix = '' if self.modifiers: prefix = ' '.join(self.modifiers) + ' ' name = str(self.name) if self.templated_types: name += '<%s>' % self.templated_types suffix = prefix + name if self.reference: suffix += '&' if self.pointer: suffix += '*' if self.array: suffix += '[]' return self._TypeStringHelper(suffix) # By definition, Is* are always False. A Type can only exist in # some sort of variable declaration, parameter, or return value. def IsDeclaration(self): return False def IsDefinition(self): return False def IsExportable(self): return False class TypeConverter(object): def __init__(self, namespace_stack): self.namespace_stack = namespace_stack def _GetTemplateEnd(self, tokens, start): count = 1 end = start while 1: token = tokens[end] end += 1 if token.name == '<': count += 1 elif token.name == '>': count -= 1 if count == 0: break return tokens[start:end-1], end def ToType(self, tokens): """Convert [Token,...] to [Class(...), ] useful for base classes. For example, code like class Foo : public Bar<x, y> { ... }; the "Bar<x, y>" portion gets converted to an AST. Returns: [Class(...), ...] """ result = [] name_tokens = [] reference = pointer = array = False def AddType(templated_types): # Partition tokens into name and modifier tokens. names = [] modifiers = [] for t in name_tokens: if keywords.IsKeyword(t.name): modifiers.append(t.name) else: names.append(t.name) name = ''.join(names) if name_tokens: result.append(Type(name_tokens[0].start, name_tokens[-1].end, name, templated_types, modifiers, reference, pointer, array)) del name_tokens[:] i = 0 end = len(tokens) while i < end: token = tokens[i] if token.name == '<': new_tokens, new_end = self._GetTemplateEnd(tokens, i+1) AddType(self.ToType(new_tokens)) # If there is a comma after the template, we need to consume # that here otherwise it becomes part of the name. i = new_end reference = pointer = array = False elif token.name == ',': AddType([]) reference = pointer = array = False elif token.name == '*': pointer = True elif token.name == '&': reference = True elif token.name == '[': pointer = True elif token.name == ']': pass else: name_tokens.append(token) i += 1 if name_tokens: # No '<' in the tokens, just a simple name and no template. 
AddType([]) return result def DeclarationToParts(self, parts, needs_name_removed): name = None default = [] if needs_name_removed: # Handle default (initial) values properly. for i, t in enumerate(parts): if t.name == '=': default = parts[i+1:] name = parts[i-1].name if name == ']' and parts[i-2].name == '[': name = parts[i-3].name i -= 1 parts = parts[:i-1] break else: if parts[-1].token_type == tokenize.NAME: name = parts.pop().name else: # TODO(nnorwitz): this is a hack that happens for code like # Register(Foo<T>); where it thinks this is a function call # but it's actually a declaration. name = '???' modifiers = [] type_name = [] other_tokens = [] templated_types = [] i = 0 end = len(parts) while i < end: p = parts[i] if keywords.IsKeyword(p.name): modifiers.append(p.name) elif p.name == '<': templated_tokens, new_end = self._GetTemplateEnd(parts, i+1) templated_types = self.ToType(templated_tokens) i = new_end - 1 # Don't add a spurious :: to data members being initialized. next_index = i + 1 if next_index < end and parts[next_index].name == '::': i += 1 elif p.name in ('[', ']', '='): # These are handled elsewhere. other_tokens.append(p) elif p.name not in ('*', '&', '>'): # Ensure that names have a space between them. if (type_name and type_name[-1].token_type == tokenize.NAME and p.token_type == tokenize.NAME): type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0)) type_name.append(p) else: other_tokens.append(p) i += 1 type_name = ''.join([t.name for t in type_name]) return name, type_name, templated_types, modifiers, default, other_tokens def ToParameters(self, tokens): if not tokens: return [] result = [] name = type_name = '' type_modifiers = [] pointer = reference = array = False first_token = None default = [] def AddParameter(end): if default: del default[0] # Remove flag. parts = self.DeclarationToParts(type_modifiers, True) (name, type_name, templated_types, modifiers, unused_default, unused_other_tokens) = parts parameter_type = Type(first_token.start, first_token.end, type_name, templated_types, modifiers, reference, pointer, array) p = Parameter(first_token.start, end, name, parameter_type, default) result.append(p) template_count = 0 for s in tokens: if not first_token: first_token = s if s.name == '<': template_count += 1 elif s.name == '>': template_count -= 1 if template_count > 0: type_modifiers.append(s) continue if s.name == ',': AddParameter(s.start) name = type_name = '' type_modifiers = [] pointer = reference = array = False first_token = None default = [] elif s.name == '*': pointer = True elif s.name == '&': reference = True elif s.name == '[': array = True elif s.name == ']': pass # Just don't add to type_modifiers. elif s.name == '=': # Got a default value. Add any value (None) as a flag. default.append(None) elif default: default.append(s) else: type_modifiers.append(s) AddParameter(tokens[-1].end) return result def CreateReturnType(self, return_type_seq): if not return_type_seq: return None start = return_type_seq[0].start end = return_type_seq[-1].end _, name, templated_types, modifiers, default, other_tokens = \ self.DeclarationToParts(return_type_seq, False) names = [n.name for n in other_tokens] reference = '&' in names pointer = '*' in names array = '[' in names return Type(start, end, name, templated_types, modifiers, reference, pointer, array) def GetTemplateIndices(self, names): # names is a list of strings. 
start = names.index('<') end = len(names) - 1 while end > 0: if names[end] == '>': break end -= 1 return start, end+1 class AstBuilder(object): def __init__(self, token_stream, filename, in_class='', visibility=None, namespace_stack=[]): self.tokens = token_stream self.filename = filename # TODO(nnorwitz): use a better data structure (deque) for the queue. # Switching directions of the "queue" improved perf by about 25%. # Using a deque should be even better since we access from both sides. self.token_queue = [] self.namespace_stack = namespace_stack[:] self.in_class = in_class if in_class is None: self.in_class_name_only = None else: self.in_class_name_only = in_class.split('::')[-1] self.visibility = visibility self.in_function = False self.current_token = None # Keep the state whether we are currently handling a typedef or not. self._handling_typedef = False self.converter = TypeConverter(self.namespace_stack) def HandleError(self, msg, token): printable_queue = list(reversed(self.token_queue[-20:])) sys.stderr.write('Got %s in %s @ %s %s\n' % (msg, self.filename, token, printable_queue)) def Generate(self): while 1: token = self._GetNextToken() if not token: break # Get the next token. self.current_token = token # Dispatch on the next token type. if token.token_type == _INTERNAL_TOKEN: if token.name == _NAMESPACE_POP: self.namespace_stack.pop() continue try: result = self._GenerateOne(token) if result is not None: yield result except: self.HandleError('exception', token) raise def _CreateVariable(self, pos_token, name, type_name, type_modifiers, ref_pointer_name_seq, templated_types, value=None): reference = '&' in ref_pointer_name_seq pointer = '*' in ref_pointer_name_seq array = '[' in ref_pointer_name_seq var_type = Type(pos_token.start, pos_token.end, type_name, templated_types, type_modifiers, reference, pointer, array) return VariableDeclaration(pos_token.start, pos_token.end, name, var_type, value, self.namespace_stack) def _GenerateOne(self, token): if token.token_type == tokenize.NAME: if (keywords.IsKeyword(token.name) and not keywords.IsBuiltinType(token.name)): method = getattr(self, 'handle_' + token.name) return method() elif token.name == self.in_class_name_only: # The token name is the same as the class, must be a ctor if # there is a paren. Otherwise, it's the return type. # Peek ahead to get the next token to figure out which. next = self._GetNextToken() self._AddBackToken(next) if next.token_type == tokenize.SYNTAX and next.name == '(': return self._GetMethod([token], FUNCTION_CTOR, None, True) # Fall through--handle like any other method. # Handle data or function declaration/definition. syntax = tokenize.SYNTAX temp_tokens, last_token = \ self._GetVarTokensUpTo(syntax, '(', ';', '{', '[') temp_tokens.insert(0, token) if last_token.name == '(': # If there is an assignment before the paren, # this is an expression, not a method. expr = bool([e for e in temp_tokens if e.name == '=']) if expr: new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';') temp_tokens.append(last_token) temp_tokens.extend(new_temp) last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0) if last_token.name == '[': # Handle array, this isn't a method, unless it's an operator. # TODO(nnorwitz): keep the size somewhere. 
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']') temp_tokens.append(last_token) if temp_tokens[-2].name == 'operator': temp_tokens.append(self._GetNextToken()) else: temp_tokens2, last_token = \ self._GetVarTokensUpTo(tokenize.SYNTAX, ';') temp_tokens.extend(temp_tokens2) if last_token.name == ';': # Handle data, this isn't a method. parts = self.converter.DeclarationToParts(temp_tokens, True) (name, type_name, templated_types, modifiers, default, unused_other_tokens) = parts t0 = temp_tokens[0] names = [t.name for t in temp_tokens] if templated_types: start, end = self.converter.GetTemplateIndices(names) names = names[:start] + names[end:] default = ''.join([t.name for t in default]) return self._CreateVariable(t0, name, type_name, modifiers, names, templated_types, default) if last_token.name == '{': self._AddBackTokens(temp_tokens[1:]) self._AddBackToken(last_token) method_name = temp_tokens[0].name method = getattr(self, 'handle_' + method_name, None) if not method: # Must be declaring a variable. # TODO(nnorwitz): handle the declaration. return None return method() return self._GetMethod(temp_tokens, 0, None, False) elif token.token_type == tokenize.SYNTAX: if token.name == '~' and self.in_class: # Must be a dtor (probably not in method body). token = self._GetNextToken() # self.in_class can contain A::Name, but the dtor will only # be Name. Make sure to compare against the right value. if (token.token_type == tokenize.NAME and token.name == self.in_class_name_only): return self._GetMethod([token], FUNCTION_DTOR, None, True) # TODO(nnorwitz): handle a lot more syntax. elif token.token_type == tokenize.PREPROCESSOR: # TODO(nnorwitz): handle more preprocessor directives. # token starts with a #, so remove it and strip whitespace. name = token.name[1:].lstrip() if name.startswith('include'): # Remove "include". name = name[7:].strip() assert name # Handle #include \<newline> "header-on-second-line.h". if name.startswith('\\'): name = name[1:].strip() assert name[0] in '<"', token assert name[-1] in '>"', token system = name[0] == '<' filename = name[1:-1] return Include(token.start, token.end, filename, system) if name.startswith('define'): # Remove "define". name = name[6:].strip() assert name value = '' for i, c in enumerate(name): if c.isspace(): value = name[i:].lstrip() name = name[:i] break return Define(token.start, token.end, name, value) if name.startswith('if') and name[2:3].isspace(): condition = name[3:].strip() if condition.startswith('0') or condition.startswith('(0)'): self._SkipIf0Blocks() return None def _GetTokensUpTo(self, expected_token_type, expected_token): return self._GetVarTokensUpTo(expected_token_type, expected_token)[0] def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens): last_token = self._GetNextToken() tokens = [] while (last_token.token_type != expected_token_type or last_token.name not in expected_tokens): tokens.append(last_token) last_token = self._GetNextToken() return tokens, last_token # TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary. 
def _IgnoreUpTo(self, token_type, token): unused_tokens = self._GetTokensUpTo(token_type, token) def _SkipIf0Blocks(self): count = 1 while 1: token = self._GetNextToken() if token.token_type != tokenize.PREPROCESSOR: continue name = token.name[1:].lstrip() if name.startswith('endif'): count -= 1 if count == 0: break elif name.startswith('if'): count += 1 def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None): if GetNextToken is None: GetNextToken = self._GetNextToken # Assumes the current token is open_paren and we will consume # and return up to the close_paren. count = 1 token = GetNextToken() while 1: if token.token_type == tokenize.SYNTAX: if token.name == open_paren: count += 1 elif token.name == close_paren: count -= 1 if count == 0: break yield token token = GetNextToken() yield token def _GetParameters(self): return self._GetMatchingChar('(', ')') def GetScope(self): return self._GetMatchingChar('{', '}') def _GetNextToken(self): if self.token_queue: return self.token_queue.pop() return next(self.tokens) def _AddBackToken(self, token): if token.whence == tokenize.WHENCE_STREAM: token.whence = tokenize.WHENCE_QUEUE self.token_queue.insert(0, token) else: assert token.whence == tokenize.WHENCE_QUEUE, token self.token_queue.append(token) def _AddBackTokens(self, tokens): if tokens: if tokens[-1].whence == tokenize.WHENCE_STREAM: for token in tokens: token.whence = tokenize.WHENCE_QUEUE self.token_queue[:0] = reversed(tokens) else: assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens self.token_queue.extend(reversed(tokens)) def GetName(self, seq=None): """Returns ([tokens], next_token_info).""" GetNextToken = self._GetNextToken if seq is not None: it = iter(seq) GetNextToken = lambda: next(it) next_token = GetNextToken() tokens = [] last_token_was_name = False while (next_token.token_type == tokenize.NAME or (next_token.token_type == tokenize.SYNTAX and next_token.name in ('::', '<'))): # Two NAMEs in a row means the identifier should terminate. # It's probably some sort of variable declaration. if last_token_was_name and next_token.token_type == tokenize.NAME: break last_token_was_name = next_token.token_type == tokenize.NAME tokens.append(next_token) # Handle templated names. if next_token.name == '<': tokens.extend(self._GetMatchingChar('<', '>', GetNextToken)) last_token_was_name = True next_token = GetNextToken() return tokens, next_token def GetMethod(self, modifiers, templated_types): return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') assert len(return_type_and_name) >= 1 return self._GetMethod(return_type_and_name, modifiers, templated_types, False) def _GetMethod(self, return_type_and_name, modifiers, templated_types, get_paren): template_portion = None if get_paren: token = self._GetNextToken() assert token.token_type == tokenize.SYNTAX, token if token.name == '<': # Handle templatized dtors. template_portion = [token] template_portion.extend(self._GetMatchingChar('<', '>')) token = self._GetNextToken() assert token.token_type == tokenize.SYNTAX, token assert token.name == '(', token name = return_type_and_name.pop() # Handle templatized ctors. 
if name.name == '>': index = 1 while return_type_and_name[index].name != '<': index += 1 template_portion = return_type_and_name[index:] + [name] del return_type_and_name[index:] name = return_type_and_name.pop() elif name.name == ']': rt = return_type_and_name assert rt[-1].name == '[', return_type_and_name assert rt[-2].name == 'operator', return_type_and_name name_seq = return_type_and_name[-2:] del return_type_and_name[-2:] name = tokenize.Token(tokenize.NAME, 'operator[]', name_seq[0].start, name.end) # Get the open paren so _GetParameters() below works. unused_open_paren = self._GetNextToken() # TODO(nnorwitz): store template_portion. return_type = return_type_and_name indices = name if return_type: indices = return_type[0] # Force ctor for templatized ctors. if name.name == self.in_class and not modifiers: modifiers |= FUNCTION_CTOR parameters = list(self._GetParameters()) del parameters[-1] # Remove trailing ')'. # Handling operator() is especially weird. if name.name == 'operator' and not parameters: token = self._GetNextToken() assert token.name == '(', token parameters = list(self._GetParameters()) del parameters[-1] # Remove trailing ')'. token = self._GetNextToken() while token.token_type == tokenize.NAME: modifier_token = token token = self._GetNextToken() if modifier_token.name == 'const': modifiers |= FUNCTION_CONST elif modifier_token.name == '__attribute__': # TODO(nnorwitz): handle more __attribute__ details. modifiers |= FUNCTION_ATTRIBUTE assert token.name == '(', token # Consume everything between the (parens). unused_tokens = list(self._GetMatchingChar('(', ')')) token = self._GetNextToken() elif modifier_token.name == 'throw': modifiers |= FUNCTION_THROW assert token.name == '(', token # Consume everything between the (parens). unused_tokens = list(self._GetMatchingChar('(', ')')) token = self._GetNextToken() elif modifier_token.name == 'override': modifiers |= FUNCTION_OVERRIDE elif modifier_token.name == modifier_token.name.upper(): # HACK(nnorwitz): assume that all upper-case names # are some macro we aren't expanding. modifiers |= FUNCTION_UNKNOWN_ANNOTATION else: self.HandleError('unexpected token', modifier_token) assert token.token_type == tokenize.SYNTAX, token # Handle ctor initializers. if token.name == ':': # TODO(nnorwitz): anything else to handle for initializer list? while token.name != ';' and token.name != '{': token = self._GetNextToken() # Handle pointer to functions that are really data but look # like method declarations. if token.name == '(': if parameters[0].name == '*': # name contains the return type. name = parameters.pop() # parameters contains the name of the data. modifiers = [p.name for p in parameters] # Already at the ( to open the parameter list. function_parameters = list(self._GetMatchingChar('(', ')')) del function_parameters[-1] # Remove trailing ')'. # TODO(nnorwitz): store the function_parameters. token = self._GetNextToken() assert token.token_type == tokenize.SYNTAX, token assert token.name == ';', token return self._CreateVariable(indices, name.name, indices.name, modifiers, '', None) # At this point, we got something like: # return_type (type::*name_)(params); # This is a data member called name_ that is a function pointer. # With this code: void (sq_type::*field_)(string&); # We get: name=void return_type=[] parameters=sq_type ... field_ # TODO(nnorwitz): is return_type always empty? # TODO(nnorwitz): this isn't even close to being correct. # Just put in something so we don't crash and can move on. 
real_name = parameters[-1] modifiers = [p.name for p in self._GetParameters()] del modifiers[-1] # Remove trailing ')'. return self._CreateVariable(indices, real_name.name, indices.name, modifiers, '', None) if token.name == '{': body = list(self.GetScope()) del body[-1] # Remove trailing '}'. else: body = None if token.name == '=': token = self._GetNextToken() if token.name == 'default' or token.name == 'delete': # Ignore explicitly defaulted and deleted special members # in C++11. token = self._GetNextToken() else: # Handle pure-virtual declarations. assert token.token_type == tokenize.CONSTANT, token assert token.name == '0', token modifiers |= FUNCTION_PURE_VIRTUAL token = self._GetNextToken() if token.name == '[': # TODO(nnorwitz): store tokens and improve parsing. # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N]; tokens = list(self._GetMatchingChar('[', ']')) token = self._GetNextToken() assert token.name == ';', (token, return_type_and_name, parameters) # Looks like we got a method, not a function. if len(return_type) > 2 and return_type[-1].name == '::': return_type, in_class = \ self._GetReturnTypeAndClassName(return_type) return Method(indices.start, indices.end, name.name, in_class, return_type, parameters, modifiers, templated_types, body, self.namespace_stack) return Function(indices.start, indices.end, name.name, return_type, parameters, modifiers, templated_types, body, self.namespace_stack) def _GetReturnTypeAndClassName(self, token_seq): # Splitting the return type from the class name in a method # can be tricky. For example, Return::Type::Is::Hard::To::Find(). # Where is the return type and where is the class name? # The heuristic used is to pull the last name as the class name. # This includes all the templated type info. # TODO(nnorwitz): if there is only One name like in the # example above, punt and assume the last bit is the class name. # Ignore a :: prefix, if exists so we can find the first real name. i = 0 if token_seq[0].name == '::': i = 1 # Ignore a :: suffix, if exists. end = len(token_seq) - 1 if token_seq[end-1].name == '::': end -= 1 # Make a copy of the sequence so we can append a sentinel # value. This is required for GetName will has to have some # terminating condition beyond the last name. seq_copy = token_seq[i:end] seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0)) names = [] while i < end: # Iterate through the sequence parsing out each name. new_name, next = self.GetName(seq_copy[i:]) assert new_name, 'Got empty new_name, next=%s' % next # We got a pointer or ref. Add it to the name. if next and next.token_type == tokenize.SYNTAX: new_name.append(next) names.append(new_name) i += len(new_name) # Now that we have the names, it's time to undo what we did. # Remove the sentinel value. names[-1].pop() # Flatten the token sequence for the return type. return_type = [e for seq in names[:-1] for e in seq] # The class name is the last name. class_name = names[-1] return return_type, class_name def handle_bool(self): pass def handle_char(self): pass def handle_int(self): pass def handle_long(self): pass def handle_short(self): pass def handle_double(self): pass def handle_float(self): pass def handle_void(self): pass def handle_wchar_t(self): pass def handle_unsigned(self): pass def handle_signed(self): pass def _GetNestedType(self, ctor): name = None name_tokens, token = self.GetName() if name_tokens: name = ''.join([t.name for t in name_tokens]) # Handle forward declarations. 
if token.token_type == tokenize.SYNTAX and token.name == ';': return ctor(token.start, token.end, name, None, self.namespace_stack) if token.token_type == tokenize.NAME and self._handling_typedef: self._AddBackToken(token) return ctor(token.start, token.end, name, None, self.namespace_stack) # Must be the type declaration. fields = list(self._GetMatchingChar('{', '}')) del fields[-1] # Remove trailing '}'. if token.token_type == tokenize.SYNTAX and token.name == '{': next = self._GetNextToken() new_type = ctor(token.start, token.end, name, fields, self.namespace_stack) # A name means this is an anonymous type and the name # is the variable declaration. if next.token_type != tokenize.NAME: return new_type name = new_type token = next # Must be variable declaration using the type prefixed with keyword. assert token.token_type == tokenize.NAME, token return self._CreateVariable(token, token.name, name, [], '', None) def handle_struct(self): # Special case the handling typedef/aliasing of structs here. # It would be a pain to handle in the class code. name_tokens, var_token = self.GetName() if name_tokens: next_token = self._GetNextToken() is_syntax = (var_token.token_type == tokenize.SYNTAX and var_token.name[0] in '*&') is_variable = (var_token.token_type == tokenize.NAME and next_token.name == ';') variable = var_token if is_syntax and not is_variable: variable = next_token temp = self._GetNextToken() if temp.token_type == tokenize.SYNTAX and temp.name == '(': # Handle methods declared to return a struct. t0 = name_tokens[0] struct = tokenize.Token(tokenize.NAME, 'struct', t0.start-7, t0.start-2) type_and_name = [struct] type_and_name.extend(name_tokens) type_and_name.extend((var_token, next_token)) return self._GetMethod(type_and_name, 0, None, False) assert temp.name == ';', (temp, name_tokens, var_token) if is_syntax or (is_variable and not self._handling_typedef): modifiers = ['struct'] type_name = ''.join([t.name for t in name_tokens]) position = name_tokens[0] return self._CreateVariable(position, variable.name, type_name, modifiers, var_token.name, None) name_tokens.extend((var_token, next_token)) self._AddBackTokens(name_tokens) else: self._AddBackToken(var_token) return self._GetClass(Struct, VISIBILITY_PUBLIC, None) def handle_union(self): return self._GetNestedType(Union) def handle_enum(self): return self._GetNestedType(Enum) def handle_auto(self): # TODO(nnorwitz): warn about using auto? Probably not since it # will be reclaimed and useful for C++0x. pass def handle_register(self): pass def handle_const(self): pass def handle_inline(self): pass def handle_extern(self): pass def handle_static(self): pass def handle_virtual(self): # What follows must be a method. token = token2 = self._GetNextToken() if token.name == 'inline': # HACK(nnorwitz): handle inline dtors by ignoring 'inline'. 
token2 = self._GetNextToken() if token2.token_type == tokenize.SYNTAX and token2.name == '~': return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None) assert token.token_type == tokenize.NAME or token.name == '::', token return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # ) return_type_and_name.insert(0, token) if token2 is not token: return_type_and_name.insert(1, token2) return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL, None, False) def handle_volatile(self): pass def handle_mutable(self): pass def handle_public(self): assert self.in_class self.visibility = VISIBILITY_PUBLIC def handle_protected(self): assert self.in_class self.visibility = VISIBILITY_PROTECTED def handle_private(self): assert self.in_class self.visibility = VISIBILITY_PRIVATE def handle_friend(self): tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') assert tokens t0 = tokens[0] return Friend(t0.start, t0.end, tokens, self.namespace_stack) def handle_static_cast(self): pass def handle_const_cast(self): pass def handle_dynamic_cast(self): pass def handle_reinterpret_cast(self): pass def handle_new(self): pass def handle_delete(self): tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') assert tokens return Delete(tokens[0].start, tokens[0].end, tokens) def handle_typedef(self): token = self._GetNextToken() if (token.token_type == tokenize.NAME and keywords.IsKeyword(token.name)): # Token must be struct/enum/union/class. method = getattr(self, 'handle_' + token.name) self._handling_typedef = True tokens = [method()] self._handling_typedef = False else: tokens = [token] # Get the remainder of the typedef up to the semi-colon. tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';')) # TODO(nnorwitz): clean all this up. assert tokens name = tokens.pop() indices = name if tokens: indices = tokens[0] if not indices: indices = token if name.name == ')': # HACK(nnorwitz): Handle pointers to functions "properly". if (len(tokens) >= 4 and tokens[1].name == '(' and tokens[2].name == '*'): tokens.append(name) name = tokens[3] elif name.name == ']': # HACK(nnorwitz): Handle arrays properly. if len(tokens) >= 2: tokens.append(name) name = tokens[1] new_type = tokens if tokens and isinstance(tokens[0], tokenize.Token): new_type = self.converter.ToType(tokens)[0] return Typedef(indices.start, indices.end, name.name, new_type, self.namespace_stack) def handle_typeid(self): pass # Not needed yet. def handle_typename(self): pass # Not needed yet. def _GetTemplatedTypes(self): result = {} tokens = list(self._GetMatchingChar('<', '>')) len_tokens = len(tokens) - 1 # Ignore trailing '>'. i = 0 while i < len_tokens: key = tokens[i].name i += 1 if keywords.IsKeyword(key) or key == ',': continue type_name = default = None if i < len_tokens: i += 1 if tokens[i-1].name == '=': assert i < len_tokens, '%s %s' % (i, tokens) default, unused_next_token = self.GetName(tokens[i:]) i += len(default) else: if tokens[i-1].name != ',': # We got something like: Type variable. # Re-adjust the key (variable) and type_name (Type). key = tokens[i-1].name type_name = tokens[i-2] result[key] = (type_name, default) return result def handle_template(self): token = self._GetNextToken() assert token.token_type == tokenize.SYNTAX, token assert token.name == '<', token templated_types = self._GetTemplatedTypes() # TODO(nnorwitz): for now, just ignore the template params. 
token = self._GetNextToken() if token.token_type == tokenize.NAME: if token.name == 'class': return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types) elif token.name == 'struct': return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types) elif token.name == 'friend': return self.handle_friend() self._AddBackToken(token) tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';') tokens.append(last) self._AddBackTokens(tokens) if last.name == '(': return self.GetMethod(FUNCTION_NONE, templated_types) # Must be a variable definition. return None def handle_true(self): pass # Nothing to do. def handle_false(self): pass # Nothing to do. def handle_asm(self): pass # Not needed yet. def handle_class(self): return self._GetClass(Class, VISIBILITY_PRIVATE, None) def _GetBases(self): # Get base classes. bases = [] while 1: token = self._GetNextToken() assert token.token_type == tokenize.NAME, token # TODO(nnorwitz): store kind of inheritance...maybe. if token.name not in ('public', 'protected', 'private'): # If inheritance type is not specified, it is private. # Just put the token back so we can form a name. # TODO(nnorwitz): it would be good to warn about this. self._AddBackToken(token) else: # Check for virtual inheritance. token = self._GetNextToken() if token.name != 'virtual': self._AddBackToken(token) else: # TODO(nnorwitz): store that we got virtual for this base. pass base, next_token = self.GetName() bases_ast = self.converter.ToType(base) assert len(bases_ast) == 1, bases_ast bases.append(bases_ast[0]) assert next_token.token_type == tokenize.SYNTAX, next_token if next_token.name == '{': token = next_token break # Support multiple inheritance. assert next_token.name == ',', next_token return bases, token def _GetClass(self, class_type, visibility, templated_types): class_name = None class_token = self._GetNextToken() if class_token.token_type != tokenize.NAME: assert class_token.token_type == tokenize.SYNTAX, class_token token = class_token else: # Skip any macro (e.g. storage class specifiers) after the # 'class' keyword. next_token = self._GetNextToken() if next_token.token_type == tokenize.NAME: self._AddBackToken(next_token) else: self._AddBackTokens([class_token, next_token]) name_tokens, token = self.GetName() class_name = ''.join([t.name for t in name_tokens]) bases = None if token.token_type == tokenize.SYNTAX: if token.name == ';': # Forward declaration. return class_type(class_token.start, class_token.end, class_name, None, templated_types, None, self.namespace_stack) if token.name in '*&': # Inline forward declaration. Could be method or data. name_token = self._GetNextToken() next_token = self._GetNextToken() if next_token.name == ';': # Handle data modifiers = ['class'] return self._CreateVariable(class_token, name_token.name, class_name, modifiers, token.name, None) else: # Assume this is a method. 
tokens = (class_token, token, name_token, next_token) self._AddBackTokens(tokens) return self.GetMethod(FUNCTION_NONE, None) if token.name == ':': bases, token = self._GetBases() body = None if token.token_type == tokenize.SYNTAX and token.name == '{': assert token.token_type == tokenize.SYNTAX, token assert token.name == '{', token ast = AstBuilder(self.GetScope(), self.filename, class_name, visibility, self.namespace_stack) body = list(ast.Generate()) if not self._handling_typedef: token = self._GetNextToken() if token.token_type != tokenize.NAME: assert token.token_type == tokenize.SYNTAX, token assert token.name == ';', token else: new_class = class_type(class_token.start, class_token.end, class_name, bases, None, body, self.namespace_stack) modifiers = [] return self._CreateVariable(class_token, token.name, new_class, modifiers, token.name, None) else: if not self._handling_typedef: self.HandleError('non-typedef token', token) self._AddBackToken(token) return class_type(class_token.start, class_token.end, class_name, bases, templated_types, body, self.namespace_stack) def handle_namespace(self): token = self._GetNextToken() # Support anonymous namespaces. name = None if token.token_type == tokenize.NAME: name = token.name token = self._GetNextToken() self.namespace_stack.append(name) assert token.token_type == tokenize.SYNTAX, token # Create an internal token that denotes when the namespace is complete. internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP, None, None) internal_token.whence = token.whence if token.name == '=': # TODO(nnorwitz): handle aliasing namespaces. name, next_token = self.GetName() assert next_token.name == ';', next_token self._AddBackToken(internal_token) else: assert token.name == '{', token tokens = list(self.GetScope()) # Replace the trailing } with the internal namespace pop token. tokens[-1] = internal_token # Handle namespace with nothing in it. self._AddBackTokens(tokens) return None def handle_using(self): tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') assert tokens return Using(tokens[0].start, tokens[0].end, tokens) def handle_explicit(self): assert self.in_class # Nothing much to do. # TODO(nnorwitz): maybe verify the method name == class name. # This must be a ctor. return self.GetMethod(FUNCTION_CTOR, None) def handle_this(self): pass # Nothing to do. def handle_operator(self): # Pull off the next token(s?) and make that part of the method name. pass def handle_sizeof(self): pass def handle_case(self): pass def handle_switch(self): pass def handle_default(self): token = self._GetNextToken() assert token.token_type == tokenize.SYNTAX assert token.name == ':' def handle_if(self): pass def handle_else(self): pass def handle_return(self): tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') if not tokens: return Return(self.current_token.start, self.current_token.end, None) return Return(tokens[0].start, tokens[0].end, tokens) def handle_goto(self): tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') assert len(tokens) == 1, str(tokens) return Goto(tokens[0].start, tokens[0].end, tokens[0].name) def handle_try(self): pass # Not needed yet. def handle_catch(self): pass # Not needed yet. def handle_throw(self): pass # Not needed yet. 
def handle_while(self): pass def handle_do(self): pass def handle_for(self): pass def handle_break(self): self._IgnoreUpTo(tokenize.SYNTAX, ';') def handle_continue(self): self._IgnoreUpTo(tokenize.SYNTAX, ';') def BuilderFromSource(source, filename): """Utility method that returns an AstBuilder from source code. Args: source: 'C++ source code' filename: 'file1' Returns: AstBuilder """ return AstBuilder(tokenize.GetTokens(source), filename) def PrintIndentifiers(filename, should_print): """Prints all identifiers for a C++ source file. Args: filename: 'file1' should_print: predicate with signature: bool Function(token) """ source = utils.ReadFile(filename, False) if source is None: sys.stderr.write('Unable to find: %s\n' % filename) return #print('Processing %s' % actual_filename) builder = BuilderFromSource(source, filename) try: for node in builder.Generate(): if should_print(node): print(node.name) except KeyboardInterrupt: return except: pass def PrintAllIndentifiers(filenames, should_print): """Prints all identifiers for each C++ source file in filenames. Args: filenames: ['file1', 'file2', ...] should_print: predicate with signature: bool Function(token) """ for path in filenames: PrintIndentifiers(path, should_print) def main(argv): for filename in argv[1:]: source = utils.ReadFile(filename) if source is None: continue print('Processing %s' % filename) builder = BuilderFromSource(source, filename) try: entire_ast = filter(None, builder.Generate()) except KeyboardInterrupt: return except: # Already printed a warning, print the traceback and continue. traceback.print_exc() else: if utils.DEBUG: for ast in entire_ast: print(ast) if __name__ == '__main__': main(sys.argv)
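# Illustrative usage sketch (editor's addition, not part of the upstream file).
# It shows one way the builder defined above might be driven, assuming this
# module is importable together with its tokenize/keywords/utils helpers. The
# header text and the helper name _example_print_declarations are made up.
def _example_print_declarations():
    source = ('class Shape {\n'
              ' public:\n'
              '  virtual ~Shape();\n'
              '  virtual int Area() const = 0;\n'
              '};\n')
    builder = BuilderFromSource(source, 'shape.h')
    for node in builder.Generate():
        # Each yielded node is a declaration such as Class, Function, or Typedef.
        print(type(node).__name__, node.name)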
mit
2,722,094,682,581,519,400
3,297,391,043,870,577,000
35.221581
82
0.541499
false
plamut/superdesk
server/apps/legal_archive/service.py
4
6656
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license import logging from eve.versioning import versioned_id_field from flask import g, current_app as app from eve.utils import config, ParsedRequest from .resource import LEGAL_ARCHIVE_NAME from superdesk import Service, get_resource_privileges from superdesk.errors import SuperdeskApiError from superdesk.metadata.item import ITEM_TYPE, GUID_FIELD, CONTENT_TYPE from superdesk.metadata.packages import GROUPS, RESIDREF, REFS from superdesk.utils import ListCursor logger = logging.getLogger(__name__) class LegalService(Service): """ Base Service Class for Legal Archive related services """ def on_create(self, docs): """ Overriding to replace the location of each item in the package to legal archive instead of archive, if doc is a pacakge. """ super().on_create(docs) for doc in docs: if ITEM_TYPE in doc: doc.setdefault(config.ID_FIELD, doc[GUID_FIELD]) if doc[ITEM_TYPE] == CONTENT_TYPE.COMPOSITE: self._change_location_of_items_in_package(doc) def on_replace(self, document, original): """ Overriding to replace the location of each item in the package to legal archive instead of archive, if doc is a pacakge. """ super().on_replace(document, original) if document.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE: self._change_location_of_items_in_package(document) def get(self, req, lookup): """ Overriding to check if user is authorized to perform get operation on Legal Archive resources. If authorized then request is forwarded otherwise throws forbidden error. :return: list of docs matching query in req and lookup :raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources. """ self.check_get_access_privilege() return super().get(req, lookup) def find_one(self, req, **lookup): """ Overriding to check if user is authorized to perform get operation on Legal Archive resources. If authorized then request is forwarded otherwise throws forbidden error. :return: doc if there is one matching the query in req and lookup :raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources. """ self.check_get_access_privilege() return super().find_one(req, **lookup) def check_get_access_privilege(self): """ Checks if user is authorized to perform get operation on Legal Archive resources. If authorized then request is forwarded otherwise throws forbidden error. :raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources. """ if not hasattr(g, 'user'): return privileges = g.user.get('active_privileges', {}) resource_privileges = get_resource_privileges(self.datasource).get('GET', None) if privileges.get(resource_privileges, 0) == 0: raise SuperdeskApiError.forbiddenError() def enhance(self, legal_archive_docs): """ Enhances the item in Legal Archive Service :param legal_archive_docs: """ if isinstance(legal_archive_docs, list): for legal_archive_doc in legal_archive_docs: legal_archive_doc['_type'] = LEGAL_ARCHIVE_NAME else: legal_archive_docs['_type'] = LEGAL_ARCHIVE_NAME def _change_location_of_items_in_package(self, package): """ Changes location of each item in the package to legal archive instead of archive. 
""" for group in package.get(GROUPS, []): for ref in group.get(REFS, []): if RESIDREF in ref: ref['location'] = LEGAL_ARCHIVE_NAME class LegalArchiveService(LegalService): def on_fetched(self, docs): """ Overriding this to enhance the published article with the one in archive collection """ self.enhance(docs[config.ITEMS]) def on_fetched_item(self, doc): """ Overriding this to enhance the published article with the one in archive collection """ self.enhance(doc) class LegalPublishQueueService(LegalService): def create(self, docs, **kwargs): """ Overriding this from preventing the transmission details again. This happens when an item in a package expires at later point of time. In this case, the call to insert transmission details happens twice once when the package expires and once when the item expires. """ ids = [] for doc in docs: doc_if_exists = self.find_one(req=None, _id=doc['_id']) if doc_if_exists is None: ids.extend(super().create([doc])) return ids class LegalArchiveVersionsService(LegalService): def create(self, docs, **kwargs): """ Overriding this from preventing the same version again. This happens when an item is published more than once. """ ids = [] for doc in docs: doc_if_exists = None if config.ID_FIELD in doc: # This happens when inserting docs from pre-populate command doc_if_exists = self.find_one(req=None, _id=doc['_id']) if doc_if_exists is None: ids.extend(super().create([doc])) return ids def get(self, req, lookup): """ Version of an article in Legal Archive isn't maintained by Eve. Overriding this to fetch the version history. """ resource_def = app.config['DOMAIN'][LEGAL_ARCHIVE_NAME] id_field = versioned_id_field(resource_def) if req and req.args and req.args.get(config.ID_FIELD): version_history = list(super().get_from_mongo(req=ParsedRequest(), lookup={id_field: req.args.get(config.ID_FIELD)})) else: version_history = list(super().get_from_mongo(req=req, lookup=lookup)) for doc in version_history: doc[config.ID_FIELD] = doc[id_field] self.enhance(doc) return ListCursor(version_history)
agpl-3.0
-4,373,273,393,604,016,000
-1,137,542,623,729,656,800
34.404255
119
0.63762
false
dzz007/photivo
scons-local-2.2.0/SCons/Tool/mwld.py
14
3666
"""SCons.Tool.mwld Tool-specific initialization for the Metrowerks CodeWarrior linker. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/mwld.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo" import SCons.Tool def generate(env): """Add Builders and construction variables for lib to an Environment.""" SCons.Tool.createStaticLibBuilder(env) SCons.Tool.createSharedLibBuilder(env) SCons.Tool.createProgBuilder(env) env['AR'] = 'mwld' env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES' env['LIBDIRPREFIX'] = '-L' env['LIBDIRSUFFIX'] = '' env['LIBLINKPREFIX'] = '-l' env['LIBLINKSUFFIX'] = '.lib' env['LINK'] = 'mwld' env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS' env['SHLINK'] = '$LINK' env['SHLINKFLAGS'] = '$LINKFLAGS' env['SHLINKCOM'] = shlib_action env['SHLIBEMITTER']= shlib_emitter def exists(env): import SCons.Tool.mwcc return SCons.Tool.mwcc.set_vars(env) def shlib_generator(target, source, env, for_signature): cmd = ['$SHLINK', '$SHLINKFLAGS', '-shared'] no_import_lib = env.get('no_import_lib', 0) if no_import_lib: cmd.extend('-noimplib') dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX') if dll: cmd.extend(['-o', dll]) implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX') if implib: cmd.extend(['-implib', implib.get_string(for_signature)]) cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS']) return [cmd] def shlib_emitter(target, source, env): dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX') no_import_lib = env.get('no_import_lib', 0) if not dll: raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX")) if not no_import_lib and \ not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'): # Append an import library to the list of targets. target.append(env.ReplaceIxes(dll, 'SHLIBPREFIX', 'SHLIBSUFFIX', 'LIBPREFIX', 'LIBSUFFIX')) return target, source shlib_action = SCons.Action.Action(shlib_generator, generator=1) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
gpl-3.0
688,066,444,426,008,400
3,188,319,646,208,421,400
33.261682
135
0.689034
false
Lektorium-LLC/edx-platform
openedx/core/lib/token_utils.py
11
4236
"""Utilities for working with ID tokens.""" import json from time import time from Cryptodome.PublicKey import RSA from django.conf import settings from django.utils.functional import cached_property from jwkest.jwk import KEYS, RSAKey from jwkest.jws import JWS from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from student.models import UserProfile, anonymous_id_for_user class JwtBuilder(object): """Utility for building JWTs. Unifies diverse approaches to JWT creation in a single class. This utility defaults to using the system's JWT configuration. NOTE: This utility class will allow you to override the signing key and audience claim to support those clients which still require this. This approach to JWT creation is DEPRECATED. Avoid doing this for new clients. Arguments: user (User): User for which to generate the JWT. Keyword Arguments: asymmetric (Boolean): Whether the JWT should be signed with this app's private key. secret (string): Overrides configured JWT secret (signing) key. Unused if an asymmetric signature is requested. """ def __init__(self, user, asymmetric=False, secret=None): self.user = user self.asymmetric = asymmetric self.secret = secret self.jwt_auth = configuration_helpers.get_value('JWT_AUTH', settings.JWT_AUTH) def build_token(self, scopes, expires_in=None, aud=None, additional_claims=None): """Returns a JWT access token. Arguments: scopes (list): Scopes controlling which optional claims are included in the token. Keyword Arguments: expires_in (int): Time to token expiry, specified in seconds. aud (string): Overrides configured JWT audience claim. additional_claims (dict): Additional claims to include in the token. Returns: str: Encoded JWT """ now = int(time()) expires_in = expires_in or self.jwt_auth['JWT_EXPIRATION'] payload = { # TODO Consider getting rid of this claim since we don't use it. 'aud': aud if aud else self.jwt_auth['JWT_AUDIENCE'], 'exp': now + expires_in, 'iat': now, 'iss': self.jwt_auth['JWT_ISSUER'], 'preferred_username': self.user.username, 'scopes': scopes, 'sub': anonymous_id_for_user(self.user, None), } if additional_claims: payload.update(additional_claims) for scope in scopes: handler = self.claim_handlers.get(scope) if handler: handler(payload) return self.encode(payload) @cached_property def claim_handlers(self): """Returns a dictionary mapping scopes to methods that will add claims to the JWT payload.""" return { 'email': self.attach_email_claim, 'profile': self.attach_profile_claim } def attach_email_claim(self, payload): """Add the email claim details to the JWT payload.""" payload['email'] = self.user.email def attach_profile_claim(self, payload): """Add the profile claim details to the JWT payload.""" try: # Some users (e.g., service users) may not have user profiles. name = UserProfile.objects.get(user=self.user).name except UserProfile.DoesNotExist: name = None payload.update({ 'name': name, 'family_name': self.user.last_name, 'given_name': self.user.first_name, 'administrator': self.user.is_staff, }) def encode(self, payload): """Encode the provided payload.""" keys = KEYS() if self.asymmetric: keys.add(RSAKey(key=RSA.importKey(settings.JWT_PRIVATE_SIGNING_KEY))) algorithm = 'RS512' else: key = self.secret if self.secret else self.jwt_auth['JWT_SECRET_KEY'] keys.add({'key': key, 'kty': 'oct'}) algorithm = self.jwt_auth['JWT_ALGORITHM'] data = json.dumps(payload) jws = JWS(data, alg=algorithm) return jws.sign_compact(keys=keys)
agpl-3.0
-6,670,726,015,398,278,000
-5,013,787,694,467,542,000
34.898305
119
0.628659
false
8u1a/plaso
plaso/parsers/java_idx.py
3
8539
# -*- coding: utf-8 -*- """Parser for Java Cache IDX files.""" # TODO: # * 6.02 files did not retain IP addresses. However, the # deploy_resource_codebase header field may contain the host IP. # This needs to be researched further, as that field may not always # be present. 6.02 files will currently return 'Unknown'. import os import construct from plaso.events import time_events from plaso.lib import errors from plaso.lib import eventdata from plaso.lib import timelib from plaso.parsers import interface from plaso.parsers import manager class JavaIDXEvent(time_events.TimestampEvent): """Convenience class for a Java IDX cache file download event.""" DATA_TYPE = u'java:download:idx' def __init__( self, timestamp, timestamp_description, idx_version, url, ip_address): """Initializes the event object. Args: timestamp: The timestamp value. timestamp_description: The description of the usage of the time value. idx_version: Version of IDX file. url: URL of the downloaded file. ip_address: IP address of the host in the URL. """ super(JavaIDXEvent, self).__init__(timestamp, timestamp_description) self.idx_version = idx_version self.url = url self.ip_address = ip_address class JavaIDXParser(interface.SingleFileBaseParser): """Parse Java WebStart Cache IDX files for download events. There are five structures defined. 6.02 files had one generic section that retained all data. From 6.03, the file went to a multi-section format where later sections were optional and had variable-lengths. 6.03, 6.04, and 6.05 files all have their main data section (#2) begin at offset 128. The short structure is because 6.05 files deviate after the 8th byte. So, grab the first 8 bytes to ensure it's valid, get the file version, then continue on with the correct structures. """ _INITIAL_FILE_OFFSET = None NAME = u'java_idx' DESCRIPTION = u'Parser for Java WebStart Cache IDX files.' IDX_SHORT_STRUCT = construct.Struct( u'magic', construct.UBInt8(u'busy'), construct.UBInt8(u'incomplete'), construct.UBInt32(u'idx_version')) IDX_602_STRUCT = construct.Struct( u'IDX_602_Full', construct.UBInt16(u'null_space'), construct.UBInt8(u'shortcut'), construct.UBInt32(u'content_length'), construct.UBInt64(u'last_modified_date'), construct.UBInt64(u'expiration_date'), construct.PascalString( u'version_string', length_field=construct.UBInt16(u'length')), construct.PascalString( u'url', length_field=construct.UBInt16(u'length')), construct.PascalString( u'namespace', length_field=construct.UBInt16(u'length')), construct.UBInt32(u'FieldCount')) IDX_605_SECTION_ONE_STRUCT = construct.Struct( u'IDX_605_Section1', construct.UBInt8(u'shortcut'), construct.UBInt32(u'content_length'), construct.UBInt64(u'last_modified_date'), construct.UBInt64(u'expiration_date'), construct.UBInt64(u'validation_date'), construct.UBInt8(u'signed'), construct.UBInt32(u'sec2len'), construct.UBInt32(u'sec3len'), construct.UBInt32(u'sec4len')) IDX_605_SECTION_TWO_STRUCT = construct.Struct( u'IDX_605_Section2', construct.PascalString( u'version', length_field=construct.UBInt16(u'length')), construct.PascalString( u'url', length_field=construct.UBInt16(u'length')), construct.PascalString( u'namespec', length_field=construct.UBInt16(u'length')), construct.PascalString( u'ip_address', length_field=construct.UBInt16(u'length')), construct.UBInt32(u'FieldCount')) # Java uses Pascal-style strings, but with a 2-byte length field. 
JAVA_READUTF_STRING = construct.Struct( u'Java.ReadUTF', construct.PascalString( u'string', length_field=construct.UBInt16(u'length'))) def ParseFileObject(self, parser_mediator, file_object, **kwargs): """Parses a Java WebStart Cache IDX file-like object. Args: parser_mediator: A parser context object (instance of ParserContext). file_object: A file-like object. Raises: UnableToParseFile: when the file cannot be parsed. """ file_object.seek(0, os.SEEK_SET) try: magic = self.IDX_SHORT_STRUCT.parse_stream(file_object) except (IOError, construct.FieldError) as exception: raise errors.UnableToParseFile( u'Unable to parse Java IDX file with error: {0:s}.'.format(exception)) # Fields magic.busy and magic.incomplete are normally 0x00. They # are set to 0x01 if the file is currently being downloaded. Logic # checks for > 1 to avoid a race condition and still reject any # file with other data. # Field magic.idx_version is the file version, of which only # certain versions are supported. if magic.busy > 1 or magic.incomplete > 1: raise errors.UnableToParseFile(u'Not a valid Java IDX file') if not magic.idx_version in [602, 603, 604, 605]: raise errors.UnableToParseFile(u'Not a valid Java IDX file') # Obtain the relevant values from the file. The last modified date # denotes when the file was last modified on the HOST. For example, # when the file was uploaded to a web server. if magic.idx_version == 602: section_one = self.IDX_602_STRUCT.parse_stream(file_object) last_modified_date = section_one.last_modified_date url = section_one.url ip_address = u'Unknown' http_header_count = section_one.FieldCount elif magic.idx_version in [603, 604, 605]: # IDX 6.03 and 6.04 have two unused bytes before the structure. if magic.idx_version in [603, 604]: file_object.read(2) # IDX 6.03, 6.04, and 6.05 files use the same structures for the # remaining data. section_one = self.IDX_605_SECTION_ONE_STRUCT.parse_stream(file_object) last_modified_date = section_one.last_modified_date if file_object.get_size() > 128: file_object.seek(128, os.SEEK_SET) # Static offset for section 2. section_two = self.IDX_605_SECTION_TWO_STRUCT.parse_stream(file_object) url = section_two.url ip_address = section_two.ip_address http_header_count = section_two.FieldCount else: url = u'Unknown' ip_address = u'Unknown' http_header_count = 0 # File offset is now just prior to HTTP headers. Make sure there # are headers, and then parse them to retrieve the download date. download_date = None for field in range(0, http_header_count): field = self.JAVA_READUTF_STRING.parse_stream(file_object) value = self.JAVA_READUTF_STRING.parse_stream(file_object) if field.string == u'date': # Time string "should" be in UTC or have an associated time zone # information in the string itself. If that is not the case then # there is no reliable method for plaso to determine the proper # timezone, so the assumption is that it is UTC. try: download_date = timelib.Timestamp.FromTimeString( value.string, gmt_as_timezone=False) except errors.TimestampError: download_date = None parser_mediator.ProduceParseError( u'Unable to parse time value: {0:s}'.format(value.string)) if not url or not ip_address: raise errors.UnableToParseFile( u'Unexpected Error: URL or IP address not found in file.') last_modified_timestamp = timelib.Timestamp.FromJavaTime( last_modified_date) # TODO: Move the timestamp description fields into eventdata. 
event_object = JavaIDXEvent( last_modified_timestamp, u'File Hosted Date', magic.idx_version, url, ip_address) parser_mediator.ProduceEvent(event_object) if section_one: expiration_date = section_one.get(u'expiration_date', None) if expiration_date: expiration_timestamp = timelib.Timestamp.FromJavaTime(expiration_date) event_object = JavaIDXEvent( expiration_timestamp, u'File Expiration Date', magic.idx_version, url, ip_address) parser_mediator.ProduceEvent(event_object) if download_date: event_object = JavaIDXEvent( download_date, eventdata.EventTimestamp.FILE_DOWNLOADED, magic.idx_version, url, ip_address) parser_mediator.ProduceEvent(event_object) manager.ParsersManager.RegisterParser(JavaIDXParser)
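# Illustrative sketch (editor's addition, not part of the upstream file). It
# shows what the short header structure above yields for a stand-alone byte
# string; the bytes are fabricated (busy=0, incomplete=0, idx_version=605) and
# _example_parse_header is a made-up helper name.
def _example_parse_header():
  header = b'\x00\x00\x00\x00\x02\x5d'
  magic = JavaIDXParser.IDX_SHORT_STRUCT.parse(header)
  return magic.idx_version  # 605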
apache-2.0
-4,329,805,169,498,094,000
-7,993,736,771,927,482,000
37.990868
80
0.683804
false
snyaggarwal/pex
tests/test_environment.py
5
3792
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import os from contextlib import contextmanager from twitter.common.contextutil import temporary_dir from pex.compatibility import nested from pex.environment import PEXEnvironment from pex.pex_builder import PEXBuilder from pex.pex_info import PexInfo from pex.testing import make_bdist, temporary_filename @contextmanager def yield_pex_builder(zip_safe=True): with nested(temporary_dir(), make_bdist('p1', zipped=True, zip_safe=zip_safe)) as (td, p1): pb = PEXBuilder(path=td) pb.add_egg(p1.location) yield pb def test_force_local(): with nested(yield_pex_builder(), temporary_dir(), temporary_filename()) as ( pb, pex_root, pex_file): pb.info.pex_root = pex_root pb.build(pex_file) code_cache = PEXEnvironment.force_local(pex_file, pb.info) assert os.path.exists(pb.info.zip_unsafe_cache) assert len(os.listdir(pb.info.zip_unsafe_cache)) == 1 assert [os.path.basename(code_cache)] == os.listdir(pb.info.zip_unsafe_cache) assert set(os.listdir(code_cache)) == set([PexInfo.PATH, '__main__.py', '__main__.pyc']) # idempotence assert PEXEnvironment.force_local(pex_file, pb.info) == code_cache def normalize(path): return os.path.normpath(os.path.realpath(path)).lower() def test_write_zipped_internal_cache(): # zip_safe pex will not be written to install cache unless always_write_cache with nested(yield_pex_builder(zip_safe=True), temporary_dir(), temporary_filename()) as ( pb, pex_root, pex_file): pb.info.pex_root = pex_root pb.build(pex_file) existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info) assert len(zip_safe) == 1 assert normalize(zip_safe[0].location).startswith( normalize(os.path.join(pex_file, pb.info.internal_cache))), ( 'loc: %s, cache: %s' % ( normalize(zip_safe[0].location), normalize(os.path.join(pex_file, pb.info.internal_cache)))) pb.info.always_write_cache = True existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info) assert len(new) == 1 assert normalize(new[0].location).startswith(normalize(pb.info.install_cache)) # Check that we can read from the cache existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info) assert len(existing) == 1 assert normalize(existing[0].location).startswith(normalize(pb.info.install_cache)) # non-zip_safe pex will be written to install cache with nested(yield_pex_builder(zip_safe=False), temporary_dir(), temporary_filename()) as ( pb, pex_root, pex_file): pb.info.pex_root = pex_root pb.build(pex_file) existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info) assert len(new) == 1 assert normalize(new[0].location).startswith(normalize(pb.info.install_cache)) original_location = normalize(new[0].location) # do the second time to validate idempotence of caching existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info) assert len(existing) == 1 assert normalize(existing[0].location) == original_location def test_load_internal_cache_unzipped(): # zip_safe pex will not be written to install cache unless always_write_cache with nested(yield_pex_builder(zip_safe=True), temporary_dir()) as (pb, pex_root): pb.info.pex_root = pex_root pb.freeze() dists = list(PEXEnvironment.load_internal_cache(pb.path(), pb.info)) assert len(dists) == 1 assert normalize(dists[0].location).startswith( normalize(os.path.join(pb.path(), pb.info.internal_cache)))
apache-2.0
-917,576,960,650,978,400
-692,005,278,283,953,000
38.092784
93
0.703323
false
MIPS/external-chromium_org-third_party-skia
tools/test_pdfs.py
231
1801
'''
Compares the renderings of serialized SkPictures to expected images.

Launch with --help to see more information.

Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''

# common Python modules
import os
import optparse
import sys
import shutil
import tempfile

import test_rendering

USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.

Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''


def Main(args):
    """Allow other scripts to call this script with fake command-line args.

    @param args The command-line argument list
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help=('specify the location to output the rendered '
                            'files. Default is a temp directory.'))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help=('specify the location to output the diff files. '
                            'Default is a temp directory.'))
    options, arguments = parser.parse_args(args)

    if (len(arguments) < 3):
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)

    inputs = arguments[1:-1]
    expected_dir = arguments[-1]
    test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
                                  options.diff_dir, 'render_pdfs', '')

if __name__ == '__main__':
    Main(sys.argv)
bsd-3-clause
1,692,586,173,178,254,600
2,311,523,801,225,367,000
29.016667
79
0.659634
false
unix-beard/matasano
set1/detect_single_character_xor/detect_single_character_xor.py
1
1514
#!/usr/bin/env python3
################################################################################
# The matasano crypto challenges
# http://cryptopals.com/sets/1/challenges/4/
# Set 1 Challenge 4
# Detect single-character XOR
################################################################################
# One of the 60-character strings in the input file has been encrypted
# by single-character XOR. Find it.
# Key: int=53, char='5'
# Message: Now that the party is jumping
#
# NOTE: This implementation is strictly sequential
################################################################################

import sys
import string


def find_key(key, tuple_):
    return chr(int(tuple_[0] + tuple_[1], base=16) ^ key)


def decode_with_key(key, s):
    decoded_msg = ''
    for t in zip(s[0::2], s[1::2]):
        decoded_msg += find_key(key, t)

    if len([c for c in decoded_msg if c in string.ascii_letters + ' \n']) == len(decoded_msg):
        print('[*] Trying the key: int: {0}, char: {1}'.format(key, chr(key)))
        print('Decoded message: {0}'.format(decoded_msg))


def decode(s):
    print('Decoding [{0}]'.format(s))
    for key in range(0, 256):
        decode_with_key(key, s)


def remove_eol(s):
    """Removes trailing '\n' if there is one"""
    return s[0:len(s) - 1] if s[len(s) - 1] == '\n' else s


def main():
    with open(sys.argv[1], 'r') as f:
        for encoded_str in f:
            decode(remove_eol(encoded_str))


if __name__ == '__main__':
    main()
mit
-4,444,479,692,004,526,600
3,377,266,558,762,181,600
30.541667
94
0.509247
false
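The script above keeps any key whose decoded output consists only of letters, spaces and newlines. A minimal Python 3 sketch of the same brute-force check operating on raw bytes; the hex string is constructed here from the key and message documented in the header comments (key 53, "Now that the party is jumping"), not taken from the challenge input file:

import string

OK = set((string.ascii_letters + ' \n').encode())

def xor_single_byte(data, key):
    # XOR every byte of the ciphertext with the same one-byte key
    return bytes(b ^ key for b in data)

# Illustrative ciphertext derived from the documented key/message above.
ciphertext = bytes.fromhex(
    "7b5a4215415d544115415d5015455447414c155c46155f4058455c5b523f")

for key in range(256):
    plain = xor_single_byte(ciphertext, key)
    if all(b in OK for b in plain):  # same letters/space/newline filter as the script
        print(key, chr(key), plain.decode())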
igabr/Metis_Projects_Chicago_2017
03-Project-McNulty/web_app/src/flask-lesscss/docs/conf.py
6
6500
# -*- coding: utf-8 -*- # # flask-lesscss documentation build configuration file, created by # sphinx-quickstart on Tue May 11 18:54:04 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('_themes')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.intersphinx'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'flask-lesscss' copyright = u'2010, Steve Losh' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.9.1' # The full version, including alpha/beta/rc tags. release = '0.9.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'flask_small' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'flask-lesscssdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'flask-lesscss.tex', u'flask-lesscss Documentation', u'Steve Losh', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
mit
-7,162,976,067,438,640,000
-8,186,986,198,681,859,000
31.828283
80
0.714308
false
fajoy/nova
nova/api/openstack/urlmap.py
12
10628
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import paste.urlmap import re import urllib2 from nova.api.openstack import wsgi from nova.openstack.common import log as logging _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) LOG = logging.getLogger(__name__) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in urllib2.parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. :return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = parts.next()[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? 
best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def content_type_params(self, best_content_type): """Find parameters in Accept header for given content type.""" for content_type, params in self._content_types: if best_content_type == content_type: return params return {} def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, 
app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The MIME type for the response is determined in one of two ways: # 1) URL path suffix (eg /servers/detail.json) # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) mime_type, app, app_url = self._path_strategy(host, port, path_info) # Accept application/atom+xml for the index query of each API # version mount point as well as the root index if (app_url and app_url + '/' == path_info) or path_info == '/': supported_content_types.append('application/atom+xml') if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['nova.best_content_type'] = mime_type return app(environ, start_response) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response)
apache-2.0
162,420,426,393,985,200
1,715,047,607,341,073,000
34.784512
79
0.586187
false
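The Accept class in the record above scores candidate content types by their q parameter and prefers more specific masks on ties. A simplified, self-contained sketch of q-value matching (it deliberately omits the wildcard tie-breaking the class performs):

def best_match(accept_header, supported):
    # Parse 'type/subtype;q=0.8, ...' and pick the highest-q supported type.
    best, best_q = None, -1.0
    for part in accept_header.split(','):
        fields = part.strip().split(';')
        mask = fields[0].strip()
        q = 1.0
        for param in fields[1:]:
            name, _, value = param.partition('=')
            if name.strip() == 'q':
                try:
                    q = float(value)
                except ValueError:
                    q = 0.0
        for ctype in supported:
            matches = mask in ('*/*', ctype) or (
                mask.endswith('/*') and ctype.startswith(mask[:-1]))
            if matches and q > best_q:
                best, best_q = ctype, q
    return best

print(best_match('application/xml;q=0.2, application/json;q=0.8',
                 ['application/json', 'application/xml']))  # application/json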
RaphaelKimmig/django_helpful
django_helpful/__init__.py
1
1416
# Copyright (c) 2013, Raphael Kimmig
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from .utils import *

try:
    from .test_runners import *
except ImportError:
    pass
bsd-2-clause
8,141,973,303,242,262,000
4,424,767,503,877,996,000
49.571429
79
0.786723
false
looker/sdk-examples
python/soft_delete_dashboard.py
1
1367
import sys
from typing import Sequence

import exceptions
from looker_sdk import client, error, models

sdk = client.setup("../looker.ini")


def main():
    """Given a dashboard title, get the ids of all dashboards with matching
    titles and move them to trash.

    $ python soft_delete_dashboard.py "An Unused Dashboard"
    """
    dashboard_title = sys.argv[1] if len(sys.argv) > 1 else ""
    if not dashboard_title:
        raise exceptions.ArgumentError("Please provide: <dashboardTitle>")

    dashboards = get_dashboards(dashboard_title)
    delete_dashboards(dashboards)


def get_dashboards(title: str) -> Sequence[models.Dashboard]:
    """Get dashboards with matching title"""
    lc_title = title.lower()
    results = sdk.search_dashboards(title=lc_title)
    if not results:
        raise exceptions.NotFoundError(f'dashboard "{title}" not found')
    assert isinstance(results, Sequence)
    return results


def delete_dashboards(dashboards: Sequence[models.Dashboard]):
    """Soft delete dashboards"""
    for dashboard in dashboards:
        try:
            assert dashboard.id
            sdk.delete_dashboard(dashboard.id)
        except error.SDKError:
            print(f"Failed to delete dashboard with id {dashboard.id}.")
        else:
            print(f'"{dashboard.title}" (id {dashboard.id}) has been moved to trash.')


main()
mit
8,932,792,569,654,575,000
4,127,016,540,928,661,500
26.897959
86
0.675933
false
robmcmullen/peppy
peppy/major_modes/fortran_95.py
1
1742
# peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Fortran 95 programming language editing support.

Major mode for editing Fortran 95 files.

Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes.  If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""

import os

import wx
import wx.stc

from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode


class Fortran95Mode(FundamentalMode):
    """Stub major mode for editing Fortran 95 files.

    This major mode has been automatically generated and is a boilerplate/
    placeholder major mode.  Enhancements to this mode are appreciated!
    """
    keyword = 'Fortran 95'
    editra_synonym = 'Fortran 95'
    stc_lexer_id = wx.stc.STC_LEX_FORTRAN
    start_line_comment = '!'
    end_line_comment = ''
    icon = 'icons/page_white.png'

    default_classprefs = (
        StrParam('extensions', 'f2k f90 f95 fpp', fullwidth=True),
        StrParam('keyword_set_0', unique_keywords[38], hidden=False, fullwidth=True),
        StrParam('keyword_set_1', unique_keywords[39], hidden=False, fullwidth=True),
        StrParam('keyword_set_2', unique_keywords[40], hidden=False, fullwidth=True),
    )


class Fortran95ModePlugin(IPeppyPlugin):
    """Plugin to register modes and user interface for Fortran 95
    """
    def getMajorModes(self):
        yield Fortran95Mode
gpl-2.0
7,282,428,956,439,728,000
-5,788,833,283,731,277,000
32.5
85
0.723307
false
HH890612/MiliCloud
lib/requests/api.py
92
5400
# -*- coding: utf-8 -*- """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :return: :class:`Response <Response>` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') <Response [200]> """ session = sessions.Session() response = session.request(method=method, url=url, **kwargs) # By explicitly closing the session, we avoid leaving sockets open which # can trigger a ResourceWarning in some cases, and look like a memory leak # in others. session.close() return response def get(url, params=None, **kwargs): """Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('get', url, params=params, **kwargs) def options(url, **kwargs): """Sends a OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def head(url, **kwargs): """Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs) def post(url, data=None, json=None, **kwargs): """Sends a POST request. :param url: URL for the new :class:`Request` object. 
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('post', url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): """Sends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): """Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): """Sends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('delete', url, **kwargs)
mit
-1,760,506,161,962,721,300
-7,549,250,222,907,370,000
35.734694
144
0.667222
false
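The module in the record above is a thin functional wrapper around requests.Session; a short usage sketch against the httpbin endpoint its docstrings already reference (network access assumed):

import requests

# GET with query-string params (equivalent to request('get', ...))
r = requests.get('http://httpbin.org/get', params={'q': 'demo'}, timeout=5)
print(r.status_code, r.json()['args'])

# POST a JSON body; the json= keyword also sets the Content-Type header
r = requests.post('http://httpbin.org/post', json={'hello': 'world'}, timeout=5)
print(r.json()['json'])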
evernym/plenum
plenum/common/script_helper.py
2
6795
import os from jsonpickle import json from plenum.common.constants import CLIENT_STACK_SUFFIX from plenum.common.roles import Roles from plenum.common.transactions import PlenumTransactions from storage.text_file_store import TextFileStore NodeInfoFile = "node-info" GenTxnFile = "genesis_txn" ExportedTxnFile = "exported_genesis_txn" def buildKeepDirIfNotExists(baseDir): keepDir = os.path.expanduser(baseDir) if not os.path.exists(keepDir): os.makedirs(keepDir, exist_ok=True) def isNodeType(baseDir, name): filepath = os.path.join(os.path.expanduser(baseDir), name + CLIENT_STACK_SUFFIX) if os.path.exists(filepath): return True else: return False def getLedger(baseDir, dbName, storeHash=True, isLineNoKey: bool = False): return TextFileStore( dbDir=baseDir, dbName=dbName, storeContentHash=storeHash, isLineNoKey=isLineNoKey) def storeToFile(baseDir, dbName, value, key, storeHash=True, isLineNoKey=False): ledger = getLedger(baseDir, dbName, storeHash=storeHash, isLineNoKey=isLineNoKey) if key is None: ledger.put(value) else: ledger.put(value, key) ledger.close() def getNodeInfo(baseDir, nodeName): ledger = getLedger(baseDir, NodeInfoFile, storeHash=False, isLineNoKey=False) rec = ledger.get(nodeName) ledger.close() return json.loads(rec) def storeNodeInfo(baseDir, nodeName, steward, nodeip, nodeport, clientip, clientport): data = {} vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip, nodeport, clientip, clientport) nodeAddr = vnodeip + ":" + str(vnodeport) clientAddr = vclientip + ":" + str(vclientport) data['steward'] = steward data['nodeAddr'] = nodeAddr data['clientAddr'] = clientAddr newJsonData = json.dumps(data) ledger = getLedger(baseDir, NodeInfoFile, storeHash=False, isLineNoKey=False) storedJsonData = ledger.get(nodeName) if not storedJsonData: storeToFile(baseDir, NodeInfoFile, newJsonData, nodeName, storeHash=False, isLineNoKey=False) elif not storedJsonData == newJsonData: newRec = [] for key, jsonValue in ledger.iterator(include_key=True, include_value=True): if key != nodeName: newRec.append((key, jsonValue)) newRec.append((nodeName, newJsonData)) ledger.reset() for key, value in newRec: storeToFile(baseDir, NodeInfoFile, value, key, storeHash=False, isLineNoKey=False) ledger.close() def storeExportedTxns(baseDir, txn): storeToFile(baseDir, ExportedTxnFile, txn, None, storeHash=False, isLineNoKey=True) def storeGenTxns(baseDir, txn): storeToFile(baseDir, GenTxnFile, txn, None, storeHash=False, isLineNoKey=True) def getAddGenesisHAs(nodeip, nodeport, clientip, clientport): vnodeip = nodeip if nodeip else "127.0.0.1" vnodeport = nodeport if nodeport else "9701" vclientip = clientip if clientip else vnodeip vclientport = clientport if clientport else str(int(vnodeport) + 1) return vnodeip, vnodeport, vclientip, vclientport def getAddNewGenNodeCommand(name, verkey, stewardkey, nodeip, nodeport, clientip, clientport): vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip, nodeport, clientip, clientport) nodeAddr = vnodeip + ":" + vnodeport clientAddr = vclientip + ":" + vclientport return 'add genesis transaction {node} with data {"'.format(node=PlenumTransactions.NODE.name) + name + '": {' \ '"verkey": ' + verkey + \ '"node_address": "' + nodeAddr + '", "client_address": "' + \ clientAddr + '"},' \ '"by": "' + stewardkey + '"}' def getOldAddNewGenNodeCommand(name, verkey, stewardverkey, nodeip, nodeport, clientip, clientport): vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip, nodeport, clientip, clientport) return 'add genesis 
transaction {node} for '.format(node=PlenumTransactions.NODE.name) + verkey + ' by ' + \ stewardverkey + ' with data {"node_ip": "' + \ vnodeip + '", "node_port": ' + vnodeport + ', "client_ip": "' + \ vclientip + '", "client_port": ' + \ vclientport + ', "alias": "' + name + '"}' def generateNodeGenesisTxn(baseDir, displayTxn, name, verkey, stewardverkey, nodeip, nodeport, clientip, clientport): storeNodeInfo(baseDir, name, stewardverkey, nodeip, nodeport, clientip, clientport) txn = getOldAddNewGenNodeCommand(name, verkey, stewardverkey, nodeip, nodeport, clientip, clientport) storeGenTxns(baseDir, txn) printGenTxn(txn, displayTxn) def getAddNewGenStewardCommand(name, verkey): return 'add genesis transaction {nym} with data {"'.format(nym=PlenumTransactions.NYM.name) \ + name + '": {"verkey": "' + verkey + \ '"} role={role}'.format(role=Roles.STEWARD.name) def getOldAddNewGenStewardCommand(name, verkey): return 'add genesis transaction {nym} for '.format(nym=PlenumTransactions.NYM.name) + verkey + ' with data ' \ '{"alias": ' \ '"' + name + \ '"} role={role}'.format(role=Roles.STEWARD.name) def generateStewardGenesisTxn(baseDir, displayTxn, name, verkey): txn = getOldAddNewGenStewardCommand(name, verkey) storeGenTxns(baseDir, txn) printGenTxn(txn, displayTxn) def printGenTxn(txn, displayTxn): if displayTxn: print('\n' + txn) def _checkClientConnected(cli, ): assert cli.hasSufficientConnections
apache-2.0
3,850,450,858,059,086,000
-1,433,259,805,156,291,600
38.277457
133
0.558646
false
bhavin04890/finaldashboard
static/scripts/tools/csv2xml.py
12
3028
# -*- coding: utf-8 -*-
#
# Debug/Helper script for CSV stylesheet development
#
# >>> python csv2xml <CSV File>
# ... converts the CSV file into XML
#
# >>> python csv2xml <CSV File> <XSLT Stylesheet>
# ... converts the CSV file into XML and transforms it using the stylesheet
#

import sys
import csv

from lxml import etree
from xml.sax.saxutils import escape, unescape

TABLE = "table"
ROW = "row"
COL = "col"
FIELD = "field"


def xml_encode(s):
    if s:
        s = escape(s, {"'": "&apos;", '"': "&quot;"})
    return s


def xml_decode(s):
    if s:
        s = unescape(s, {"&apos;": "'", "&quot;": '"'})
    return s


def parse(source):
    parser = etree.XMLParser(no_network=False)
    result = etree.parse(source, parser)
    return result


def csv2tree(source, delimiter=",", quotechar='"'):

    root = etree.Element(TABLE)

    def utf_8_encode(source):
        encodings = ["utf-8", "iso-8859-1"]
        e = encodings[0]
        for line in source:
            if e:
                try:
                    yield unicode(line, e, "strict").encode("utf-8")
                except:
                    pass
                else:
                    continue
            for encoding in encodings:
                try:
                    yield unicode(line, encoding, "strict").encode("utf-8")
                except:
                    continue
                else:
                    e = encoding
                    break

    reader = csv.DictReader(utf_8_encode(source),
                            delimiter=delimiter,
                            quotechar=quotechar)

    for r in reader:
        row = etree.SubElement(root, ROW)
        for k in r:
            col = etree.SubElement(row, COL)
            col.set(FIELD, str(k))
            value = r[k]
            if value:
                text = str(value)
                if text.lower() not in ("null", "<null>"):
                    text = xml_encode(unicode(text.decode("utf-8")))
                    col.text = text
            else:
                col.text = ""

    return etree.ElementTree(root)


def transform(tree, stylesheet_path, **args):
    if args:
        _args = [(k, "'%s'" % args[k]) for k in args]
        _args = dict(_args)
    else:
        _args = None

    stylesheet = etree.parse(stylesheet_path)

    ac = etree.XSLTAccessControl(read_file=True, read_network=True)
    transformer = etree.XSLT(stylesheet, access_control=ac)
    if _args:
        result = transformer(tree, **_args)
    else:
        result = transformer(tree)
    return result


def main(argv):

    try:
        csvpath = argv[0]
    except:
        print "Usage: python csv2xml <CSV File> [<XSLT Stylesheet>]"
        return
    try:
        xslpath = argv[1]
    except:
        xslpath = None

    csvfile = open(csvpath)
    tree = csv2tree(csvfile)

    if xslpath is not None:
        tree = transform(tree, xslpath)

    print etree.tostring(tree, pretty_print=True)


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
mit
-5,937,495,960,317,118,000
-6,962,258,394,316,862,000
24.024793
81
0.519815
false
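For comparison, the same DictReader-to-lxml conversion in Python 3, without the script's Python 2 encoding fallback; the sample rows are made up:

import csv
import io

from lxml import etree

SAMPLE = io.StringIO("name,age\nAlice,30\nBob,25\n")  # made-up data

root = etree.Element("table")
for record in csv.DictReader(SAMPLE):
    row = etree.SubElement(root, "row")
    for field, value in record.items():
        col = etree.SubElement(row, "col")
        col.set("field", field)
        col.text = value or ""

print(etree.tostring(root, pretty_print=True).decode())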
erjac77/ansible-module-f5bigip
library/f5bigip_ltm_profile_diameter.py
2
8266
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2016-2018, Eric Jacob <erjac77@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: f5bigip_ltm_profile_diameter short_description: BIG-IP ltm profile diameter module description: - Configures a profile to manage Diameter network traffic. version_added: "2.4" author: - "Gabriel Fortin (@GabrielFortin)" options: app_service: description: - Specifies the name of the application service to which the profile belongs. connection_prime: description: - When enabled, and the system receives a capabilities exchange request from the client, the system will establish connections and perform handshaking with all the servers prior to sending the capabilities exchange answer to the client. default: disabled choices: ['disabled', 'enabled'] defaults_from: description: - Specifies the profile that you want to use as the parent profile. default: diameter description: description: - User defined description. destination_realm: description: - This attribute has been deprecated as of BIG-IP v11. handshake_timeout: description: - Specifies the handshake timeout in seconds. default: 10 choices: range(0,4294967296) host_ip_rewrite: description: - When enabled and the message is a capabilities exchange request or capabilities exchange answer, rewrite the host-ip-address attribute with the system's egress IP address. default: enabled choices: ['disabled', 'enabled'] max_retransmit_attempts: description: - Specifies the maximum number of retransmit attempts. default: 1 choices: range(0,4294967296) max_watchdog_failure: description: - Specifies the maximum number of device watchdog failures that the traffic management system can take before it tears down the connection. default: 10 choices: range(0,4294967296) name: description: - Specifies a unique name for the component. required: true origin_host_to_client: description: - Specifies the origin host to client of BIG-IP. origin_host_to_server: description: - Specifies the origin host to server of BIG-IP. origin_realm_to_client: description: - Specifies the origin realm of BIG-IP. origin_realm_to_server: description: - Specifies the origin realm to server of BIG-IP. overwrite_destination_host: description: - This attribute has been deprecated as of BIG-IP v11. default: enabled choices: ['disabled', 'enabled'] parent_avp: description: - Specifies the name of the Diameter attribute that the system uses to indicate if the persist-avp option is embedded in a grouped avp. choices: range(0, 4294967296) partition: description: - Displays the administrative partition within which the profile resides. persist_avp: description: - Specifies the name of the Diameter attribute that the system persists on. reset_on_timeout: description: - When it is enabled and the watchdog failures exceed the max watchdog failure, the system resets the connection. 
default: enabled choices: ['disabled', 'enabled'] retransmit_timeout: description: - Specifies the retransmit timeout in seconds. default: 10 choices: range(0, 4294967296) subscriber_aware: description: - When you enable this option, the system extracts available subscriber information, such as phone number or phone model, from diameter authentication and/or accounting packets. default: disabled choices: ['disabled', 'enabled'] state: description: - Specifies the state of the component on the BIG-IP system. default: present choices: ['absent', 'present'] watchdog_timeout: description: - Specifies the watchdog timeout in seconds. default: 0 choices: range(0, 4294967296) requirements: - BIG-IP >= 12.0 - ansible-common-f5 - f5-sdk ''' EXAMPLES = ''' - name: Create LTM Profile Diameter f5bigip_ltm_profile_diameter: f5_hostname: 172.16.227.35 f5_username: admin f5_password: admin f5_port: 443 name: my_diameter_profile partition: Common description: My diameter profile state: present delegate_to: localhost ''' RETURN = ''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import range from ansible_common_f5.base import F5_ACTIVATION_CHOICES from ansible_common_f5.base import F5_NAMED_OBJ_ARGS from ansible_common_f5.base import F5_PROVIDER_ARGS from ansible_common_f5.bigip import F5BigIpNamedObject class ModuleParams(object): @property def argument_spec(self): argument_spec = dict( app_service=dict(type='str'), connection_prime=dict(type='str', choices=F5_ACTIVATION_CHOICES), defaults_from=dict(type='str'), description=dict(type='str'), destination_realm=dict(type='str'), handshake_timeout=dict(type='int', choices=range(0, 4294967296)), host_ip_rewrite=dict(type='str', choices=F5_ACTIVATION_CHOICES), max_retransmit_attempts=dict(type='int', choices=range(0, 4294967296)), max_watchdog_failure=dict(type='int', choices=range(0, 4294967296)), origin_host_to_client=dict(type='str'), origin_host_to_server=dict(type='str'), origin_realm_to_client=dict(type='str'), origin_realm_to_server=dict(type='str'), overwrite_destination_host=dict(type='str', choices=F5_ACTIVATION_CHOICES), parent_avp=dict(type='str'), persist_avp=dict(type='str'), reset_on_timeout=dict(type='str', choices=F5_ACTIVATION_CHOICES), retransmit_timeout=dict(type='int', choices=range(0, 4294967296)), subscriber_aware=dict(type='str', choices=F5_ACTIVATION_CHOICES), watchdog_timeout=dict(type='int', choices=range(0, 4294967296)) ) argument_spec.update(F5_PROVIDER_ARGS) argument_spec.update(F5_NAMED_OBJ_ARGS) return argument_spec @property def supports_check_mode(self): return True class F5BigIpLtmProfileDiameter(F5BigIpNamedObject): def _set_crud_methods(self): self._methods = { 'create': self._api.tm.ltm.profile.diameters.diameter.create, 'read': self._api.tm.ltm.profile.diameters.diameter.load, 'update': self._api.tm.ltm.profile.diameters.diameter.update, 'delete': self._api.tm.ltm.profile.diameters.diameter.delete, 'exists': self._api.tm.ltm.profile.diameters.diameter.exists } def main(): params = ModuleParams() module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode) try: obj = F5BigIpLtmProfileDiameter(check_mode=module.check_mode, **module.params) result = obj.flush() module.exit_json(**result) except Exception as exc: module.fail_json(msg=str(exc)) if __name__ == '__main__': main()
apache-2.0
-2,280,381,754,712,111,400
-4,036,164,877,337,437,000
36.067265
120
0.648681
false
aaxelb/osf.io
framework/postcommit_tasks/handlers.py
21
3764
# -*- coding: utf-8 -*- import functools import hashlib import logging import threading import binascii from collections import OrderedDict import os from celery import chain from framework.celery_tasks import app from celery.local import PromiseProxy from gevent.pool import Pool from website import settings _local = threading.local() logger = logging.getLogger(__name__) def postcommit_queue(): if not hasattr(_local, 'postcommit_queue'): _local.postcommit_queue = OrderedDict() return _local.postcommit_queue def postcommit_celery_queue(): if not hasattr(_local, 'postcommit_celery_queue'): _local.postcommit_celery_queue = OrderedDict() return _local.postcommit_celery_queue def postcommit_before_request(): _local.postcommit_queue = OrderedDict() _local.postcommit_celery_queue = OrderedDict() @app.task(max_retries=5, default_retry_delay=60) def postcommit_celery_task_wrapper(queue): # chain.apply calls the tasks synchronously without re-enqueuing each one # http://stackoverflow.com/questions/34177131/how-to-solve-python-celery-error-when-using-chain-encodeerrorruntimeerrormaxi?answertab=votes#tab-top chain(*queue.values()).apply() def postcommit_after_request(response, base_status_error_code=500): if response.status_code >= base_status_error_code: _local.postcommit_queue = OrderedDict() _local.postcommit_celery_queue = OrderedDict() return response try: if postcommit_queue(): number_of_threads = 30 # one db connection per greenlet, let's share pool = Pool(number_of_threads) for func in postcommit_queue().values(): pool.spawn(func) pool.join(timeout=5.0, raise_error=True) # 5 second timeout and reraise exceptions if postcommit_celery_queue(): if settings.USE_CELERY: # delay pushes the wrapper task into celery postcommit_celery_task_wrapper.delay(postcommit_celery_queue()) else: for task in postcommit_celery_queue().values(): task() except AttributeError as ex: if not settings.DEBUG_MODE: logger.error('Post commit task queue not initialized: {}'.format(ex)) return response def enqueue_postcommit_task(fn, args, kwargs, celery=False, once_per_request=True): # make a hash of the pertinent data raw = [fn.__name__, fn.__module__, args, kwargs] m = hashlib.md5() m.update('-'.join([x.__repr__() for x in raw])) key = m.hexdigest() if not once_per_request: # we want to run it once for every occurrence, add a random string key = '{}:{}'.format(key, binascii.hexlify(os.urandom(8))) if celery and isinstance(fn, PromiseProxy): postcommit_celery_queue().update({key: fn.si(*args, **kwargs)}) else: postcommit_queue().update({key: functools.partial(fn, *args, **kwargs)}) handlers = { 'before_request': postcommit_before_request, 'after_request': postcommit_after_request, } def run_postcommit(once_per_request=True, celery=False): ''' Delays function execution until after the request's transaction has been committed. If you set the celery kwarg to True args and kwargs must be JSON serializable Tasks will only be run if the response's status code is < 500. :return: ''' def wrapper(func): # if we're local dev or running unit tests, run without queueing if settings.DEBUG_MODE: return func @functools.wraps(func) def wrapped(*args, **kwargs): enqueue_postcommit_task(func, args, kwargs, celery=celery, once_per_request=once_per_request) return wrapped return wrapper
apache-2.0
-6,281,860,607,179,661,000
7,485,267,819,883,353,000
35.543689
151
0.671892
false
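Stripped of the Celery and gevent plumbing, the queueing pattern in handlers.py above reduces to a thread-local OrderedDict keyed by a hash of the callable and its arguments, flushed after the request; a minimal standalone sketch (all names here are hypothetical, not the module's API):

import functools
import hashlib
import threading
from collections import OrderedDict

_local = threading.local()

def _queue():
    if not hasattr(_local, 'queue'):
        _local.queue = OrderedDict()
    return _local.queue

def enqueue(fn, *args, **kwargs):
    # One entry per (function, args) combination, so duplicate calls collapse.
    key = hashlib.md5(repr((fn.__module__, fn.__name__, args, kwargs)).encode()).hexdigest()
    _queue()[key] = functools.partial(fn, *args, **kwargs)

def flush():
    # Called once the request/transaction has finished successfully.
    for task in _queue().values():
        task()
    _local.queue = OrderedDict()

enqueue(print, 'runs once')
enqueue(print, 'runs once')  # same key, overwritten
flush()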
ForkedReposBak/mxnet
python/mxnet/gluon/contrib/nn/basic_layers.py
2
17216
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 # pylint: disable= arguments-differ """Custom neural network layers in model_zoo.""" __all__ = ['Concurrent', 'HybridConcurrent', 'Identity', 'SparseEmbedding', 'SyncBatchNorm', 'PixelShuffle1D', 'PixelShuffle2D', 'PixelShuffle3D'] import warnings from .... import ndarray as nd, context from ...block import HybridBlock, Block from ...nn import Sequential, HybridSequential, BatchNorm class Concurrent(Sequential): """Lays `Block` s concurrently. This block feeds its input to all children blocks, and produce the output by concatenating all the children blocks' outputs on the specified axis. Example:: net = Concurrent() # use net's name_scope to give children blocks appropriate names. with net.name_scope(): net.add(nn.Dense(10, activation='relu')) net.add(nn.Dense(20)) net.add(Identity()) Parameters ---------- axis : int, default -1 The axis on which to concatenate the outputs. """ def __init__(self, axis=-1, prefix=None, params=None): super(Concurrent, self).__init__(prefix=prefix, params=params) self.axis = axis def forward(self, x): out = [] for block in self._children.values(): out.append(block()(x)) out = nd.concat(*out, dim=self.axis) return out class HybridConcurrent(HybridSequential): """Lays `HybridBlock` s concurrently. This block feeds its input to all children blocks, and produce the output by concatenating all the children blocks' outputs on the specified axis. Example:: net = HybridConcurrent() # use net's name_scope to give children blocks appropriate names. with net.name_scope(): net.add(nn.Dense(10, activation='relu')) net.add(nn.Dense(20)) net.add(Identity()) Parameters ---------- axis : int, default -1 The axis on which to concatenate the outputs. """ def __init__(self, axis=-1, prefix=None, params=None): super(HybridConcurrent, self).__init__(prefix=prefix, params=params) self.axis = axis def hybrid_forward(self, F, x): out = [] for block in self._children.values(): out.append(block()(x)) out = F.concat(*out, dim=self.axis) return out class Identity(HybridBlock): """Block that passes through the input directly. This block can be used in conjunction with HybridConcurrent block for residual connection. Example:: net = HybridConcurrent() # use net's name_scope to give child Blocks appropriate names. with net.name_scope(): net.add(nn.Dense(10, activation='relu')) net.add(nn.Dense(20)) net.add(Identity()) """ def __init__(self, prefix=None, params=None): super(Identity, self).__init__(prefix=prefix, params=params) def hybrid_forward(self, F, x): return x class SparseEmbedding(Block): r"""Turns non-negative integers (indexes/tokens) into dense vectors of fixed size. eg. [4, 20] -> [[0.25, 0.1], [0.6, -0.2]] This SparseBlock is designed for distributed training with extremely large input dimension. 
Both weight and gradient w.r.t. weight are `RowSparseNDArray`. Note: if `sparse_grad` is set to True, the gradient w.r.t weight will be sparse. Only a subset of optimizers support sparse gradients, including SGD, AdaGrad and Adam. By default lazy updates is turned on, which may perform differently from standard updates. For more details, please check the Optimization API at: https://mxnet.incubator.apache.org/api/python/optimization/optimization.html Parameters ---------- input_dim : int Size of the vocabulary, i.e. maximum integer index + 1. output_dim : int Dimension of the dense embedding. dtype : str or np.dtype, default 'float32' Data type of output embeddings. weight_initializer : Initializer Initializer for the `embeddings` matrix. Inputs: - **data**: (N-1)-D tensor with shape: `(x1, x2, ..., xN-1)`. Output: - **out**: N-D tensor with shape: `(x1, x2, ..., xN-1, output_dim)`. """ def __init__(self, input_dim, output_dim, dtype='float32', weight_initializer=None, **kwargs): super(SparseEmbedding, self).__init__(**kwargs) self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim, 'dtype': dtype, 'sparse_grad': True} self.weight = self.params.get('weight', shape=(input_dim, output_dim), init=weight_initializer, dtype=dtype, grad_stype='row_sparse', stype='row_sparse') def forward(self, x): weight = self.weight.row_sparse_data(x) return nd.Embedding(x, weight, name='fwd', **self._kwargs) def __repr__(self): s = '{block_name}({input_dim} -> {output_dim}, {dtype})' return s.format(block_name=self.__class__.__name__, **self._kwargs) class SyncBatchNorm(BatchNorm): """Cross-GPU Synchronized Batch normalization (SyncBN) Standard BN [1]_ implementation only normalize the data within each device. SyncBN normalizes the input within the whole mini-batch. We follow the implementation described in the paper [2]_. Note: Current implementation of SyncBN does not support FP16 training. For FP16 inference, use standard nn.BatchNorm instead of SyncBN. Parameters ---------- in_channels : int, default 0 Number of channels (feature maps) in input data. If not specified, initialization will be deferred to the first time `forward` is called and `in_channels` will be inferred from the shape of input data. num_devices : int, default number of visible GPUs momentum: float, default 0.9 Momentum for the moving average. epsilon: float, default 1e-5 Small float added to variance to avoid dividing by zero. center: bool, default True If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: bool, default True If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer. use_global_stats: bool, default False If True, use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. If False, use local batch-norm. beta_initializer: str or `Initializer`, default 'zeros' Initializer for the beta weight. gamma_initializer: str or `Initializer`, default 'ones' Initializer for the gamma weight. running_mean_initializer: str or `Initializer`, default 'zeros' Initializer for the running mean. running_variance_initializer: str or `Initializer`, default 'ones' Initializer for the running variance. Inputs: - **data**: input tensor with arbitrary shape. Outputs: - **out**: output tensor with the same shape as `data`. Reference: .. [1] Ioffe, Sergey, and Christian Szegedy. 
"Batch normalization: Accelerating \ deep network training by reducing internal covariate shift." *ICML 2015* .. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, \ Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." *CVPR 2018* """ def __init__(self, in_channels=0, num_devices=None, momentum=0.9, epsilon=1e-5, center=True, scale=True, use_global_stats=False, beta_initializer='zeros', gamma_initializer='ones', running_mean_initializer='zeros', running_variance_initializer='ones', **kwargs): super(SyncBatchNorm, self).__init__( axis=1, momentum=momentum, epsilon=epsilon, center=center, scale=scale, use_global_stats=use_global_stats, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, running_mean_initializer=running_mean_initializer, running_variance_initializer=running_variance_initializer, in_channels=in_channels, **kwargs) num_devices = self._get_num_devices() if num_devices is None else num_devices self._kwargs = {'eps': epsilon, 'momentum': momentum, 'fix_gamma': not scale, 'use_global_stats': use_global_stats, 'ndev': num_devices, 'key': self.prefix} def _get_num_devices(self): warnings.warn("Caution using SyncBatchNorm: " "if not using all the GPUs, please mannually set num_devices", UserWarning) num_devices = context.num_gpus() num_devices = num_devices if num_devices > 0 else 1 return num_devices def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var): return F.contrib.SyncBatchNorm(x, gamma, beta, running_mean, running_var, name='fwd', **self._kwargs) class PixelShuffle1D(HybridBlock): r"""Pixel-shuffle layer for upsampling in 1 dimension. Pixel-shuffling is the operation of taking groups of values along the *channel* dimension and regrouping them into blocks of pixels along the ``W`` dimension, thereby effectively multiplying that dimension by a constant factor in size. For example, a feature map of shape :math:`(fC, W)` is reshaped into :math:`(C, fW)` by forming little value groups of size :math:`f` and arranging them in a grid of size :math:`W`. Parameters ---------- factor : int or 1-tuple of int Upsampling factor, applied to the ``W`` dimension. Inputs: - **data**: Tensor of shape ``(N, f*C, W)``. Outputs: - **out**: Tensor of shape ``(N, C, W*f)``. Examples -------- >>> pxshuf = PixelShuffle1D(2) >>> x = mx.nd.zeros((1, 8, 3)) >>> pxshuf(x).shape (1, 4, 6) """ def __init__(self, factor): super(PixelShuffle1D, self).__init__() self._factor = int(factor) def hybrid_forward(self, F, x): """Perform pixel-shuffling on the input.""" f = self._factor # (N, C*f, W) x = F.reshape(x, (0, -4, -1, f, 0)) # (N, C, f, W) x = F.transpose(x, (0, 1, 3, 2)) # (N, C, W, f) x = F.reshape(x, (0, 0, -3)) # (N, C, W*f) return x def __repr__(self): return "{}({})".format(self.__class__.__name__, self._factor) class PixelShuffle2D(HybridBlock): r"""Pixel-shuffle layer for upsampling in 2 dimensions. Pixel-shuffling is the operation of taking groups of values along the *channel* dimension and regrouping them into blocks of pixels along the ``H`` and ``W`` dimensions, thereby effectively multiplying those dimensions by a constant factor in size. For example, a feature map of shape :math:`(f^2 C, H, W)` is reshaped into :math:`(C, fH, fW)` by forming little :math:`f \times f` blocks of pixels and arranging them in an :math:`H \times W` grid. Pixel-shuffling together with regular convolution is an alternative, learnable way of upsampling an image by arbitrary factors. 
It is reported to help overcome checkerboard artifacts that are common in upsampling with transposed convolutions (also called deconvolutions). See the paper `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ for further details. Parameters ---------- factor : int or 2-tuple of int Upsampling factors, applied to the ``H`` and ``W`` dimensions, in that order. Inputs: - **data**: Tensor of shape ``(N, f1*f2*C, H, W)``. Outputs: - **out**: Tensor of shape ``(N, C, H*f1, W*f2)``. Examples -------- >>> pxshuf = PixelShuffle2D((2, 3)) >>> x = mx.nd.zeros((1, 12, 3, 5)) >>> pxshuf(x).shape (1, 2, 6, 15) """ def __init__(self, factor): super(PixelShuffle2D, self).__init__() try: self._factors = (int(factor),) * 2 except TypeError: self._factors = tuple(int(fac) for fac in factor) assert len(self._factors) == 2, "wrong length {}".format(len(self._factors)) def hybrid_forward(self, F, x): """Perform pixel-shuffling on the input.""" f1, f2 = self._factors # (N, f1*f2*C, H, W) x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0)) # (N, C, f1*f2, H, W) x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0)) # (N, C, f1, f2, H, W) x = F.transpose(x, (0, 1, 4, 2, 5, 3)) # (N, C, H, f1, W, f2) x = F.reshape(x, (0, 0, -3, -3)) # (N, C, H*f1, W*f2) return x def __repr__(self): return "{}({})".format(self.__class__.__name__, self._factors) class PixelShuffle3D(HybridBlock): r"""Pixel-shuffle layer for upsampling in 3 dimensions. Pixel-shuffling (or voxel-shuffling in 3D) is the operation of taking groups of values along the *channel* dimension and regrouping them into blocks of voxels along the ``D``, ``H`` and ``W`` dimensions, thereby effectively multiplying those dimensions by a constant factor in size. For example, a feature map of shape :math:`(f^3 C, D, H, W)` is reshaped into :math:`(C, fD, fH, fW)` by forming little :math:`f \times f \times f` blocks of voxels and arranging them in a :math:`D \times H \times W` grid. Pixel-shuffling together with regular convolution is an alternative, learnable way of upsampling an image by arbitrary factors. It is reported to help overcome checkerboard artifacts that are common in upsampling with transposed convolutions (also called deconvolutions). See the paper `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ for further details. Parameters ---------- factor : int or 3-tuple of int Upsampling factors, applied to the ``D``, ``H`` and ``W`` dimensions, in that order. Inputs: - **data**: Tensor of shape ``(N, f1*f2*f3*C, D, H, W)``. Outputs: - **out**: Tensor of shape ``(N, C, D*f1, H*f2, W*f3)``. 
Examples -------- >>> pxshuf = PixelShuffle3D((2, 3, 4)) >>> x = mx.nd.zeros((1, 48, 3, 5, 7)) >>> pxshuf(x).shape (1, 2, 6, 15, 28) """ def __init__(self, factor): super(PixelShuffle3D, self).__init__() try: self._factors = (int(factor),) * 3 except TypeError: self._factors = tuple(int(fac) for fac in factor) assert len(self._factors) == 3, "wrong length {}".format(len(self._factors)) def hybrid_forward(self, F, x): """Perform pixel-shuffling on the input.""" # `transpose` doesn't support 8D, need other implementation f1, f2, f3 = self._factors # (N, C*f1*f2*f3, D, H, W) x = F.reshape(x, (0, -4, -1, f1 * f2 * f3, 0, 0, 0)) # (N, C, f1*f2*f3, D, H, W) x = F.swapaxes(x, 2, 3) # (N, C, D, f1*f2*f3, H, W) x = F.reshape(x, (0, 0, 0, -4, f1, f2*f3, 0, 0)) # (N, C, D, f1, f2*f3, H, W) x = F.reshape(x, (0, 0, -3, 0, 0, 0)) # (N, C, D*f1, f2*f3, H, W) x = F.swapaxes(x, 3, 4) # (N, C, D*f1, H, f2*f3, W) x = F.reshape(x, (0, 0, 0, 0, -4, f2, f3, 0)) # (N, C, D*f1, H, f2, f3, W) x = F.reshape(x, (0, 0, 0, -3, 0, 0)) # (N, C, D*f1, H*f2, f3, W) x = F.swapaxes(x, 4, 5) # (N, C, D*f1, H*f2, W, f3) x = F.reshape(x, (0, 0, 0, 0, -3)) # (N, C, D*f1, H*f2, W*f3) return x def __repr__(self): return "{}({})".format(self.__class__.__name__, self._factors)
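The reshape/transpose recipe used by the PixelShuffle blocks above can be checked independently of MXNet. The following is a minimal NumPy sketch (an illustrative re-implementation for verifying the shape arithmetic, not the Gluon block itself) that performs the same channel-to-space regrouping as PixelShuffle2D and reproduces the shape from its docstring example:

# Illustrative NumPy re-implementation of the 2-D pixel-shuffle reshape/transpose
# recipe; a sketch for checking the shape arithmetic, not the MXNet operator.
import numpy as np

def pixel_shuffle_2d(x, f1, f2):
    """Rearrange (N, f1*f2*C, H, W) into (N, C, H*f1, W*f2)."""
    n, cff, h, w = x.shape
    c = cff // (f1 * f2)
    x = x.reshape(n, c, f1, f2, h, w)       # (N, C, f1, f2, H, W)
    x = x.transpose(0, 1, 4, 2, 5, 3)       # (N, C, H, f1, W, f2)
    return x.reshape(n, c, h * f1, w * f2)  # (N, C, H*f1, W*f2)

x = np.zeros((1, 12, 3, 5))
print(pixel_shuffle_2d(x, 2, 3).shape)      # (1, 2, 6, 15), matching the docstring example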
apache-2.0
4,431,474,274,047,349,000
6,564,196,105,991,773,000
39.603774
100
0.599326
false
jagg81/translate-toolkit
build/lib.linux-x86_64-2.6/translate/convert/prop2po.py
3
9977
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2002-2006 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with translate; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """convert Java/Mozilla .properties files to Gettext PO localization files See: http://translate.sourceforge.net/wiki/toolkit/prop2po for examples and usage instructions """ import sys from translate.storage import po from translate.storage import properties class prop2po: """convert a .properties file to a .po file for handling the translation.""" def convertstore(self, thepropfile, personality="java", duplicatestyle="msgctxt"): """converts a .properties file to a .po file...""" self.personality = personality thetargetfile = po.pofile() if self.personality == "mozilla" or self.personality == "skype": targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit", x_accelerator_marker="&") else: targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit") targetheader.addnote("extracted from %s" % thepropfile.filename, "developer") # we try and merge the header po with any comments at the start of the # properties file appendedheader = False waitingcomments = [] for propunit in thepropfile.units: pounit = self.convertunit(propunit, "developer") if pounit is None: waitingcomments.extend(propunit.comments) # FIXME the storage class should not be creating blank units if pounit is "discard": continue if not appendedheader: if propunit.isblank(): targetheader.addnote("\n".join(waitingcomments).rstrip(), "developer", position="prepend") waitingcomments = [] pounit = None appendedheader = True if pounit is not None: pounit.addnote("\n".join(waitingcomments).rstrip(), "developer", position="prepend") waitingcomments = [] thetargetfile.addunit(pounit) thetargetfile.removeduplicates(duplicatestyle) return thetargetfile def mergestore(self, origpropfile, translatedpropfile, personality="java", blankmsgstr=False, duplicatestyle="msgctxt"): """converts two .properties files to a .po file...""" self.personality = personality thetargetfile = po.pofile() if self.personality == "mozilla" or self.personality == "skype": targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit", x_accelerator_marker="&") else: targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit") targetheader.addnote("extracted from %s, %s" % (origpropfile.filename, translatedpropfile.filename), "developer") translatedpropfile.makeindex() # we try and merge the header po with any comments at the start of # the properties file appendedheader = False waitingcomments = [] # loop through the original file, looking at units one by one for origprop in origpropfile.units: origpo = self.convertunit(origprop, "developer") if origpo is None: waitingcomments.extend(origprop.comments) # FIXME the storage class should not be creating blank units if origpo is "discard": continue # 
handle the header case specially... if not appendedheader: if origprop.isblank(): targetheader.addnote(u"".join(waitingcomments).rstrip(), "developer", position="prepend") waitingcomments = [] origpo = None appendedheader = True # try and find a translation of the same name... if origprop.name in translatedpropfile.locationindex: translatedprop = translatedpropfile.locationindex[origprop.name] # Need to check that this comment is not a copy of the # developer comments translatedpo = self.convertunit(translatedprop, "translator") if translatedpo is "discard": continue else: translatedpo = None # if we have a valid po unit, get the translation and add it... if origpo is not None: if translatedpo is not None and not blankmsgstr: origpo.target = translatedpo.source origpo.addnote(u"".join(waitingcomments).rstrip(), "developer", position="prepend") waitingcomments = [] thetargetfile.addunit(origpo) elif translatedpo is not None: print >> sys.stderr, "error converting original properties definition %s" % origprop.name thetargetfile.removeduplicates(duplicatestyle) return thetargetfile def convertunit(self, propunit, commenttype): """Converts a .properties unit to a .po unit. Returns None if empty or not for translation.""" if propunit is None: return None # escape unicode pounit = po.pounit(encoding="UTF-8") if hasattr(propunit, "comments"): for comment in propunit.comments: if "DONT_TRANSLATE" in comment: return "discard" pounit.addnote(u"".join(propunit.getnotes()).rstrip(), commenttype) # TODO: handle multiline msgid if propunit.isblank(): return None pounit.addlocation(propunit.name) pounit.source = propunit.source pounit.target = u"" return pounit def convertstrings(inputfile, outputfile, templatefile, personality="strings", pot=False, duplicatestyle="msgctxt", encoding=None): """.strings specific convertor function""" return convertprop(inputfile, outputfile, templatefile, personality="strings", pot=pot, duplicatestyle=duplicatestyle, encoding=encoding) def convertmozillaprop(inputfile, outputfile, templatefile, pot=False, duplicatestyle="msgctxt"): """Mozilla specific convertor function""" return convertprop(inputfile, outputfile, templatefile, personality="mozilla", pot=pot, duplicatestyle=duplicatestyle) def convertprop(inputfile, outputfile, templatefile, personality="java", pot=False, duplicatestyle="msgctxt", encoding=None): """reads in inputfile using properties, converts using prop2po, writes to outputfile""" inputstore = properties.propfile(inputfile, personality, encoding) convertor = prop2po() if templatefile is None: outputstore = convertor.convertstore(inputstore, personality, duplicatestyle=duplicatestyle) else: templatestore = properties.propfile(templatefile, personality, encoding) outputstore = convertor.mergestore(templatestore, inputstore, personality, blankmsgstr=pot, duplicatestyle=duplicatestyle) if outputstore.isempty(): return 0 outputfile.write(str(outputstore)) return 1 formats = { "properties": ("po", convertprop), ("properties", "properties"): ("po", convertprop), "lang": ("po", convertprop), ("lang", "lang"): ("po", convertprop), "strings": ("po", convertstrings), ("strings", "strings"): ("po", convertstrings), } def main(argv=None): from translate.convert import convert parser = convert.ConvertOptionParser(formats, usetemplates=True, usepots=True, description=__doc__) parser.add_option("", "--personality", dest="personality", default=properties.default_dialect, type="choice", choices=properties.dialects.keys(), help="override the input file format: %s (for 
.properties files, default: %s)" % (", ".join(properties.dialects.iterkeys()), properties.default_dialect), metavar="TYPE") parser.add_option("", "--encoding", dest="encoding", default=None, help="override the encoding set by the personality", metavar="ENCODING") parser.add_duplicates_option() parser.passthrough.append("pot") parser.passthrough.append("personality") parser.passthrough.append("encoding") parser.run(argv) if __name__ == '__main__': main()
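Besides the command-line entry point in main(), the convertprop() helper above can be driven directly from Python. A minimal sketch, assuming the toolkit is installed so the module imports as translate.convert.prop2po and that a messages.properties file exists (both are assumptions, not part of the original); like the module itself, it is written in Python 2 style:

# Minimal sketch of calling the converter from Python instead of the CLI wrapper.
# "messages.properties" / "messages.po" are placeholder file names.
from translate.convert import prop2po

with open("messages.properties") as inputfile, open("messages.po", "w") as outputfile:
    # templatefile=None means a plain convert; pass a translated .properties file
    # here instead to merge source and translation into one PO file.
    prop2po.convertprop(inputfile, outputfile, templatefile=None,
                        personality="java", duplicatestyle="msgctxt")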
gpl-2.0
3,541,070,077,446,515,000
7,999,936,940,814,030,000
43.342222
108
0.587652
false
arenadata/ambari
ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_extension/HIVE/package/scripts/status_params.py
25
1062
#!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from resource_management import * config = Script.get_config() hive_pid_dir = config['configurations']['global']['hive_pid_dir'] hive_pid = 'hive-server.pid' hive_metastore_pid = 'hive.pid' hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
apache-2.0
3,494,227,074,862,030,000
6,816,243,270,022,910,000
34.4
79
0.768362
false
awacha/cct
cct/qtgui/devices/motor/movemotor/movemotor.py
1
4527
import logging from PyQt5 import QtWidgets, QtGui from .movemotor_ui import Ui_Form from ....core.mixins import ToolWindow from .....core.devices import Motor from .....core.instrument.privileges import PRIV_MOVEMOTORS logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class MoveMotor(QtWidgets.QWidget, Ui_Form, ToolWindow): required_privilege = PRIV_MOVEMOTORS def __init__(self, *args, **kwargs): credo = kwargs.pop('credo') self.motorname = kwargs.pop('motorname') QtWidgets.QWidget.__init__(self, *args, **kwargs) self.setupToolWindow(credo, required_devices=['Motor_' + self.motorname]) self._start_requested = False self.setupUi(self) def setupUi(self, Form): Ui_Form.setupUi(self, Form) self.motorComboBox.addItems(sorted(self.credo.motors.keys())) self.motorComboBox.currentTextChanged.connect(self.onMotorSelected) self.movePushButton.clicked.connect(self.onMove) self.motorComboBox.setCurrentIndex(self.motorComboBox.findText(self.motorname)) self.relativeCheckBox.toggled.connect(self.onRelativeChanged) self.targetDoubleSpinBox.editingFinished.connect(self.onEditingFinished) self.onMotorSelected() self.adjustSize() def onEditingFinished(self): if self.targetDoubleSpinBox.hasFocus(): self.onMove() def onRelativeChanged(self): self.onMotorPositionChange(self.motor(), self.motor().where()) if self.relativeCheckBox.isChecked(): self.targetDoubleSpinBox.setValue(0) else: self.targetDoubleSpinBox.setValue(self.motor().where()) self.adjustSize() def setIdle(self): super().setIdle() self.movePushButton.setText('Move') icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/icons/motor.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.movePushButton.setIcon(icon) self.targetDoubleSpinBox.setEnabled(True) self.motorComboBox.setEnabled(True) self.relativeCheckBox.setEnabled(True) self.movePushButton.setEnabled(True) self._start_requested = False def setBusy(self): self.movePushButton.setText('Stop') self.movePushButton.setIcon(QtGui.QIcon.fromTheme('process-stop')) self.targetDoubleSpinBox.setEnabled(False) self.motorComboBox.setEnabled(False) self.relativeCheckBox.setEnabled(False) self.movePushButton.setEnabled(True) super().setBusy() def motor(self) -> Motor: return self.credo.motors[self.motorComboBox.currentText()] def onMove(self): if self.movePushButton.text() == 'Move': self.movePushButton.setEnabled(False) self._start_requested = True if self.relativeCheckBox.isChecked(): self.motor().moverel(self.targetDoubleSpinBox.value()) else: self.motor().moveto(self.targetDoubleSpinBox.value()) else: self.movePushButton.setEnabled(False) self.motor().stop() def onMotorStart(self, motor: Motor): if self._start_requested: self.setBusy() def onMotorSelected(self): self.setWindowTitle('Move motor {}'.format(self.motorComboBox.currentText())) for d in self.required_devices: self.unrequireDevice(d) self.required_devices = ['Motor_' + self.motorComboBox.currentText()] self.requireDevice(self.required_devices[0]) motor = self.credo.motors[self.motorComboBox.currentText()] self.onMotorPositionChange(motor, motor.where()) if self.relativeCheckBox.isChecked(): self.targetDoubleSpinBox.setValue(0.0) else: self.targetDoubleSpinBox.setValue(motor.where()) def onMotorPositionChange(self, motor: Motor, newposition: float): self.positionLabel.setText('<b>{:.4f}</b>'.format(newposition)) left = motor.get_variable('softleft') right = motor.get_variable('softright') if self.relativeCheckBox.isChecked(): left -= newposition right -= newposition self.targetDoubleSpinBox.setMinimum(left) 
self.targetDoubleSpinBox.setMaximum(right) self.leftLimitLabel.setText('{:.4f}'.format(left)) self.rightLimitLabel.setText('{:.4f}'.format(right)) self.adjustSize() def onMotorStop(self, motor: Motor, targetpositionreached: bool): self.setIdle()
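For reference, the limit handling in onMotorPositionChange above reduces to simple arithmetic: in relative mode the spinbox range is the motor's soft limits shifted by the current position, so a target of 0 always means "stay in place". A stand-alone sketch of that rule, with made-up limit and position values and no Qt dependency:

# Stand-alone sketch of the spinbox-limit arithmetic used in onMotorPositionChange.
def spinbox_limits(softleft, softright, position, relative):
    if relative:
        return softleft - position, softright - position
    return softleft, softright

print(spinbox_limits(-10.0, 10.0, position=4.0, relative=True))   # (-14.0, 6.0)
print(spinbox_limits(-10.0, 10.0, position=4.0, relative=False))  # (-10.0, 10.0)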
bsd-3-clause
-8,256,177,171,665,578,000
6,117,861,012,095,764,000
38.365217
95
0.664237
false
wfnex/openbras
src/VPP/test/test_vxlan.py
2
9043
#!/usr/bin/env python import socket from util import ip4n_range import unittest from framework import VppTestCase, VppTestRunner from template_bd import BridgeDomain from scapy.layers.l2 import Ether from scapy.layers.inet import IP, UDP from scapy.layers.vxlan import VXLAN from scapy.utils import atol class TestVxlan(BridgeDomain, VppTestCase): """ VXLAN Test Case """ def __init__(self, *args): BridgeDomain.__init__(self) VppTestCase.__init__(self, *args) def encapsulate(self, pkt, vni): """ Encapsulate the original payload frame by adding VXLAN header with its UDP, IP and Ethernet fields """ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) / IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) / UDP(sport=self.dport, dport=self.dport, chksum=0) / VXLAN(vni=vni, flags=self.flags) / pkt) def encap_mcast(self, pkt, src_ip, src_mac, vni): """ Encapsulate the original payload frame by adding VXLAN header with its UDP, IP and Ethernet fields """ return (Ether(src=src_mac, dst=self.mcast_mac) / IP(src=src_ip, dst=self.mcast_ip4) / UDP(sport=self.dport, dport=self.dport, chksum=0) / VXLAN(vni=vni, flags=self.flags) / pkt) def decapsulate(self, pkt): """ Decapsulate the original payload frame by removing VXLAN header """ # check if is set I flag self.assertEqual(pkt[VXLAN].flags, int('0x8', 16)) return pkt[VXLAN].payload # Method for checking VXLAN encapsulation. # def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False): # TODO: add error messages # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved # by VPP using ARP. self.assertEqual(pkt[Ether].src, self.pg0.local_mac) if not local_only: if not mcast_pkt: self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac) else: self.assertEqual(pkt[Ether].dst, type(self).mcast_mac) # Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP. self.assertEqual(pkt[IP].src, self.pg0.local_ip4) if not local_only: if not mcast_pkt: self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4) else: self.assertEqual(pkt[IP].dst, type(self).mcast_ip4) # Verify UDP destination port is VXLAN 4789, source UDP port could be # arbitrary. 
self.assertEqual(pkt[UDP].dport, type(self).dport) # TODO: checksum check # Verify VNI self.assertEqual(pkt[VXLAN].vni, vni) @classmethod def create_vxlan_flood_test_bd(cls, vni, n_ucast_tunnels): # Create 10 ucast vxlan tunnels under bd ip_range_start = 10 ip_range_end = ip_range_start + n_ucast_tunnels next_hop_address = cls.pg0.remote_ip4n for dest_ip4n in ip4n_range(next_hop_address, ip_range_start, ip_range_end): # add host route so dest_ip4n will not be resolved cls.vapi.ip_add_del_route(dest_ip4n, 32, next_hop_address) r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=dest_ip4n, vni=vni) cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni) @classmethod def add_del_shared_mcast_dst_load(cls, is_add): """ add or del tunnels sharing the same mcast dst to test vxlan ref_count mechanism """ n_shared_dst_tunnels = 2000 vni_start = 10000 vni_end = vni_start + n_shared_dst_tunnels for vni in range(vni_start, vni_end): r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=cls.mcast_ip4n, mcast_sw_if_index=1, vni=vni, is_add=is_add) if r.sw_if_index == 0xffffffff: raise "bad sw_if_index" @classmethod def add_shared_mcast_dst_load(cls): cls.add_del_shared_mcast_dst_load(is_add=1) @classmethod def del_shared_mcast_dst_load(cls): cls.add_del_shared_mcast_dst_load(is_add=0) @classmethod def add_del_mcast_tunnels_load(cls, is_add): """ add or del tunnels to test vxlan stability """ n_distinct_dst_tunnels = 200 ip_range_start = 10 ip_range_end = ip_range_start + n_distinct_dst_tunnels for dest_ip4n in ip4n_range(cls.mcast_ip4n, ip_range_start, ip_range_end): vni = bytearray(dest_ip4n)[3] cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=dest_ip4n, mcast_sw_if_index=1, vni=vni, is_add=is_add) @classmethod def add_mcast_tunnels_load(cls): cls.add_del_mcast_tunnels_load(is_add=1) @classmethod def del_mcast_tunnels_load(cls): cls.add_del_mcast_tunnels_load(is_add=0) # Class method to start the VXLAN test case. # Overrides setUpClass method in VppTestCase class. # Python try..except statement is used to ensure that the tear down of # the class will be executed even if exception is raised. # @param cls The class pointer. @classmethod def setUpClass(cls): super(TestVxlan, cls).setUpClass() try: cls.dport = 4789 cls.flags = 0x8 # Create 2 pg interfaces. cls.create_pg_interfaces(range(4)) for pg in cls.pg_interfaces: pg.admin_up() # Configure IPv4 addresses on VPP pg0. cls.pg0.config_ip4() # Resolve MAC address for VPP's IP address on pg0. cls.pg0.resolve_arp() # Our Multicast address cls.mcast_ip4 = '239.1.1.1' cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4) iplong = atol(cls.mcast_ip4) cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % ( (iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF) # Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1 # into BD. 
cls.single_tunnel_bd = 1 r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=cls.pg0.remote_ip4n, vni=cls.single_tunnel_bd) cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=cls.single_tunnel_bd) cls.vapi.sw_interface_set_l2_bridge(cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd) # Setup vni 2 to test multicast flooding cls.n_ucast_tunnels = 10 cls.mcast_flood_bd = 2 cls.create_vxlan_flood_test_bd(cls.mcast_flood_bd, cls.n_ucast_tunnels) r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=cls.mcast_ip4n, mcast_sw_if_index=1, vni=cls.mcast_flood_bd) cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=cls.mcast_flood_bd) cls.vapi.sw_interface_set_l2_bridge(cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd) # Add and delete mcast tunnels to check stability cls.add_shared_mcast_dst_load() cls.add_mcast_tunnels_load() cls.del_shared_mcast_dst_load() cls.del_mcast_tunnels_load() # Setup vni 3 to test unicast flooding cls.ucast_flood_bd = 3 cls.create_vxlan_flood_test_bd(cls.ucast_flood_bd, cls.n_ucast_tunnels) cls.vapi.sw_interface_set_l2_bridge(cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd) except Exception: super(TestVxlan, cls).tearDownClass() raise # Method to define VPP actions before tear down of the test case. # Overrides tearDown method in VppTestCase class. # @param self The object pointer. def tearDown(self): super(TestVxlan, self).tearDown() if not self.vpp_dead: self.logger.info(self.vapi.cli("show bridge-domain 1 detail")) self.logger.info(self.vapi.cli("show bridge-domain 2 detail")) self.logger.info(self.vapi.cli("show bridge-domain 3 detail")) self.logger.info(self.vapi.cli("show vxlan tunnel")) if __name__ == '__main__': unittest.main(testRunner=VppTestRunner)
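The encapsulate() helper above is ordinary Scapy layering: an inner Ethernet frame wrapped in outer Ether/IP/UDP/VXLAN headers, with UDP port 4789 and the I flag (0x8) set. A self-contained sketch of the same construction; the MAC/IP addresses and VNI below are placeholders, not values taken from the test fixtures:

# Stand-alone Scapy sketch of the VXLAN encapsulation used by encapsulate().
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN

inner = Ether(src="02:00:00:00:00:01", dst="02:00:00:00:00:02") / IP() / UDP()
pkt = (Ether(src="02:11:22:33:44:55", dst="02:66:77:88:99:aa") /
       IP(src="10.0.0.1", dst="10.0.0.2") /
       UDP(sport=4789, dport=4789, chksum=0) /
       VXLAN(vni=1, flags=0x8) /
       inner)
assert pkt[VXLAN].vni == 1     # same field the test checks in check_encapsulation
assert pkt[UDP].dport == 4789  # IANA-assigned VXLAN port, used as cls.dport above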
bsd-3-clause
9,104,041,800,281,940,000
8,874,328,677,983,155,000
37.645299
79
0.560323
false
davidbrazdil/nacl
tools/process_oprofile_x86_64.py
12
17097
#!/usr/bin/python # Copyright (c) 2011 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Post-process Oprofile logs for x86-64 nexes running under sel_ldr. Maps event counts in the "anon" region, to the appropriate addresses in the nexe assembly. "Anon" represents the untrusted sandbox. This will become unnecessary once we get immutable files for our .nexe so that sel_ldr can use mmap the .nexe instead of copying it in (Oprofile should understand mmap). Remember to look at the oprofile log for the time spent in the trusted code / OS (this only lists time spent in the untrusted code). """ # TODO(jvoung): consider using addr2line to look up functions with # the linenum / file info instead of the using the rangemap. # Pro: less custom code and possibility of understanding Dwarf info. # Con: lots of exec()s to cover all the samples... import commands import getopt import math import re import sys def Debug(mesg): sys.stdout.flush() # Make stdout/stderr come out in order. print >>sys.stderr, "# ", mesg return def DemangleFunc(fun_name): # In case the disassembly was done without the objdump "-C" flag. # Heuristic for detecting already demangled names # (c++filt will hate you for giving it an already demangled name) if ('(' in fun_name or '*' in fun_name or ':' in fun_name or '&' in fun_name): return fun_name return commands.getoutput("c++filt " + fun_name) # Assume addresses in inputs (logs and assembly files) are all this base. ADDRESS_BASE = 16 ADDRESS_DIGIT = '[a-f0-9]' def GetUntrustedBase(sel_ldr_log_fd): """ Parse the sel_ldr debug output to find the base of the untrusted memory region. Returns the base address. """ untrusted_base = None for line in sel_ldr_log_fd: # base is the mem start addr printed by sel_ldr if line.find('mem start addr') != -1: fields = line.split() untrusted_base = int(fields[-1], ADDRESS_BASE) break assert untrusted_base is not None, "Couldn't parse untrusted base" Debug("untrusted_base = %s" % hex(untrusted_base)) return untrusted_base #--------------- Parse Oprofile Log --------------- def CheckIfInSelLdrRegion(line, cur_range_base): """ Checks if we are reading the part of the oprofile --details log pertaining to the untrusted sandbox in sel_ldr's address space. Returns the base of that memory region or None. """ fields = line.split() # cur_range_base should be set if we are already parsing the # untrusted sandbox section of the log. if cur_range_base: # Check if we are exiting the untrusted sandbox section of the log. # The header of a new non-untrusted-sandbox section should look like: # 00000000 samples pct foo.so foo.so /path-to/foo.so if len(fields) >= 6: Debug('Likely exiting sel_ldr section to a new section: %s' % fields[3]) # Check if the next section is also a sel_ldr region return CheckIfInSelLdrRegion(line, None) else: return cur_range_base else: # Check if we are entering the untrusted-sandbox section of the log. # The header of such a section should look like: # # 00000000 samples pct anon (tgid:22067 range:0xBASE-0xEND) # (sel_ldr or chrome) anon (tgid:22067 range:...) # # I.e., 10 fields... 
if (len(fields) == 10 and (fields[6] == 'sel_ldr' or fields[6] == 'chrome' or fields[6] == 'nacl_helper_bootstrap') and ('anon' == fields[3])): Debug('Likely starting sel_ldr section: %s %s' % (fields[3], fields[6])) range_token = fields[9] range_re = re.compile('range:0x(' + ADDRESS_DIGIT + '+)-0x') match = range_re.search(range_token) if match: range_str = match.group(1) range_base = int(range_str, ADDRESS_BASE) Debug('Likely range base is %s' % hex(range_base)) return range_base else: Debug("Couldn't parse range base for: " + str(fields)) return None else: return None def UpdateAddrEventMap(line, sel_ldr_range_base, untrusted_base, addr_to_event): """ Add an event count to the addr_to_event map if the line of data looks like an event count. Example: vma samples % 0000028a 1 1.8e-04 """ fields = line.split() if len(fields) == 3: # deal with numbers like fffffff484494ca5 which are actually negative address = int(fields[0], ADDRESS_BASE) if address > 0x8000000000000000: address = -((0xffffffffffffffff - address) + 1) address = address + sel_ldr_range_base - untrusted_base sample_count = int(fields[1]) cur = addr_to_event.get(address, 0) addr_to_event[address] = cur + sample_count return def CheckTrustedRecord(line, trusted_events, filter_events): """ Checks if this line is a samplecount for a trusted function. Because oprofile understands these, we just use its aggregate count. Updates the trusted_events map.""" # oprofile function records have the following format: # address sample_count percent image_name app_name symbol_name # Some symbol names have spaces (function prototypes), so only split 6 words. fields = line.split(None, 5) if len(fields) < 6: return False image_name = fields[3] symbol_name = fields[5].rstrip() # 2 cases: we want only 'relevant' samples, or we want all of them. # Either way, ignore the untrusted region. if (image_name == "anon" and symbol_name.find('sel_ldr') != -1): return False try: # catch lines that aren't records (e.g. the CPU type) sample_count = int(fields[1]) except ValueError: return False if (filter_events and not (image_name.endswith('sel_ldr') or image_name.startswith('llc') or image_name.endswith('.so') or image_name == 'no-vmlinux' or image_name == 'chrome' or image_name == 'nacl_helper_bootstrap')): trusted_events['FILTERED'] = trusted_events.get('FILTERED',0) + sample_count return False # If there are duplicate function names, just use the first instance. # (Most likely they are from shared libraries in different processes, and # because the opreport output is sorted, the top one is most likely to be # our process of interest, and the rest are not.) key = image_name + ':' + symbol_name trusted_events[key] = trusted_events.get(key, sample_count) return True def GetAddressToEventSelLdr(fd, filter_events, untrusted_base): """ Returns 2 maps: addr_to_event: address (int) -> event count (int) and trusted_events: func (str) - > event count (int)""" addr_to_event = {} trusted_events = {} sel_ldr_range_base = None for line in fd: sel_ldr_range_base = CheckIfInSelLdrRegion(line, sel_ldr_range_base) if sel_ldr_range_base: # If we've parsed the header of the region and know the base of # this range, start picking up event counts. UpdateAddrEventMap(line, sel_ldr_range_base, untrusted_base, addr_to_event) else: CheckTrustedRecord(line, trusted_events, filter_events) fd.seek(0) # Reset for future use... 
return addr_to_event, trusted_events #--------------- Parse Assembly File --------------- def CompareBounds((lb1, ub1), (lb2, ub2)): # Shouldn't be overlapping, so both the upper and lower # should be less than the other's lower bound if (lb1 < lb2) and (ub1 < lb2): return -1 elif (lb1 > ub2) and (ub1 > ub2): return 1 else: # Somewhere between, not necessarily equal. return 0 class RangeMapSorted(object): """ Simple range map using a sorted list of pairs ((lowerBound, upperBound), data). """ ranges = [] # Error indexes (< 0) kGREATER = -2 kLESS = -1 def FindIndex(self, lb, ub): length = len(self.ranges) return self.FindIndexFrom(lb, ub, int(math.ceil(length / 2.0)), 0, length) def FindIndexFrom(self, lb, ub, CurGuess, CurL, CurH): length = len(self.ranges) # If it is greater than the last index, it is greater than all. if CurGuess >= length: return self.kGREATER ((lb2, ub2), _) = self.ranges[CurGuess] comp = CompareBounds((lb, ub), (lb2, ub2)) if comp == 0: return CurGuess elif comp < 0: # If it is less than index 0, it is less than all. if CurGuess == 0: return self.kLESS NextL = CurL NextH = CurGuess NextGuess = CurGuess - int (math.ceil((NextH - NextL) / 2.0)) else: # If it is greater than the last index, it is greater than all. if CurGuess >= length - 1: return self.kGREATER NextL = CurGuess NextH = CurH NextGuess = CurGuess + int (math.ceil((NextH - NextL) / 2.0)) return self.FindIndexFrom(lb, ub, NextGuess, NextL, NextH) def Add(self, lb, ub, data): """ Add a mapping from [lb, ub] --> data """ index = self.FindIndex(lb, ub) range_data = ((lb, ub), data) if index == self.kLESS: self.ranges.insert(0, range_data) elif index == self.kGREATER: self.ranges.append(range_data) else: self.ranges.insert(index, range_data) def Lookup(self, key): """ Get the data that falls within the range. """ index = self.FindIndex(key, key) # Check if it is out of range. if index < 0: return None ((lb, ub), d) = self.ranges[index] # Double check that the key actually falls in range. if lb <= key and key <= ub: return d else: return None def GetRangeFromKey(self, key): index = self.FindIndex(key, key) # Check if it is out of range. if index < 0: return None ((lb, ub), _) = self.ranges[index] # Double check that the key actually falls in range. if lb <= key and key <= ub: return (lb, ub) else: return None ADDRESS_RE = re.compile('(' + ADDRESS_DIGIT + '+):') FUNC_RE = re.compile('(' + ADDRESS_DIGIT + '+) <(.*)>:') def GetAssemblyAddress(line): """ Look for lines of assembly that look like address: [byte] [byte]... [instruction in text] """ fields = line.split() if len(fields) > 1: match = ADDRESS_RE.search(fields[0]) if match: return int(match.group(1), ADDRESS_BASE) return None def GetAssemblyRanges(fd): """ Return a RangeMap that tracks the boundaries of each function. E.g., [0x20000, 0x2003f] --> "foo" [0x20040, 0x20060] --> "bar" """ rmap = RangeMapSorted() cur_start = None cur_func = None cur_end = None for line in fd: # If we are within a function body... if cur_func: # Check if it has ended (with a newline) if line.strip() == '': assert (cur_start and cur_end) rmap.Add(cur_start, cur_end, cur_func) cur_start = None cur_end = None cur_func = None else: maybe_addr = GetAssemblyAddress(line) if maybe_addr: cur_end = maybe_addr else: # Not yet within a function body. Check if we are entering. # The header should look like: # 0000000000020040 <foo>: match = FUNC_RE.search(line) if match: cur_start = int(match.group(1), ADDRESS_BASE) cur_func = match.group(2) fd.seek(0) # reset for future use. 
return rmap #--------------- Summarize Data --------------- def PrintTopFunctions(assembly_ranges, address_to_events, trusted_events): """ Prints the N functions with the top event counts """ func_events = {} some_addrs_not_found = False for (addr, count) in address_to_events.iteritems(): func = assembly_ranges.Lookup(addr) if (func): # Function labels are mostly unique, except when we have ASM labels # that we mistake for functions. E.g., "loop:" is a common ASM label. # Thus, to get a unique value, we must append the unique key range # to the function label. (lb, ub) = assembly_ranges.GetRangeFromKey(addr) key = (func, lb, ub) cur_count = func_events.get(key, 0) func_events[key] = cur_count + count else: Debug('No matching function for addr/count: %s %d' % (hex(addr), count)) some_addrs_not_found = True if some_addrs_not_found: # Addresses < 0x20000 are likely trampoline addresses. Debug('NOTE: sample addrs < 0x20000 are likely trampolines') filtered_events = trusted_events.pop('FILTERED', 0) # convert trusted functions (which are just functions and not ranges) into # the same format and mix them with untrusted. Just use 0s for the ranges for (func, count) in trusted_events.iteritems(): key = (func, 0, 0) func_events[key] = count flattened = func_events.items() def CompareCounts ((k1, c1), (k2, c2)): if c1 < c2: return -1 elif c1 == c2: return 0 else: return 1 flattened.sort(cmp=CompareCounts, reverse=True) top_30 = flattened[:30] total_samples = (sum(address_to_events.itervalues()) + sum(trusted_events.itervalues())) print "============= Top 30 Functions ===============" print "EVENTS\t\tPCT\tCUM\tFUNC [LOW_VMA, UPPER_VMA]" cum_pct = 0.0 for ((func, lb, ub), count) in top_30: pct = 100.0 * count / total_samples cum_pct += pct print "%d\t\t%.2f\t%.2f\t%s [%s, %s]" % (count, pct, cum_pct, DemangleFunc(func), hex(lb), hex(ub)) print "%d samples filtered (%.2f%% of all samples)" % (filtered_events, 100.0 * filtered_events / (filtered_events + total_samples)) #--------------- Annotate Assembly --------------- def PrintAnnotatedAssembly(fd_in, address_to_events, fd_out): """ Writes to output, a version of assembly_file which has event counts in the form #; EVENTS: N This lets us know which instructions took the most time, etc. """ for line in fd_in: line = line.strip() maybe_addr = GetAssemblyAddress(line) if maybe_addr in address_to_events: event_count = address_to_events[maybe_addr] print >>fd_out, "%s #; EVENTS: %d" % (line, event_count) else: print >>fd_out, line fd_in.seek(0) # reset for future use. #--------------- Main --------------- def main(argv): try: opts, args = getopt.getopt(argv[1:], 'l:s:o:m:f', ['oprofilelog=', 'assembly=', 'output=', 'memmap=', 'untrusted_base=', ]) assembly_file = None assembly_fd = None oprof_log = None oprof_fd = None output = sys.stdout out_name = None filter_events = False # Get the untrusted base address from either a sel_ldr log # which prints out the mapping, or from the command line directly. 
mapfile_name = None mapfile_fd = None untrusted_base = None for o, a in opts: if o in ('-l', '--oprofilelog'): oprof_log = a oprof_fd = open(oprof_log, 'r') elif o in ('-s', '--assembly'): assembly_file = a assembly_fd = open(assembly_file, 'r') elif o in ('-o', '--output'): out_name = a output = open(out_name, 'w') elif o in ('-m', '--memmap'): mapfile_name = a try: mapfile_fd = open(mapfile_name, 'r') except IOError: pass elif o in ('-b', '--untrusted_base'): untrusted_base = a elif o == '-f': filter_events = True else: assert False, 'unhandled option' if untrusted_base: if mapfile_fd: print 'Error: Specified both untrusted_base directly and w/ memmap file' sys.exit(1) untrusted_base = int(untrusted_base, 16) else: if mapfile_fd: Debug('Parsing sel_ldr output for untrusted memory base: %s' % mapfile_name) untrusted_base = GetUntrustedBase(mapfile_fd) else: print 'Error: Need sel_ldr log --memmap or --untrusted_base.' sys.exit(1) if assembly_file and oprof_log: Debug('Parsing assembly file of nexe: %s' % assembly_file) assembly_ranges = GetAssemblyRanges(assembly_fd) Debug('Parsing oprofile log: %s' % oprof_log) untrusted_events, trusted_events = \ GetAddressToEventSelLdr(oprof_fd, filter_events, untrusted_base) Debug('Printing the top functions (most events)') PrintTopFunctions(assembly_ranges, untrusted_events, trusted_events) Debug('Printing annotated assembly to %s (or stdout)' % out_name) PrintAnnotatedAssembly(assembly_fd, untrusted_events, output) else: print 'Need assembly file(%s) and oprofile log(%s)!' \ % (assembly_file, oprof_log) sys.exit(1) except getopt.GetoptError, err: print str(err) sys.exit(1) if __name__ == '__main__': main(sys.argv)
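RangeMapSorted above is a hand-rolled binary search over non-overlapping [lower, upper] address ranges. The same lookup can be expressed with the standard bisect module; the following is an illustrative alternative sketch (with made-up function names and addresses), not the class the script actually uses:

# Sketch of the "which function owns this address" lookup built on bisect.
# Ranges must be non-overlapping; entries are kept sorted by lower bound.
import bisect

class SimpleRangeMap(object):
    def __init__(self):
        self._starts = []   # sorted lower bounds
        self._entries = []  # (lb, ub, data), parallel to _starts

    def add(self, lb, ub, data):
        i = bisect.bisect_left(self._starts, lb)
        self._starts.insert(i, lb)
        self._entries.insert(i, (lb, ub, data))

    def lookup(self, key):
        i = bisect.bisect_right(self._starts, key) - 1
        if i >= 0:
            lb, ub, data = self._entries[i]
            if lb <= key <= ub:
                return data
        return None

rmap = SimpleRangeMap()
rmap.add(0x20000, 0x2003f, 'foo')
rmap.add(0x20040, 0x20060, 'bar')
print(rmap.lookup(0x20040))  # 'bar'
print(rmap.lookup(0x10000))  # None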
bsd-3-clause
3,697,856,042,444,690,400
2,191,752,003,681,758,700
34.179012
80
0.616015
false
ivanhorvath/openshift-tools
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
24
6571
"""Ansible callback plugin to print a summary completion status of installation phases. """ from datetime import datetime from ansible.plugins.callback import CallbackBase from ansible import constants as C class CallbackModule(CallbackBase): """This callback summarizes installation phase status.""" CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'aggregate' CALLBACK_NAME = 'installer_checkpoint' CALLBACK_NEEDS_WHITELIST = False def __init__(self): super(CallbackModule, self).__init__() def v2_playbook_on_stats(self, stats): # Set the order of the installer phases installer_phases = [ 'installer_phase_initialize', 'installer_phase_health', 'installer_phase_etcd', 'installer_phase_nfs', 'installer_phase_loadbalancer', 'installer_phase_master', 'installer_phase_master_additional', 'installer_phase_node', 'installer_phase_glusterfs', 'installer_phase_hosted', 'installer_phase_web_console', 'installer_phase_metrics', 'installer_phase_logging', 'installer_phase_prometheus', 'installer_phase_servicecatalog', 'installer_phase_management', ] # Define the attributes of the installer phases phase_attributes = { 'installer_phase_initialize': { 'title': 'Initialization', 'playbook': '' }, 'installer_phase_health': { 'title': 'Health Check', 'playbook': 'playbooks/openshift-checks/pre-install.yml' }, 'installer_phase_etcd': { 'title': 'etcd Install', 'playbook': 'playbooks/openshift-etcd/config.yml' }, 'installer_phase_nfs': { 'title': 'NFS Install', 'playbook': 'playbooks/openshift-nfs/config.yml' }, 'installer_phase_loadbalancer': { 'title': 'Load balancer Install', 'playbook': 'playbooks/openshift-loadbalancer/config.yml' }, 'installer_phase_master': { 'title': 'Master Install', 'playbook': 'playbooks/openshift-master/config.yml' }, 'installer_phase_master_additional': { 'title': 'Master Additional Install', 'playbook': 'playbooks/openshift-master/additional_config.yml' }, 'installer_phase_node': { 'title': 'Node Install', 'playbook': 'playbooks/openshift-node/config.yml' }, 'installer_phase_glusterfs': { 'title': 'GlusterFS Install', 'playbook': 'playbooks/openshift-glusterfs/config.yml' }, 'installer_phase_hosted': { 'title': 'Hosted Install', 'playbook': 'playbooks/openshift-hosted/config.yml' }, 'installer_phase_web_console': { 'title': 'Web Console Install', 'playbook': 'playbooks/openshift-web-console/config.yml' }, 'installer_phase_metrics': { 'title': 'Metrics Install', 'playbook': 'playbooks/openshift-metrics/config.yml' }, 'installer_phase_logging': { 'title': 'Logging Install', 'playbook': 'playbooks/openshift-logging/config.yml' }, 'installer_phase_prometheus': { 'title': 'Prometheus Install', 'playbook': 'playbooks/openshift-prometheus/config.yml' }, 'installer_phase_servicecatalog': { 'title': 'Service Catalog Install', 'playbook': 'playbooks/openshift-service-catalog/config.yml' }, 'installer_phase_management': { 'title': 'Management Install', 'playbook': 'playbooks/openshift-management/config.yml' }, } # Find the longest phase title max_column = 0 for phase in phase_attributes: max_column = max(max_column, len(phase_attributes[phase]['title'])) if '_run' in stats.custom: self._display.banner('INSTALLER STATUS') for phase in installer_phases: phase_title = phase_attributes[phase]['title'] padding = max_column - len(phase_title) + 2 if phase in stats.custom['_run']: phase_status = stats.custom['_run'][phase]['status'] phase_time = phase_time_delta(stats.custom['_run'][phase]) self._display.display( '{}{}: {} ({})'.format(phase_title, ' ' * padding, phase_status, phase_time), 
color=self.phase_color(phase_status)) if phase_status == 'In Progress' and phase != 'installer_phase_initialize': self._display.display( '\tThis phase can be restarted by running: {}'.format( phase_attributes[phase]['playbook'])) if 'message' in stats.custom['_run'][phase]: self._display.display( '\t{}'.format( stats.custom['_run'][phase]['message'])) self._display.display("", screen_only=True) def phase_color(self, status): """ Return color code for installer phase""" valid_status = [ 'In Progress', 'Complete', ] if status not in valid_status: self._display.warning('Invalid phase status defined: {}'.format(status)) if status == 'Complete': phase_color = C.COLOR_OK elif status == 'In Progress': phase_color = C.COLOR_ERROR else: phase_color = C.COLOR_WARN return phase_color def phase_time_delta(phase): """ Calculate the difference between phase start and end times """ time_format = '%Y%m%d%H%M%SZ' phase_start = datetime.strptime(phase['start'], time_format) if 'end' not in phase: # The phase failed so set the end time to now phase_end = datetime.now() else: phase_end = datetime.strptime(phase['end'], time_format) delta = str(phase_end - phase_start).split(".")[0] # Trim microseconds return delta
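The timing summary relies on phase_time_delta() parsing the start/end timestamps that the playbooks store in stats.custom using the '%Y%m%d%H%M%SZ' format. A condensed stand-alone restatement of that computation, run on a hand-made phase record (the timestamp values below are placeholders):

# Stand-alone check of the phase_time_delta() logic on a synthetic phase record.
from datetime import datetime

def phase_time_delta(phase):
    time_format = '%Y%m%d%H%M%SZ'
    phase_start = datetime.strptime(phase['start'], time_format)
    phase_end = (datetime.strptime(phase['end'], time_format)
                 if 'end' in phase else datetime.now())  # missing end = phase failed
    return str(phase_end - phase_start).split(".")[0]    # trim microseconds

phase = {'start': '20230101120000Z', 'end': '20230101121530Z'}
print(phase_time_delta(phase))  # 0:15:30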
apache-2.0
-3,563,070,693,980,131,300
-8,526,453,083,515,799,000
38.113095
101
0.530665
false
philipz/PyCV-time
opencv-official-samples/2.4.9/demo.py
7
5157
#!/usr/bin/env python ''' Sample-launcher application. ''' import Tkinter as tk from ScrolledText import ScrolledText from glob import glob from common import splitfn import webbrowser from subprocess import Popen #from IPython.Shell import IPShellEmbed #ipshell = IPShellEmbed() exclude_list = ['demo', 'common'] class LinkManager: def __init__(self, text, url_callback = None): self.text = text self.text.tag_config("link", foreground="blue", underline=1) self.text.tag_bind("link", "<Enter>", self._enter) self.text.tag_bind("link", "<Leave>", self._leave) self.text.tag_bind("link", "<Button-1>", self._click) self.url_callback = url_callback self.reset() def reset(self): self.links = {} def add(self, action): # add an action to the manager. returns tags to use in # associated text widget tag = "link-%d" % len(self.links) self.links[tag] = action return "link", tag def _enter(self, event): self.text.config(cursor="hand2") def _leave(self, event): self.text.config(cursor="") def _click(self, event): for tag in self.text.tag_names(tk.CURRENT): if tag.startswith("link-"): proc = self.links[tag] if callable(proc): proc() else: if self.url_callback: self.url_callback(proc) class App: def __init__(self): root = tk.Tk() root.title('OpenCV Demo') self.win = win = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashrelief=tk.RAISED, sashwidth=4) self.win.pack(fill=tk.BOTH, expand=1) left = tk.Frame(win) right = tk.Frame(win) win.add(left) win.add(right) scrollbar = tk.Scrollbar(left, orient=tk.VERTICAL) self.demos_lb = demos_lb = tk.Listbox(left, yscrollcommand=scrollbar.set) scrollbar.config(command=demos_lb.yview) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) demos_lb.pack(side=tk.LEFT, fill=tk.BOTH, expand=1) self.samples = {} for fn in glob('*.py'): name = splitfn(fn)[1] if fn[0] != '_' and name not in exclude_list: demos_lb.insert(tk.END, name) self.samples[name] = fn demos_lb.bind('<<ListboxSelect>>', self.on_demo_select) self.cmd_entry = cmd_entry = tk.Entry(right) cmd_entry.bind('<Return>', self.on_run) run_btn = tk.Button(right, command=self.on_run, text='Run', width=8) self.text = text = ScrolledText(right, font=('arial', 12, 'normal'), width = 30, wrap='word') self.linker = linker = LinkManager(text, self.on_link) self.text.tag_config("header1", font=('arial', 14, 'bold')) self.text.tag_config("header2", font=('arial', 12, 'bold')) text.config(state='disabled') text.pack(fill='both', expand=1, side=tk.BOTTOM) cmd_entry.pack(fill='x', side='left' , expand=1) run_btn.pack() def on_link(self, url): print url webbrowser.open(url) def on_demo_select(self, evt): name = self.demos_lb.get( self.demos_lb.curselection()[0] ) fn = self.samples[name] loc = {} execfile(fn, loc) descr = loc.get('__doc__', 'no-description') self.linker.reset() self.text.config(state='normal') self.text.delete(1.0, tk.END) self.format_text(descr) self.text.config(state='disabled') self.cmd_entry.delete(0, tk.END) self.cmd_entry.insert(0, fn) def format_text(self, s): text = self.text lines = s.splitlines() for i, s in enumerate(lines): s = s.rstrip() if i == 0 and not s: continue if s and s == '='*len(s): text.tag_add('header1', 'end-2l', 'end-1l') elif s and s == '-'*len(s): text.tag_add('header2', 'end-2l', 'end-1l') else: text.insert('end', s+'\n') def add_link(start, end, url): for tag in self.linker.add(url): text.tag_add(tag, start, end) self.match_text(r'http://\S+', add_link) def match_text(self, pattern, tag_proc, regexp=True): text = self.text text.mark_set('matchPos', '1.0') count = tk.IntVar() while True: match_index = 
text.search(pattern, 'matchPos', count=count, regexp=regexp, stopindex='end') if not match_index: break end_index = text.index( "%s+%sc" % (match_index, count.get()) ) text.mark_set('matchPos', end_index) if callable(tag_proc): tag_proc(match_index, end_index, text.get(match_index, end_index)) else: text.tag_add(tag_proc, match_index, end_index) def on_run(self, *args): cmd = self.cmd_entry.get() print 'running:', cmd Popen("python " + cmd, shell=True) def run(self): tk.mainloop() if __name__ == '__main__': App().run()
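The LinkManager above is built on Tkinter's text-tag mechanism: a shared "link" tag carries the styling and event bindings, and a per-link tag identifies which URL was clicked. A minimal stand-alone sketch of the same pattern, written for Python 3's tkinter (the launcher itself is Python 2) with a placeholder URL:

# Minimal sketch of the tag-binding trick LinkManager relies on.
import tkinter as tk
import webbrowser

root = tk.Tk()
text = tk.Text(root)
text.pack()

links = {}

def add_link(label, url):
    tag = "link-%d" % len(links)   # per-link tag identifies the target URL
    links[tag] = url
    text.insert("end", label, ("link", tag))
    text.insert("end", "\n")

def on_click(event):
    for tag in text.tag_names(tk.CURRENT):
        if tag.startswith("link-"):
            webbrowser.open(links[tag])

text.tag_config("link", foreground="blue", underline=1)
text.tag_bind("link", "<Button-1>", on_click)
add_link("OpenCV documentation", "https://docs.opencv.org/")
root.mainloop()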
mit
-187,461,916,994,601,570
-44,649,670,299,897,920
31.847134
103
0.552453
false
biskett/mic
tests/test_archive.py
5
26592
""" It is used to test mic/archive.py """ import os import shutil import unittest from mic import archive class ArchiveTest(unittest.TestCase): """ test pulic methods in archive.py """ def setUp(self): """Create files and directories for later use""" self.relative_file = './sdfb.gxdf.bzws.zzz' abs_file = '/tmp/adsdfb.gxdf.bzws.zzz' bare_file = 'abc.def.bz.zzz' self.relative_dir = './sdf.zzz' abs_dir = '/tmp/asdf.zzz' bare_dir = 'abd.zzz' self.wrong_format_file = './sdbs.werxdf.bz.zzz' self.files = [self.relative_file, abs_file, bare_file] self.dirs = [self.relative_dir, abs_dir, bare_dir] for file_item in self.files: os.system('touch %s' % file_item) for dir_item in self.dirs: self.create_dir(dir_item) shutil.copy(self.relative_file, '%s/1.txt' % dir_item) shutil.copy(self.relative_file, '%s/2.txt' % dir_item) self.create_dir('%s/dir1' % dir_item) self.create_dir('%s/dir2' % dir_item) def tearDown(self): """Clean up unuseful file and directory """ try: for file_item in self.files: os.remove(file_item) for dir_item in self.dirs: shutil.rmtree(dir_item, ignore_errors=True) except OSError: pass def create_dir(self, dir_name): """Create directories and ignore any erros """ try: os.makedirs(dir_name) except OSError: pass def test_get_compress_formats(self): """Test get compress format """ compress_list = archive.get_compress_formats() compress_list.sort() self.assertEqual(compress_list, ['bz2', 'gz', 'lzo']) def test_compress_negtive_file_path_is_required(self): """Test if the first parameter: file path is empty""" with self.assertRaises(OSError): archive.compress('', 'bz2') def test_compress_negtive_compress_format_is_required(self): """Test if the second parameter: compress format is empty""" with self.assertRaises(ValueError): archive.compress(self.relative_file, '') def test_compress_negtive_parameters_are_all_required(self): """Test if two parameters are both empty""" with self.assertRaises(OSError): archive.compress('', '') def test_compress_negtive_file_not_exist(self): """Test target file does not exist""" with self.assertRaises(OSError): archive.compress('a.py', 'bz2') def test_compress_negtive_file_is_dir(self): """Test target is one direcoty, which is not supported""" with self.assertRaises(OSError): archive.compress(self.relative_dir, 'bz2') def test_compress_negtive_wrong_compress_format(self): """Test wrong compress format""" with self.assertRaises(ValueError): archive.compress(self.relative_file, 'bzip2') def _compress_negtive_gz_command_not_exists(self): #TODO: test if command like 'pigz', 'gzip' does not exist pass def _compress_negtive_lzo_command_not_exists(self): #TODO: test if command 'lzop' does not exist pass def _compress_negtive_bz2_command_not_exists(self): #TODO: test if command like 'pbzip2', 'bzip2' does not exist pass def test_compress_gz(self): """Test compress format: gz""" for file_item in self.files: output_name = archive.compress(file_item, 'gz') self.assertEqual('%s.gz' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) os.remove(output_name) def test_compress_bz2(self): """Test compress format: bz2""" for file_item in self.files: output_name = archive.compress(file_item, 'bz2') self.assertEqual('%s.bz2' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) os.remove(output_name) def _test_compress_lzo(self): """Test compress format: lzo""" for file_item in self.files: output_name = archive.compress(file_item, 'lzo') self.assertEqual('%s.lzo' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) 
os.remove(output_name) def test_decompress_negtive_file_path_is_required(self): """Test if the first parameter: file to be uncompressed is empty""" with self.assertRaises(OSError): archive.decompress('', 'bz') def test_decompress_compress_format_is_empty(self): """Test if the second parameter: compress format is empty string""" output_name = archive.compress(self.relative_file, 'gz') self.assertEqual('%s.gz' % self.relative_file, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(self.relative_file)) archive.decompress(output_name, '') self.assertTrue(os.path.exists(self.relative_file)) def test_decompress_negtive_parameters_are_empty(self): """Test if two parameters are both empty string""" with self.assertRaises(OSError): archive.decompress('', '') def test_decompress_negtive_file_not_exist(self): """Test decompress target does not exist""" with self.assertRaises(OSError): archive.decompress('tresa.py', 'bz2') def test_decompress_negtive_path_is_dir(self): """Test decompress target is a directory""" with self.assertRaises(OSError): archive.decompress(self.relative_dir, 'bz2') def _decompress_negtive_not_corresponding(self): # TODO: test if path is .lzo, but given format is bz2 pass def test_decompress_negtive_wrong_compress_format(self): """Test wrong decompress format""" with self.assertRaises(ValueError): archive.decompress(self.relative_file, 'bzip2') def test_decompress_negtive_wrong_file_format(self): """Test wrong target format""" with self.assertRaises(Exception): archive.decompress(self.wrong_format_file, 'bz2') def test_decompress_gz(self): """Test decompress Format: gz both two parameters are given, one is target file, the other is corresponding compress format""" for file_item in self.files: output_name = archive.compress(file_item, 'gz') self.assertEqual('%s.gz' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(file_item)) archive.decompress(output_name, 'gz') self.assertTrue(os.path.exists(file_item)) def test_decompress_gz_no_compress_format(self): """Test decompress Format: gz one parameters is given, only target file""" for file_item in self.files: output_name = archive.compress(file_item, 'gz') self.assertEqual('%s.gz' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(file_item)) archive.decompress(output_name) self.assertTrue(os.path.exists(file_item)) def test_decompress_bz2(self): """Test decompress Format: bz2 both two parameters are given, one is target file, the other is corresponding compress format""" for file_item in self.files: output_name = archive.compress(file_item, 'bz2') self.assertEqual('%s.bz2' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(file_item)) archive.decompress(output_name, 'bz2') self.assertTrue(os.path.exists(file_item)) def test_decompress_bz2_no_compress_format(self): """Test decompress Format: bz2 one parameters is given, only target file""" for file_item in self.files: output_name = archive.compress(file_item, 'bz2') self.assertEqual('%s.bz2' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(file_item)) archive.decompress(output_name) self.assertTrue(os.path.exists(file_item)) def _test_decompress_lzo(self): """Test decompress Format: lzo both two parameters are given, one is target file, the other is corresponding compress format""" for file_item in self.files: output_name = 
archive.compress(file_item, 'lzo') self.assertEqual('%s.lzo' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(file_item)) archive.decompress(output_name, 'lzo') self.assertTrue(os.path.exists(file_item)) def _test_decompress_lzo_no_compress_format(self): """Test decompress Format: lzo one parameters is given, only target file""" for file_item in self.files: output_name = archive.compress(file_item, 'lzo') self.assertEqual('%s.lzo' % file_item, output_name) self.assertTrue(os.path.exists(output_name)) self.assertFalse(os.path.exists(file_item)) archive.decompress(output_name) self.assertTrue(os.path.exists(file_item)) def test_get_archive_formats(self): """Test get archive format""" archive_formats = archive.get_archive_formats() archive_formats.sort() self.assertEqual(archive_formats, ["bztar", "gztar", "lzotar", "tar", 'zip']) def test_get_archive_suffixes(self): """Test get archive suffixes""" archive_suffixes = archive.get_archive_suffixes() archive_suffixes.sort() self.assertEqual(archive_suffixes, ['.tar', '.tar.bz', '.tar.bz2', '.tar.gz', '.tar.lzo', '.taz', '.tbz', '.tbz2', '.tgz', '.tzo', '.zip']) def test_make_archive_negtive_archive_name_is_required(self): """Test if first parameter: file path is empty""" with self.assertRaises(Exception): archive.make_archive('', self.relative_dir) def test_extract_archive_negtive_archive_name_is_required(self): """Test if first parameter: file path is empty""" with self.assertRaises(Exception): archive.extract_archive('', self.relative_dir) def test_make_archive_negtive_target_name_is_required(self): """Test if second parameter: target name is empty""" with self.assertRaises(Exception): archive.make_archive('a.zip', '') def _extract_archive_negtive_target_name_is_required(self): # Not sure if the current dir will be used ? 
# TODO: pass def test_make_archive_negtive_parameters_are_empty(self): """Test if both parameters are empty""" with self.assertRaises(Exception): archive.make_archive('', '') def test_extract_archive_negtive_parameters_are_empty(self): """Test if both parameters are empty""" with self.assertRaises(Exception): archive.extract_archive('', '') def test_make_archive_negtive_target_path_not_exists(self): """Test if file path does not exist""" fake_file = 'abcdfsdf' with self.assertRaises(Exception): archive.make_archive('a.tar', fake_file) with self.assertRaises(Exception): archive.make_archive('a.zip', fake_file) def test_extract_archive_negtive_path_not_exists(self): """Test if file path does not exist""" fake_file = 'abcdfsdf' with self.assertRaises(Exception): archive.extract_archive(fake_file, self.relative_dir) def test_extract_archive_negtive_target_is_file(self): """Test if the extract target is file""" out_file = '%s.tar' % self.relative_dir self.assertTrue(archive.make_archive(out_file, self.relative_dir)) self.assertTrue(os.path.exists(out_file)) with self.assertRaises(Exception): archive.extract_archive(out_file, self.relative_file) os.remove(out_file) def test_make_archive_wrong_format(self): """Test wrong make_archive format""" with self.assertRaises(Exception): archive.make_archive('a.sfsfrwe', self.relative_dir) def test_make_archive_tar_with_different_name(self): """ Test make_archive format: tar It packs the source with another name""" for item in self.files + self.dirs: out_file = 'abcd.tar' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tar(self): """ Test make_archive format: tar""" for item in self.files + self.dirs: out_file = '%s.tar' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_extract_archive_tar(self): """ Test extract format: tar""" for item in self.files: out_file = '%s.tar' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join( out_dir, os.path.basename(item)))) shutil.rmtree(out_dir) for item in self.dirs: out_file = '%s.tar' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, '1.txt'))) self.assertTrue(os.path.exists(os.path.join(out_dir, '2.txt'))) self.assertTrue(os.path.exists(os.path.join(out_dir, 'dir1'))) self.assertTrue(os.path.exists(os.path.join(out_dir, 'dir2'))) shutil.rmtree(out_dir) def test_make_archive_zip_with_different_name(self): """ Test make_archive format: zip It packs the source with another name""" for item in self.files + self.dirs: out_file = 'a.zip' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_zip(self): """ Test make_archive format: zip""" for item in self.files + self.dirs: out_file = '%s.zip' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_zip(self): """ Test extract archive format: zip""" for item in self.files + self.dirs: out_file = '%s.zip' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) 
out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def _test_make_archive_tzo_with_different_name(self): """ Test make_archive format: tzo It packs the source with another name""" for item in self.files + self.dirs: out_file = 'abc.tzo' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _test_make_archive_tzo(self): """ Test make_archive format: tzo""" for item in self.files + self.dirs: out_file = '%s.tzo' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tzo(self): """ Test extract format: tzo""" for item in self.files + self.dirs: out_file = '%s.tzo' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def _test_make_archive_tar_lzo_with_different_name(self): """ Test make_archive format: lzo It packs the source with another name""" for item in self.files + self.dirs: out_file = 'abc.tar.lzo' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _test_make_archive_tar_lzo(self): """ Test make_archive format: lzo""" for item in self.files + self.dirs: out_file = '%s.tar.lzo' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tar_lzo(self): """ Test extract_archive format: lzo""" for item in self.files + self.dirs: out_file = '%s.tar.lzo' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_taz_with_different_name(self): """ Test make_archive format: taz It packs the source with another name""" for item in self.files + self.dirs: out_file = 'abcd.taz' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_taz(self): """ Test make_archive format: taz""" for item in self.files + self.dirs: out_file = '%s.taz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_taz(self): """ Test extract archive format: taz""" for item in self.files + self.dirs: out_file = '%s.taz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_tgz_with_different_name(self): """ Test make_archive format: tgz It packs the source with anotehr name""" for item in self.files + self.dirs: out_file = 'abc.tgz' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tgz(self): """ Test make_archive format: tgz""" for item in self.files + self.dirs: out_file = '%s.tgz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tgz(self): """ Test 
extract archive format: tgz""" for item in self.files + self.dirs: out_file = '%s.tgz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_tar_gz_with_different_name(self): """ Test make_archive format: tar.gz It packs the source with another name""" for item in self.files + self.dirs: out_file = 'erwe.tar.gz' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tar_gz(self): """ Test make_archive format: tar.gz""" for item in self.files + self.dirs: out_file = '%s.tar.gz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tar_gz(self): """ Test extract archive format: tar.gz""" for item in self.files + self.dirs: out_file = '%s.tar.gz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_tbz_with_different_name(self): """ Test make_archive format: tbz It packs the source with another name""" for item in self.files + self.dirs: out_file = 'sdfsd.tbz' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tbz(self): """ Test make_archive format: tbz""" for item in self.files + self.dirs: out_file = '%s.tbz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tbz(self): """ Test extract format: tbz""" for item in self.files + self.dirs: out_file = '%s.tbz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_tbz2_with_different_name(self): """ Test make_archive format: tbz2 It packs source with another name""" for item in self.files + self.dirs: out_file = 'sfsfd.tbz2' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tbz2(self): """ Test make_archive format: tbz2""" for item in self.files + self.dirs: out_file = '%s.tbz2' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tbz2(self): """ Test extract format: tbz2""" for item in self.files + self.dirs: out_file = '%s.tbz2' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_tar_bz_with_different_name(self): """ Test make_archive format: tar.bz It packs source with antoher name""" for item in self.files + self.dirs: out_file = 'sdf.tar.bz' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tar_bz(self): """ Test make_archive format: tar.bz""" for item in 
self.files + self.dirs: out_file = '%s.tar.bz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tar_bz(self): """ Test extract format: tar.bz""" for item in self.files + self.dirs: out_file = '%s.tar.bz' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) def test_make_archive_tar_bz2_with_different_name(self): """ Test make_archive format: tar.bz2 it packs the source with another name """ for item in self.files + self.dirs: out_file = 'df.tar.bz2' self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def test_make_archive_tar_bz2(self): """ Test make_archive format: tar.bz2""" for item in self.files + self.dirs: out_file = '%s.tar.bz2' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) os.remove(out_file) def _extract_archive_tar_bz2(self): """ Test extract format: tar.bz2""" for item in self.files + self.dirs: out_file = '%s.tar.bz2' % item self.assertTrue(archive.make_archive(out_file, item)) self.assertTrue(os.path.exists(out_file)) out_dir = 'un_tar_dir' archive.extract_archive(out_file, out_dir) self.assertTrue(os.path.exists(os.path.join(out_dir, item))) shutil.rmtree(out_dir) if __name__ == "__main__": unittest.main()
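# --- Illustrative sketch (not part of the test suite above) ---
# A minimal round trip through the archive API these tests exercise.
# Assumption: the module under test is importable here simply as `archive`;
# the real import lives at the top of the test file, outside this excerpt.
import os
import archive

def gz_roundtrip(path):
    # compress() returns the new file name (e.g. 'foo.txt.gz') and removes
    # the original; decompress() restores it and can infer the format from
    # the suffix when the second argument is omitted.
    packed = archive.compress(path, 'gz')
    assert packed == '%s.gz' % path and os.path.exists(packed)
    archive.decompress(packed)
    assert os.path.exists(path)

def tar_roundtrip(src, out_file='example.tar', out_dir='un_tar_dir'):
    # make_archive() picks the format from the target suffix ('.tar', '.zip',
    # '.tgz', '.tar.bz2', ...); extract_archive() unpacks into out_dir.
    assert archive.make_archive(out_file, src)
    archive.extract_archive(out_file, out_dir)
    return os.path.join(out_dir, os.path.basename(src))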
gpl-2.0
-6,882,668,147,737,308,000
-725,414,157,114,971,400
39.536585
79
0.592659
false
Bashar/django
django/core/cache/backends/memcached.py
11
7035
"Memcached cache backend" import time import pickle from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT from django.utils import six from django.utils.deprecation import RenameMethodsBase, RemovedInDjango19Warning from django.utils.encoding import force_str from django.utils.functional import cached_property class BaseMemcachedCacheMethods(RenameMethodsBase): renamed_methods = ( ('_get_memcache_timeout', 'get_backend_timeout', RemovedInDjango19Warning), ) class BaseMemcachedCache(six.with_metaclass(BaseMemcachedCacheMethods, BaseCache)): def __init__(self, server, params, library, value_not_found_exception): super(BaseMemcachedCache, self).__init__(params) if isinstance(server, six.string_types): self._servers = server.split(';') else: self._servers = server # The exception type to catch from the underlying library for a key # that was not found. This is a ValueError for python-memcache, # pylibmc.NotFound for pylibmc, and cmemcache will return None without # raising an exception. self.LibraryValueNotFoundException = value_not_found_exception self._lib = library self._options = params.get('OPTIONS', None) @property def _cache(self): """ Implements transparent thread-safe access to a memcached client. """ if getattr(self, '_client', None) is None: self._client = self._lib.Client(self._servers) return self._client def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT): """ Memcached deals with long (> 30 days) timeouts in a special way. Call this function to obtain a safe value for your timeout. """ if timeout == DEFAULT_TIMEOUT: return self.default_timeout if timeout is None: # Using 0 in memcache sets a non-expiring timeout. return 0 elif int(timeout) == 0: # Other cache backends treat 0 as set-and-expire. To achieve this # in memcache backends, a negative timeout must be passed. timeout = -1 if timeout > 2592000: # 60*60*24*30, 30 days # See http://code.google.com/p/memcached/wiki/FAQ # "You can set expire times up to 30 days in the future. After that # memcached interprets it as a date, and will expire the item after # said date. This is a simple (but obscure) mechanic." # # This means that we have to switch to absolute timestamps. timeout += int(time.time()) return int(timeout) def make_key(self, key, version=None): # Python 2 memcache requires the key to be a byte string. 
return force_str(super(BaseMemcachedCache, self).make_key(key, version)) def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): key = self.make_key(key, version=version) return self._cache.add(key, value, self.get_backend_timeout(timeout)) def get(self, key, default=None, version=None): key = self.make_key(key, version=version) val = self._cache.get(key) if val is None: return default return val def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): key = self.make_key(key, version=version) self._cache.set(key, value, self.get_backend_timeout(timeout)) def delete(self, key, version=None): key = self.make_key(key, version=version) self._cache.delete(key) def get_many(self, keys, version=None): new_keys = [self.make_key(x, version=version) for x in keys] ret = self._cache.get_multi(new_keys) if ret: _ = {} m = dict(zip(new_keys, keys)) for k, v in ret.items(): _[m[k]] = v ret = _ return ret def close(self, **kwargs): self._cache.disconnect_all() def incr(self, key, delta=1, version=None): key = self.make_key(key, version=version) # memcached doesn't support a negative delta if delta < 0: return self._cache.decr(key, -delta) try: val = self._cache.incr(key, delta) # python-memcache responds to incr on non-existent keys by # raising a ValueError, pylibmc by raising a pylibmc.NotFound # and Cmemcache returns None. In all cases, # we should raise a ValueError though. except self.LibraryValueNotFoundException: val = None if val is None: raise ValueError("Key '%s' not found" % key) return val def decr(self, key, delta=1, version=None): key = self.make_key(key, version=version) # memcached doesn't support a negative delta if delta < 0: return self._cache.incr(key, -delta) try: val = self._cache.decr(key, delta) # python-memcache responds to incr on non-existent keys by # raising a ValueError, pylibmc by raising a pylibmc.NotFound # and Cmemcache returns None. In all cases, # we should raise a ValueError though. except self.LibraryValueNotFoundException: val = None if val is None: raise ValueError("Key '%s' not found" % key) return val def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None): safe_data = {} for key, value in data.items(): key = self.make_key(key, version=version) safe_data[key] = value self._cache.set_multi(safe_data, self.get_backend_timeout(timeout)) def delete_many(self, keys, version=None): l = lambda x: self.make_key(x, version=version) self._cache.delete_multi(map(l, keys)) def clear(self): self._cache.flush_all() class MemcachedCache(BaseMemcachedCache): "An implementation of a cache binding using python-memcached" def __init__(self, server, params): import memcache super(MemcachedCache, self).__init__(server, params, library=memcache, value_not_found_exception=ValueError) @property def _cache(self): if getattr(self, '_client', None) is None: self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL) return self._client class PyLibMCCache(BaseMemcachedCache): "An implementation of a cache binding using pylibmc" def __init__(self, server, params): import pylibmc super(PyLibMCCache, self).__init__(server, params, library=pylibmc, value_not_found_exception=pylibmc.NotFound) @cached_property def _cache(self): client = self._lib.Client(self._servers) if self._options: client.behaviors = self._options return client
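# --- Hedged illustration (a sketch, not part of the backend above) ---
# get_backend_timeout() restated in one place, for eyeballing the edge cases;
# the DEFAULT_TIMEOUT sentinel (which maps to self.default_timeout) is omitted
# and the function name is local to this sketch.
import time

def memcached_expiry(timeout):
    if timeout is None:
        return 0                      # 0 means "never expire" to memcached
    if int(timeout) == 0:
        return -1                     # negative value -> expire immediately
    if timeout > 2592000:             # anything over 30 days must be sent as
        timeout += int(time.time())   # an absolute Unix timestamp
    return int(timeout)

# memcached_expiry(60) == 60; memcached_expiry(None) == 0;
# memcached_expiry(0) == -1; memcached_expiry(60 * 86400) == now + 60 days.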
bsd-3-clause
59,989,107,415,620,820
5,236,484,876,663,454,000
36.222222
98
0.611372
false
abdhaleegit/avocado-misc-tests
io/disk/ioping.py
4
2385
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Praveen K Pandey <praveen@linux.vnet.ibm.com>
#

import os

from avocado import Test
from avocado.utils import process, archive, build
from avocado.utils.software_manager import SoftwareManager


class Ioping(Test):
    """
    Disk I/O latency monitoring tool
    """

    def setUp(self):
        '''
        Build Ioping Test
        '''
        # Check for basic utilities
        smm = SoftwareManager()

        self.count = self.params.get('count', default='2')
        self.mode = self.params.get('mode', default='-C')
        self.deadline = self.params.get('deadline', default='10')
        self.period = self.params.get('period', default='10')
        self.interval = self.params.get('interval', default='1s')
        self.size = self.params.get('size', default='4k')
        self.wsize = self.params.get('wsize', default='10m')
        self.disk = self.params.get('disk', default='/home')

        for package in ['gcc', 'make']:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel(
                    "Fail to install %s required for this test." % package)

        tarball = self.fetch_asset("ioping.zip", locations="https://github.com/"
                                   "koct9i/ioping/archive/master.zip",
                                   expire='1d')
        archive.extract(tarball, self.workdir)
        self.sourcedir = os.path.join(self.workdir, 'ioping-master')

        build.make(self.sourcedir)

    def test(self):
        os.chdir(self.sourcedir)

        cmd = '%s -c %s -w %s -p %s -i %s -s %s -S %s %s' % (
            self.mode, self.count, self.deadline, self.period,
            self.interval, self.size, self.wsize, self.disk)

        if process.system('./ioping %s' % cmd, ignore_status=True, shell=True):
            self.fail("test run of %s failed" % cmd)
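# --- Sketch only (not part of the test above) ---
# With the default parameters from setUp(), the command the test assembles
# and runs looks like this:
example_cmd = '%s -c %s -w %s -p %s -i %s -s %s -S %s %s' % (
    '-C', '2', '10', '10', '1s', '4k', '10m', '/home')
# example_cmd == '-C -c 2 -w 10 -p 10 -i 1s -s 4k -S 10m /home'
# and is executed as:  ./ioping -C -c 2 -w 10 -p 10 -i 1s -s 4k -S 10m /home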
gpl-2.0
-7,850,140,508,506,735,000
7,692,420,578,425,902,000
32.591549
80
0.618449
false
SivagnanamCiena/ciscoconfparse
ciscoconfparse/ccp_util.py
3
26161
from collections import MutableSequence import itertools import sys import re import os from protocol_values import ASA_TCP_PORTS, ASA_UDP_PORTS from dns.exception import DNSException from dns.resolver import Resolver from dns import reversename, query if sys.version_info[0]<3: from ipaddr import IPv4Network, IPv6Network, IPv4Address, IPv6Address else: from ipaddress import IPv4Network, IPv6Network, IPv4Address, IPv6Address """ ccp_util.py - Parse, Query, Build, and Modify IOS-style configurations Copyright (C) 2014-2015 David Michael Pennington This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. If you need to contact the author, you can do so by emailing: mike [~at~] pennington [/dot\] net """ _IPV6_REGEX_STR = r"""(?!:::\S+?$) # Negative Lookahead for 3 colons (?P<addr> # Begin a group named 'addr' (?P<opt1>{0}(?::{0}){{7}}) # no double colons, option 1 |(?P<opt2>(?:{0}:){{1}}(?::{0}){{1,6}}) # match fe80::1 |(?P<opt3>(?:{0}:){{2}}(?::{0}){{1,5}}) # match fe80:a::1 |(?P<opt4>(?:{0}:){{3}}(?::{0}){{1,4}}) # match fe80:a:b::1 |(?P<opt5>(?:{0}:){{4}}(?::{0}){{1,3}}) # match fe80:a:b:c::1 |(?P<opt6>(?:{0}:){{5}}(?::{0}){{1,2}}) # match fe80:a:b:c:d::1 |(?P<opt7>(?:{0}:){{6}}(?::{0}){{1,1}}) # match fe80:a:b:c:d:e::1 |(?P<opt8>:(?::{0}){{1,7}}) # leading double colons |(?P<opt9>(?:{0}:){{1,7}}:) # trailing double colons |(?P<opt10>(?:::)) # bare double colons (default route) ) # End group named 'addr' """.format(r'[0-9a-fA-F]{1,4}') _IPV6_REGEX_STR_COMPRESSED1 = r"""(?!:::\S+?$)(?P<addr1>(?P<opt1_1>{0}(?::{0}){{7}})|(?P<opt1_2>(?:{0}:){{1}}(?::{0}){{1,6}})|(?P<opt1_3>(?:{0}:){{2}}(?::{0}){{1,5}})|(?P<opt1_4>(?:{0}:){{3}}(?::{0}){{1,4}})|(?P<opt1_5>(?:{0}:){{4}}(?::{0}){{1,3}})|(?P<opt1_6>(?:{0}:){{5}}(?::{0}){{1,2}})|(?P<opt1_7>(?:{0}:){{6}}(?::{0}){{1,1}})|(?P<opt1_8>:(?::{0}){{1,7}})|(?P<opt1_9>(?:{0}:){{1,7}}:)|(?P<opt1_10>(?:::)))""".format(r'[0-9a-fA-F]{1,4}') _IPV6_REGEX_STR_COMPRESSED2 = r"""(?!:::\S+?$)(?P<addr2>(?P<opt2_1>{0}(?::{0}){{7}})|(?P<opt2_2>(?:{0}:){{1}}(?::{0}){{1,6}})|(?P<opt2_3>(?:{0}:){{2}}(?::{0}){{1,5}})|(?P<opt2_4>(?:{0}:){{3}}(?::{0}){{1,4}})|(?P<opt2_5>(?:{0}:){{4}}(?::{0}){{1,3}})|(?P<opt2_6>(?:{0}:){{5}}(?::{0}){{1,2}})|(?P<opt2_7>(?:{0}:){{6}}(?::{0}){{1,1}})|(?P<opt2_8>:(?::{0}){{1,7}})|(?P<opt2_9>(?:{0}:){{1,7}}:)|(?P<opt2_10>(?:::)))""".format(r'[0-9a-fA-F]{1,4}') _IPV6_REGEX_STR_COMPRESSED3 = r"""(?!:::\S+?$)(?P<addr3>(?P<opt3_1>{0}(?::{0}){{7}})|(?P<opt3_2>(?:{0}:){{1}}(?::{0}){{1,6}})|(?P<opt3_3>(?:{0}:){{2}}(?::{0}){{1,5}})|(?P<opt3_4>(?:{0}:){{3}}(?::{0}){{1,4}})|(?P<opt3_5>(?:{0}:){{4}}(?::{0}){{1,3}})|(?P<opt3_6>(?:{0}:){{5}}(?::{0}){{1,2}})|(?P<opt3_7>(?:{0}:){{6}}(?::{0}){{1,1}})|(?P<opt3_8>:(?::{0}){{1,7}})|(?P<opt3_9>(?:{0}:){{1,7}}:)|(?P<opt3_10>(?:::)))""".format(r'[0-9a-fA-F]{1,4}') _RGX_IPV6ADDR = re.compile(_IPV6_REGEX_STR, re.VERBOSE) _RGX_IPV4ADDR = re.compile(r'^(?P<addr>\d+\.\d+\.\d+\.\d+)') _RGX_IPV4ADDR_NETMASK = re.compile( r""" (?: ^(?P<addr0>\d+\.\d+\.\d+\.\d+)$ |(?:^ 
(?:(?P<addr1>\d+\.\d+\.\d+\.\d+))(?:\s+|\/)(?:(?P<netmask>\d+\.\d+\.\d+\.\d+)) $) |^(?:\s*(?P<addr2>\d+\.\d+\.\d+\.\d+)(?:\/(?P<masklen>\d+))\s*)$ ) """, re.VERBOSE) ## Emulate the old behavior of ipaddr.IPv4Network in Python2, which can use ## IPv4Network with a host address. Google removed that in Python3's ## ipaddress.py module class IPv4Obj(object): """An object to represent IPv4 addresses and IPv4Networks. When :class:`~ccp_util.IPv4Obj` objects are compared or sorted, shorter masks are greater than longer masks. After comparing mask length, numerically higher IP addresses are greater than numerically lower IP addresses. Kwargs: - arg (str): A string containing an IPv4 address, and optionally a netmask or masklength. The following address/netmask formats are supported: "10.1.1.1/24", "10.1.1.1 255.255.255.0", "10.1.1.1/255.255.255.0" Attributes: - network_object : An IPv4Network object - ip_object : An IPv4Address object - ip : An IPv4Address object - as_binary_tuple (tuple): The address as a tuple of zero-padded binary strings - as_hex_tuple (tuple): The address as a tuple of zero-padded 8-bit hex strings - as_decimal (int): The ip address as a decimal integer - network (str): A string representing the network address - netmask (str): A string representing the netmask - prefixlen (int): An integer representing the length of the netmask - broadcast (str): A string representing the broadcast address - hostmask (str): A string representing the hostmask - numhosts (int): An integer representing the number of hosts contained in the network Returns: - an instance of :class:`~ccp_util.IPv4Obj`. """ def __init__(self, arg='127.0.0.1/32', strict=False): #RGX_IPV4ADDR = re.compile(r'^(\d+\.\d+\.\d+\.\d+)') #RGX_IPV4ADDR_NETMASK = re.compile(r'(\d+\.\d+\.\d+\.\d+)\s+(\d+\.\d+\.\d+\.\d+)') self.arg = arg mm = _RGX_IPV4ADDR_NETMASK.search(arg) ERROR = "IPv4Obj couldn't parse '{0}'".format(arg) assert (not (mm is None)), ERROR mm_result = mm.groupdict() addr = mm_result['addr0'] or mm_result['addr1'] \ or mm_result['addr2'] or '127.0.0.1' masklen = int(mm_result['masklen'] or 32) netmask = mm_result['netmask'] if netmask: ## ALWAYS check for the netmask first self.network_object = IPv4Network('{0}/{1}'.format(addr, netmask), strict=strict) self.ip_object = IPv4Address('{0}'.format(addr)) else: self.network_object = IPv4Network('{0}/{1}'.format(addr, masklen), strict=strict) self.ip_object = IPv4Address('{0}'.format(addr)) def __repr__(self): return """<IPv4Obj {0}/{1}>""".format(str(self.ip_object), self.prefixlen) def __eq__(self, val): try: if self.network_object==val.network_object: return True return False except (Exception) as e: errmsg = "'{0}' cannot compare itself to '{1}': {2}".format(self.__repr__(), val, e) raise ValueError(errmsg) def __gt__(self, val): try: val_prefixlen = int(getattr(val, 'prefixlen')) val_nobj = getattr(val, 'network_object') self_nobj = self.network_object if (self.network_object.prefixlen<val_prefixlen): # Sort shorter masks as higher... return True elif (self.network_object.prefixlen>val_prefixlen): return False elif (self_nobj>val_nobj): # If masks are equal, rely on Google's sorting... return True return False except: errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val) raise ValueError(errmsg) def __lt__(self, val): try: val_prefixlen = int(getattr(val, 'prefixlen')) val_nobj = getattr(val, 'network_object') self_nobj = self.network_object if (self.network_object.prefixlen>val_prefixlen): # Sort shorter masks as lower... 
return True elif (self.network_object.prefixlen<val_prefixlen): return False elif (self_nobj<val_nobj): # If masks are equal, rely on Google's sorting... return True return False except: errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val) raise ValueError(errmsg) def __contains__(self, val): # Used for "foo in bar"... python calls bar.__contains__(foo) try: if (self.network_object.prefixlen==0): return True elif self.network_object.prefixlen>val.network_object.prefixlen: # obvious shortcut... if this object's mask is longer than # val, this object cannot contain val return False else: #return (val.network in self.network) return (self.network<=val.network) and \ (self.broadcast>=val.broadcast) except (Exception) as e: raise ValueError("Could not check whether '{0}' is contained in '{1}': {2}".format(val, self, e)) def __hash__(self): # Python3 needs __hash__() return hash(str(self.ip_object))+hash(str(self.prefixlen)) def __iter__(self): return self.network_object.__iter__() def __next__(self): ## For Python3 iteration... return self.network_object.__next__() def next(self): ## For Python2 iteration... return self.network_object.__next__() @property def ip(self): """Returns the address as an IPv4Address object.""" return self.ip_object @property def netmask(self): """Returns the network mask as an IPv4Address object.""" return self.network_object.netmask @property def prefixlen(self): """Returns the length of the network mask as an integer.""" return self.network_object.prefixlen @property def broadcast(self): """Returns the broadcast address as an IPv4Address object.""" if sys.version_info[0]<3: return self.network_object.broadcast else: return self.network_object.broadcast_address @property def network(self): """Returns an IPv4Network object, which represents this network. """ if sys.version_info[0]<3: return self.network_object.network else: ## The ipaddress module returns an "IPAddress" object in Python3... return IPv4Network('{0}'.format(self.network_object.compressed)) @property def hostmask(self): """Returns the host mask as an IPv4Address object.""" return self.network_object.hostmask @property def version(self): """Returns the version of the object as an integer. i.e. 
4""" return 4 @property def numhosts(self): """Returns the total number of IP addresses in this network, including broadcast and the "subnet zero" address""" if sys.version_info[0]<3: return self.network_object.numhosts else: return 2**(32-self.network_object.prefixlen) @property def as_decimal(self): """Returns the IP address as a decimal integer""" num_strings = str(self.ip).split('.') num_strings.reverse() # reverse the order return sum([int(num)*(256**idx) for idx, num in enumerate(num_strings)]) @property def as_binary_tuple(self): """Returns the IP address as a tuple of zero-padded binary strings""" return tuple(['{0:08b}'.format(int(num)) for num in \ str(self.ip).split('.')]) @property def as_hex_tuple(self): """Returns the IP address as a tuple of zero-padded hex strings""" return tuple(['{0:02x}'.format(int(num)) for num in \ str(self.ip).split('.')]) @property def is_multicast(self): """Returns a boolean for whether this is a multicast address""" return self.network_object.is_multicast @property def is_private(self): """Returns a boolean for whether this is a private address""" return self.network_object.is_private @property def is_reserved(self): """Returns a boolean for whether this is a reserved address""" return self.network_object.is_reserved ## Emulate the old behavior of ipaddr.IPv6Network in Python2, which can use ## IPv6Network with a host address. Google removed that in Python3's ## ipaddress.py module class IPv6Obj(object): """An object to represent IPv6 addresses and IPv6Networks. When :class:`~ccp_util.IPv6Obj` objects are compared or sorted, shorter masks are greater than longer masks. After comparing mask length, numerically higher IP addresses are greater than numerically lower IP addresses. Kwargs: - arg (str): A string containing an IPv6 address, and optionally a netmask or masklength. The following address/netmask formats are supported: "2001::dead:beef", "2001::dead:beef/64", Attributes: - network_object : An IPv6Network object - ip_object : An IPv6Address object - ip : An IPv6Address object - as_binary_tuple (tuple): The ipv6 address as a tuple of zero-padded binary strings - as_decimal (int): The ipv6 address as a decimal integer - as_hex_tuple (tuple): The ipv6 address as a tuple of zero-padded 8-bit hex strings - network (str): A string representing the network address - netmask (str): A string representing the netmask - prefixlen (int): An integer representing the length of the netmask - broadcast: raises `NotImplementedError`; IPv6 doesn't use broadcast - hostmask (str): A string representing the hostmask - numhosts (int): An integer representing the number of hosts contained in the network Returns: - an instance of :class:`~ccp_util.IPv6Obj`. 
""" def __init__(self, arg='::1/128', strict=False): #arg= _RGX_IPV6ADDR_NETMASK.sub(r'\1/\2', arg) # mangle IOS: 'addr mask' self.arg = arg mm = _RGX_IPV6ADDR.search(arg) assert (not (mm is None)), "IPv6Obj couldn't parse {0}".format(arg) self.network_object = IPv6Network(arg, strict=strict) self.ip_object = IPv6Address(mm.group(1)) # 'address_exclude', 'compare_networks', 'hostmask', 'ipv4_mapped', 'iter_subnets', 'iterhosts', 'masked', 'max_prefixlen', 'netmask', 'network', 'numhosts', 'overlaps', 'prefixlen', 'sixtofour', 'subnet', 'supernet', 'teredo', 'with_hostmask', 'with_netmask', 'with_prefixlen' def __repr__(self): return """<IPv6Obj {0}/{1}>""".format(str(self.ip_object), self.prefixlen) def __eq__(self, val): try: if self.network_object==val.network_object: return True return False except (Exception) as e: errmsg = "'{0}' cannot compare itself to '{1}': {2}".format(self.__repr__(), val, e) raise ValueError(errmsg) def __gt__(self, val): try: val_prefixlen = int(getattr(val, 'prefixlen')) val_nobj = getattr(val, 'network_object') self_nobj = self.network_object if (self.network_object.prefixlen<val_prefixlen): # Sort shorter masks as higher... return True elif (self.network_object.prefixlen>val_prefixlen): return False elif (self_nobj>val_nobj): # If masks are equal, rely on Google's sorting... return True return False except: errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val) raise ValueError(errmsg) def __lt__(self, val): try: val_prefixlen = int(getattr(val, 'prefixlen')) val_nobj = getattr(val, 'network_object') self_nobj = self.network_object if (self.network_object.prefixlen>val_prefixlen): # Sort shorter masks as lower... return True elif (self.network_object.prefixlen<val_prefixlen): return False elif (self_nobj<val_nobj): # If masks are equal, rely on Google's sorting... return True return False except: errmsg = "{0} cannot compare itself to '{1}'".format(self.__repr__(), val) raise ValueError(errmsg) def __contains__(self, val): # Used for "foo in bar"... python calls bar.__contains__(foo) try: if (self.network_object.prefixlen==0): return True elif self.network_object.prefixlen>val.network_object.prefixlen: # obvious shortcut... if this object's mask is longer than # val, this object cannot contain val return False else: #return (val.network in self.network) return (self.network<=val.network) and \ (self.broadcast>=val.broadcast) except (Exception) as e: raise ValueError("Could not check whether '{0}' is contained in '{1}': {2}".format(val, self, e)) def __hash__(self): # Python3 needs __hash__() return hash(str(self.ip_object))+hash(str(self.prefixlen)) def __iter__(self): return self.network_object.__iter__() def __next__(self): ## For Python3 iteration... return self.network_object.__next__() def next(self): ## For Python2 iteration... 
return self.network_object.__next__() @property def ip(self): """Returns the address as an IPv6Address object.""" return self.ip_object @property def netmask(self): """Returns the network mask as an IPv6Address object.""" return self.network_object.netmask @property def prefixlen(self): """Returns the length of the network mask as an integer.""" return self.network_object.prefixlen @property def compressed(self): """Returns the IPv6 object in compressed form""" return self.network_object.compressed @property def exploded(self): """Returns the IPv6 object in exploded form""" return self.network_object.exploded @property def packed(self): """Returns the IPv6 object in packed form""" return self.network_object.packed @property def broadcast(self): raise NotImplementedError("IPv6 does not have broadcasts") @property def network(self): """Returns an IPv6Network object, which represents this network. """ if sys.version_info[0]<3: return self.network_object.network else: ## The ipaddress module returns an "IPAddress" object in Python3... return IPv6Network('{0}'.format(self.network_object.compressed)) @property def hostmask(self): """Returns the host mask as an IPv6Address object.""" return self.network_object.hostmask @property def version(self): """Returns the version of the object as an integer. i.e. 4""" return 6 @property def numhosts(self): """Returns the total number of IP addresses in this network, including broadcast and the "subnet zero" address""" if sys.version_info[0]<3: return self.network_object.numhosts else: return 2**(128-self.network_object.prefixlen) @property def as_decimal(self): """Returns the IP address as a decimal integer""" num_strings = str(self.ip.exploded).split(':') num_strings.reverse() # reverse the order return sum([int(num, 16)*(256**idx) for idx, num in enumerate(num_strings)]) @property def as_binary_tuple(self): """Returns the IPv6 address as a tuple of zero-padded 8-bit binary strings""" nested_list = [ ['{0:08b}'.format(int(ii, 16)) for ii in [num[0:2], num[2:4]]] for num in str(self.ip.exploded).split(':')] return tuple(itertools.chain(*nested_list)) @property def as_hex_tuple(self): """Returns the IPv6 address as a tuple of zero-padded 8-bit hex strings""" nested_list = [ ['{0:02x}'.format(int(ii, 16)) for ii in [num[0:2], num[2:4]]] for num in str(self.ip.exploded).split(':')] return tuple(itertools.chain(*nested_list)) @property def is_multicast(self): """Returns a boolean for whether this is a multicast address""" return self.network_object.is_multicast @property def is_private(self): """Returns a boolean for whether this is a private address""" return self.network_object.is_private @property def is_reserved(self): """Returns a boolean for whether this is a reserved address""" return self.network_object.is_reserved @property def is_link_local(self): """Returns a boolean for whether this is an IPv6 link-local address""" return self.network_object.is_link_local @property def is_site_local(self): """Returns a boolean for whether this is an IPv6 site-local address""" return self.network_object.is_site_local @property def is_unspecified(self): """Returns a boolean for whether this address is not otherwise classified""" return self.network_object.is_unspecified @property def teredo(self): return self.network_object.teredo @property def sixtofour(self): return self.network_object.sixtofour class L4Object(object): """Object for Transport-layer protocols; the object ensures that logical operators (such as le, gt, eq, and ne) are parsed correctly, as well as 
mapping service names to port numbers""" def __init__(self, protocol='', port_spec='', syntax=''): self.protocol = protocol self.port_list = list() self.syntax = syntax try: port_spec = port_spec.strip() except: port_spec = port_spec if syntax=='asa': if protocol=='tcp': ports = ASA_TCP_PORTS elif protocol=='udp': ports = ASA_UDP_PORTS else: raise NotImplementedError("'{0}' is not supported: '{0}'".format(protocol)) else: raise NotImplementedError("This syntax is unknown: '{0}'".format(syntax)) if 'eq ' in port_spec: port_str = re.split('\s+', port_spec)[-1] self.port_list = [int(ports.get(port_str, port_str))] elif re.search(r'^\S+$', port_spec): # Technically, 'eq ' is optional... self.port_list = [int(ports.get(port_spec, port_spec))] elif 'range ' in port_spec: port_tmp = re.split('\s+', port_spec)[1:] self.port_list = range(int(ports.get(port_tmp[0], port_tmp[0])), int(ports.get(port_tmp[1], port_tmp[1])) + 1) elif 'lt ' in port_spec: port_str = re.split('\s+', port_spec)[-1] self.port_list = range(1, int(ports.get(port_str, port_str))) elif 'gt ' in port_spec: port_str = re.split('\s+', port_spec)[-1] self.port_list = range(int(ports.get(port_str, port_str)) + 1, 65535) elif 'neq ' in port_spec: port_str = re.split('\s+', port_spec)[-1] tmp = set(range(1, 65535)) tmp.remove(int(port_str)) self.port_list = sorted(tmp) def __eq__(self, val): if (self.protocol==val.protocol) and (self.port_list==val.port_list): return True return False def __repr__(self): return "<L4Object {0} {1}>".format(self.protocol, self.port_list) def dns_lookup(input, timeout=3, server=''): """Perform a simple DNS lookup, return results in a dictionary""" resolver = Resolver() resolver.timeout = float(timeout) resolver.lifetime = float(timeout) if server: resolver.nameservers = [server] try: records = resolver.query(input, 'A') return {'addrs': [ii.address for ii in records], 'error': '', 'name': input, } except DNSException as e: return {'addrs': [], 'error': repr(e), 'name': input, } def dns6_lookup(input, timeout=3, server=''): """Perform a simple DNS lookup, return results in a dictionary""" resolver = Resolver() resolver.timeout = float(timeout) resolver.lifetime = float(timeout) if server: resolver.nameservers = [server] try: records = resolver.query(input, 'AAAA') return {'addrs': [ii.address for ii in records], 'error': '', 'name': input, } except DNSException as e: return {'addrs': [], 'error': repr(e), 'name': input, } _REVERSE_DNS_REGEX = re.compile(r'^\s*\d+\.\d+\.\d+\.\d+\s*$') def reverse_dns_lookup(input, timeout=3, server=''): """Perform a simple reverse DNS lookup, return results in a dictionary""" assert _REVERSE_DNS_REGEX.search(input), "Invalid address format: '{0}'".format(input) resolver = Resolver() resolver.timeout = float(timeout) resolver.lifetime = float(timeout) if server: resolver.nameservers = [server] try: tmp = input.strip().split('.') tmp.reverse() inaddr = '.'.join(tmp) + ".in-addr.arpa" records = resolver.query(inaddr, 'PTR') return {'name': records[0].to_text(), 'lookup': inaddr, 'error': '', 'addr': input, } except DNSException as e: return {'addrs': [], 'lookup': inaddr, 'error': repr(e), 'name': input, }
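# --- Hedged usage sketch (illustration only, not part of ccp_util.py) ---
# How IPv4Obj is meant to be used, per its docstring above; the addresses are
# made up and the exact type returned by .network depends on whether the
# ipaddr (Python 2) or ipaddress (Python 3) module is in use.
net = IPv4Obj('10.1.1.1/24')            # '10.1.1.1 255.255.255.0' also parses
assert net.prefixlen == 24
assert str(net.broadcast) == '10.1.1.255'
assert net.as_decimal == 167837953      # 10.1.1.1 as an integer
assert IPv4Obj('10.1.0.0/16') > IPv4Obj('10.1.1.0/24')   # shorter masks sort higher
assert IPv4Obj('10.1.1.5/32') in IPv4Obj('10.1.1.0/24')  # __contains__ is a subnet test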
gpl-3.0
1,544,741,415,792,604,700
-6,788,934,940,715,790,000
39.434312
440
0.571385
false
agentfog/qiime
qiime/filter.py
15
26099
#!/usr/bin/env python # File created on 18 May 2010 from __future__ import division __author__ = "Greg Caporaso" __copyright__ = "Copyright 2011, The QIIME Project" __credits__ = ["Greg Caporaso", "Will Van Treuren", "Daniel McDonald", "Jai Ram Rideout", "Yoshiki Vazquez Baeza"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "Greg Caporaso" __email__ = "gregcaporaso@gmail.com" from collections import defaultdict from random import shuffle, sample from numpy import array, inf from skbio.parse.sequences import parse_fasta, parse_fastq from skbio.format.sequences import format_fastq_record from biom import load_table from qiime.parse import (parse_distmat, parse_mapping_file, parse_metadata_state_descriptions) from qiime.format import format_distance_matrix, format_mapping_file from qiime.util import MetadataMap def get_otu_ids_from_taxonomy_f(positive_taxa=None, negative_taxa=None, metadata_field="taxonomy"): """ return function to pass to Table.filter_observations for taxon-based filtering positive_taxa : a list of strings that will be compared to each taxonomy level in an observation's (i.e., OTU's) metadata_field. If one of the levels matches exactly (except for case) to an item in positive_taxa, that OTU will be marked for retention. Default: All OTUs are retained. negative_taxa : a list of strings that will be compared to each taxonomy level in an observation's (i.e., OTU's) metadata_field. If one of the levels matches exactly (except for case) to an item in negative_taxa, that OTU will be marked for removal. Default: All OTUs are retained. metadata_field : the metadata field to look up in the observation metadata Note: string matches are case insensitive. """ # define a positive screening function - if the user doesn't pass # positive_taxa, all OTUs will pass this filter # (i.e., be marked for retention) if positive_taxa is None: positive_taxa = set() def positive_screen(e): return True else: positive_taxa = set([t.strip().lower() for t in positive_taxa]) def positive_screen(e): return e in positive_taxa # define a negative screening function - if the user doesn't pass # negative_taxa, all OTUs will pass this filter # (i.e., be marked for retention) if negative_taxa is None: negative_taxa = set() def negative_screen(e): return False else: negative_taxa = set([t.strip().lower() for t in negative_taxa]) def negative_screen(e): return e in negative_taxa # The positive_taxa and negative_taxa lists must be mutually exclusive. if len(positive_taxa & negative_taxa) != 0: raise ValueError("Your positive and negative taxa lists contain " "overlapping values. 
These lists must be mutually " "exclusive.\nOffending values are: %s" % ' '.join(positive_taxa & negative_taxa)) # Define the function that can be passed to Table.filter_observations def result(v, oid, md): positive_hit = False negative_hit = False for e in md[metadata_field]: if positive_screen(e.strip().lower()): # Note that we don't want to just do # positive_hit = positive_screen(e.strip()) # we're checking whether any e hits the positive taxa # and doing that be the same as # positive_hit = md[metadata_field][-1] positive_hit = True if negative_screen(e.strip().lower()): # see note in previous if statement for why we don't use # negative_hit = negative_screen(e.strip()) negative_hit = True return positive_hit and not negative_hit return result def sample_ids_from_metadata_description(mapping_f, valid_states_str): """ Given a description of metadata, return the corresponding sample ids """ map_data, map_header, map_comments = parse_mapping_file(mapping_f) valid_states = parse_metadata_state_descriptions(valid_states_str) sample_ids = get_sample_ids(map_data, map_header, valid_states) if len(sample_ids) < 1: raise ValueError("All samples have been filtered out for the criteria" " described in the valid states") return sample_ids def get_sample_ids(map_data, map_header, states): """Takes col states in {col:[vals]} format. If val starts with !, exclude rather than include. Combines cols with and, states with or. For example, Study:Dog,Hand will return rows where Study is Dog or Hand; Study:Dog,Hand;BodySite:Palm,Stool will return rows where Study is Dog or Hand _and_ BodySite is Palm or Stool; Study:*,!Dog;BodySite:*,!Stool will return all rows except the ones where the Study is Dog or the BodySite is Stool. """ name_to_col = dict([(s, map_header.index(s)) for s in states]) good_ids = [] for row in map_data: # remember to exclude header include = True for s, vals in states.items(): curr_state = row[name_to_col[s]] include = include and (curr_state in vals or '*' in vals) \ and not '!' + curr_state in vals if include: good_ids.append(row[0]) return good_ids def sample_ids_from_category_state_coverage(mapping_f, coverage_category, subject_category, min_num_states=None, required_states=None, considered_states=None, splitter_category=None): """Filter sample IDs based on subject's coverage of a category. Given a category that groups samples by subject (subject_category), samples are filtered by how well a subject covers (i.e. has at least one sample for) the category states in coverage_category. Two filtering criteria are provided (min_num_states and required_states). At least one must be provided. If both are provided, the subject must meet both criteria to pass the filter (i.e. providing both filters is an AND, not an OR, operation). A common use case is to provide a 'time' category for coverage_category and an 'individual' category for subject_category in order to filter out individuals from a study that do not have samples for some minimum number of timepoints (min_num_states) and that do not have samples for certain timepoints (required_states). For example, this could be the first and last timepoints in the study. Returns a set of sample IDs to keep, the number of subjects that were kept, and a set of the unique category states in coverage_category that were kept. The set of sample IDs is not guaranteed to be in any specific order relative to the order of sample IDs or subjects in the mapping file. 
Arguments: mapping_f - metadata mapping file (file-like object) coverage_category - category to test subjects' coverage (string) subject_category - category to group samples by subject (string) min_num_states - minimum number of category states in coverage_category that a subject must cover (i.e. have at least one sample for) to be included in results (integer) required_states - category states in coverage_category that must be covered by a subject's samples in order to be included in results (list of strings or items that can be converted to strings) considered_states - category states that are counted toward the min_num_states (list of strings or items that can be converted to strings) splitter_category - category to split input mapping file on prior to processing. If not supplied, the mapping file will not be split. If supplied, a dictionary mapping splitter_category state to results will be returned instead of the three-element tuple. The supplied filtering criteria will apply to each split piece of the mapping file independently (e.g. if an individual passes the filters for the tongue samples, his/her tongue samples will be included for the tongue results, even if he/she doesn't pass the filters for the palm samples) """ metadata_map = MetadataMap.parseMetadataMap(mapping_f) # Make sure our input looks sane. categories_to_test = [coverage_category, subject_category] if splitter_category is not None: categories_to_test.append(splitter_category) if 'SampleID' in categories_to_test: raise ValueError("The 'SampleID' category is not suitable for use in " "this function. Please choose a different category " "from the metadata mapping file.") for category in categories_to_test: if category not in metadata_map.CategoryNames: raise ValueError("The category '%s' is not in the metadata " "mapping file." % category) if len(set(categories_to_test)) < len(categories_to_test): raise ValueError("The coverage, subject, and (optional) splitter " "categories must all be unique.") if required_states is not None: # required_states must be in coverage_category's states in the mapping # file. required_states = set(map(str, required_states)) valid_coverage_states = set(metadata_map.getCategoryValues( metadata_map.sample_ids, coverage_category)) invalid_coverage_states = required_states - valid_coverage_states if invalid_coverage_states: raise ValueError("The category state(s) '%s' are not in the '%s' " "category in the metadata mapping file." % (', '.join(invalid_coverage_states), coverage_category)) if considered_states is not None: # considered_states is not as restrictive as required_states - we don't # require that these are present, so it's OK if some of the states # listed here don't actually show up in the mapping file (allowing # the user to pass something like range(100) to consider only states # that fall in some range) considered_states = set(map(str, considered_states)) # define a function to determine if a state should be considered consider_state = lambda s: s in considered_states else: # define a dummy function to consider all states (the default # if the user does not provide a list of considered_states) consider_state = lambda s: True if min_num_states is None and required_states is None: raise ValueError("You must specify either the minimum number of " "category states the subject must have samples for " "(min_num_states), or the minimal category states " "the subject must have samples for " "(required_states), or both. 
Supplying neither " "filtering criteria is not supported.") if splitter_category is None: results = _filter_sample_ids_from_category_state_coverage( metadata_map, metadata_map.sample_ids, coverage_category, subject_category, consider_state, min_num_states, required_states) else: # "Split" the metadata mapping file by extracting only sample IDs that # match the current splitter category state and using those for the # actual filtering. splitter_category_states = defaultdict(list) for samp_id in metadata_map.sample_ids: splitter_category_state = \ metadata_map.getCategoryValue(samp_id, splitter_category) splitter_category_states[splitter_category_state].append(samp_id) results = {} for splitter_category_state, sample_ids in \ splitter_category_states.items(): results[splitter_category_state] = \ _filter_sample_ids_from_category_state_coverage( metadata_map, sample_ids, coverage_category, subject_category, consider_state, min_num_states, required_states) return results def _filter_sample_ids_from_category_state_coverage(metadata_map, sample_ids, coverage_category, subject_category, consider_state_fn, min_num_states=None, required_states=None): """Helper function to perform filtering based on category state coverage. Not explicitly unit-tested because it is implicitly tested by sample_ids_from_category_state_coverage's unit tests. """ # Build mapping from subject to sample IDs. subjects = defaultdict(list) for samp_id in sample_ids: subject = metadata_map.getCategoryValue(samp_id, subject_category) subjects[subject].append(samp_id) # Perform filtering. samp_ids_to_keep = [] num_subjects_kept = 0 states_kept = [] for subject, samp_ids in subjects.items(): subject_covered_states = set( metadata_map.getCategoryValues(samp_ids, coverage_category)) # Short-circuit evaluation of ANDing filters. 
keep_subject = True if min_num_states is not None: # note: when summing a list of boolean values, True == 1 and # False == 0 if sum([consider_state_fn(s) for s in subject_covered_states]) < \ min_num_states: keep_subject = False if keep_subject and required_states is not None: if len(subject_covered_states & required_states) != \ len(required_states): keep_subject = False if keep_subject: samp_ids_to_keep.extend(samp_ids) states_kept.extend(subject_covered_states) num_subjects_kept += 1 return set(samp_ids_to_keep), num_subjects_kept, set(states_kept) def filter_fasta(input_seqs_f, output_seqs_f, seqs_to_keep, negate=False, seqid_f=None): """ Write filtered input_seqs to output_seqs_f which contains only seqs_to_keep input_seqs can be the output of parse_fasta or parse_fastq """ if seqid_f is None: seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0] for seq_id in seqs_to_keep]) # Define a function based on the value of negate if not negate: def keep_seq(seq_id): return seq_id.split()[0] in seqs_to_keep_lookup else: def keep_seq(seq_id): return seq_id.split()[0] not in seqs_to_keep_lookup else: if not negate: keep_seq = seqid_f else: keep_seq = lambda x: not seqid_f(x) for seq_id, seq in parse_fasta(input_seqs_f): if keep_seq(seq_id): output_seqs_f.write('>%s\n%s\n' % (seq_id, seq)) output_seqs_f.close() def filter_fastq(input_seqs_f, output_seqs_f, seqs_to_keep, negate=False, seqid_f=None): """ Write filtered input_seqs to output_seqs_f which contains only seqs_to_keep input_seqs can be the output of parse_fasta or parse_fastq """ if seqid_f is None: seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0] for seq_id in seqs_to_keep]) # Define a function based on the value of negate if not negate: def keep_seq(seq_id): return seq_id.split()[0] in seqs_to_keep_lookup else: def keep_seq(seq_id): return seq_id.split()[0] not in seqs_to_keep_lookup else: if not negate: keep_seq = seqid_f else: keep_seq = lambda x: not seqid_f(x) for seq_id, seq, qual in parse_fastq(input_seqs_f, enforce_qual_range=False): if keep_seq(seq_id): output_seqs_f.write(format_fastq_record(seq_id, seq, qual)) output_seqs_f.close() def filter_mapping_file(map_data, map_header, good_sample_ids, include_repeat_cols=False, column_rename_ids=None): """Filters map according to several criteria. 
- keep only sample ids in good_sample_ids - drop cols that are different in every sample (except id) - drop cols that are the same in every sample """ # keeping samples to_keep = [] to_keep.extend([i for i in map_data if i[0] in good_sample_ids]) # keeping columns headers = [] to_keep = zip(*to_keep) headers.append(map_header[0]) result = [to_keep[0]] if column_rename_ids: # reduce in 1 as we are not using the first colum (SampleID) column_rename_ids = column_rename_ids - 1 for i, l in enumerate(to_keep[1:-1]): if i == column_rename_ids: if len(set(l)) != len(result[0]): raise ValueError( "The column to rename the samples is not unique.") result.append(result[0]) result[0] = l headers.append('SampleID_was_' + map_header[i + 1]) elif include_repeat_cols or len(set(l)) > 1: headers.append(map_header[i + 1]) result.append(l) else: for i, l in enumerate(to_keep[1:-1]): if include_repeat_cols or len(set(l)) > 1: headers.append(map_header[i + 1]) result.append(l) headers.append(map_header[-1]) result.append(to_keep[-1]) result = map(list, zip(*result)) return headers, result def filter_mapping_file_from_mapping_f( mapping_f, sample_ids_to_keep, negate=False): """ Filter rows from a metadata mapping file """ mapping_data, header, comments = parse_mapping_file(mapping_f) filtered_mapping_data = [] sample_ids_to_keep = {}.fromkeys(sample_ids_to_keep) for mapping_datum in mapping_data: hit = mapping_datum[0] in sample_ids_to_keep if hit and not negate: filtered_mapping_data.append(mapping_datum) elif not hit and negate: filtered_mapping_data.append(mapping_datum) else: pass return format_mapping_file(header, filtered_mapping_data) def filter_mapping_file_by_metadata_states(mapping_f, valid_states_str): sample_ids_to_keep = sample_ids_from_metadata_description( mapping_f, valid_states_str) mapping_f.seek(0) return filter_mapping_file_from_mapping_f(mapping_f, sample_ids_to_keep) def filter_samples_from_distance_matrix(dm, samples_to_discard, negate=False): """ Remove specified samples from distance matrix dm: (sample_ids, dm_data) tuple, as returned from qiime.parse.parse_distmat; or a file handle that can be passed to qiime.parse.parse_distmat """ try: sample_ids, dm_data = dm except ValueError: # input was provide as a file handle sample_ids, dm_data = parse_distmat(dm) sample_lookup = {}.fromkeys([e.split()[0] for e in samples_to_discard]) temp_dm_data = [] new_dm_data = [] new_sample_ids = [] if negate: def keep_sample(s): return s in sample_lookup else: def keep_sample(s): return s not in sample_lookup for row, sample_id in zip(dm_data, sample_ids): if keep_sample(sample_id): temp_dm_data.append(row) new_sample_ids.append(sample_id) temp_dm_data = array(temp_dm_data).transpose() for col, sample_id in zip(temp_dm_data, sample_ids): if keep_sample(sample_id): new_dm_data.append(col) new_dm_data = array(new_dm_data).transpose() return format_distance_matrix(new_sample_ids, new_dm_data) def negate_tips_to_keep(tips_to_keep, tree): """ Return the list of tips in the tree that are not in tips_to_keep""" tips_to_keep = set(tips_to_keep) # trees can return node names in ways that have multiple quotes, e.g. # '"node_1"' or ''node_1''. 
remove them or it can cause problems with # tips_to_keep not matching tmp_tips = set([tip.Name for tip in tree.tips()]) tips = set([t.strip('\'').strip('\"') for t in tmp_tips]) return tips - tips_to_keep def get_seqs_to_keep_lookup_from_biom(biom_f): otu_table = load_table(biom_f) return set(otu_table.ids(axis='observation')) def get_seqs_to_keep_lookup_from_seq_id_file(id_to_keep_f): """generate a lookup dict of chimeras in chimera file.""" return ( set([l.split()[0].strip() for l in id_to_keep_f if l.strip() and not l.startswith('#')]) ) get_seq_ids_from_seq_id_file = get_seqs_to_keep_lookup_from_seq_id_file def get_seqs_to_keep_lookup_from_fasta_file(fasta_f): """return the sequence ids within the fasta file""" return ( set([seq_id.split()[0] for seq_id, seq in parse_fasta(fasta_f)]) ) get_seq_ids_from_fasta_file = get_seqs_to_keep_lookup_from_fasta_file # start functions used by filter_samples_from_otu_table.py and # filter_otus_from_otu_table.py def get_filter_function(ids_to_keep, min_count, max_count, min_nonzero, max_nonzero, negate_ids_to_keep=False): if negate_ids_to_keep: def f(data_vector, id_, metadata): return (id_ not in ids_to_keep) and \ (min_count <= data_vector.sum() <= max_count) and \ (min_nonzero <= (data_vector > 0).sum() <= max_nonzero) else: def f(data_vector, id_, metadata): return (id_ in ids_to_keep) and \ (min_count <= data_vector.sum() <= max_count) and \ (min_nonzero <= (data_vector > 0).sum() <= max_nonzero) return f def filter_samples_from_otu_table(otu_table, ids_to_keep, min_count, max_count, negate_ids_to_keep=False): filter_f = get_filter_function({}.fromkeys(ids_to_keep), min_count, max_count, 0, inf, negate_ids_to_keep) return otu_table.filter(filter_f, axis='sample', inplace=False) def filter_otus_from_otu_table(otu_table, ids_to_keep, min_count, max_count, min_samples, max_samples, negate_ids_to_keep=False): filter_f = get_filter_function({}.fromkeys(ids_to_keep), min_count, max_count, min_samples, max_samples, negate_ids_to_keep) return otu_table.filter(filter_f, axis='observation', inplace=False) # end functions used by filter_samples_from_otu_table.py and # filter_otus_from_otu_table.py def filter_otu_table_to_n_samples(otu_table, n): """ Filter OTU table to n random samples. If n is greater than the number of samples or less than zero a ValueError will be raised. """ if not (0 < n <= len(otu_table.ids())): raise ValueError("Number of samples to filter must be between 0 and " "the number of samples.") return otu_table.subsample(n, axis='sample', by_id=True) def filter_otus_from_otu_map(input_otu_map_fp, output_otu_map_fp, min_count, min_sample_count=1): """ Filter otus with fewer than min_count sequences from input_otu_map_fp With very large data sets the number of singletons can be very large, and it becomes more efficent to filter them at the otu map stage than the otu table stage. There are two outputs from this function: the output file (which is the filtered otu map) and the list of retained otu ids as a set. Since I need to return the retained ids for pick_open_reference_otus, this takes filepaths instead of file handles (since it can't be a generator and return something). 
""" results = set() output_otu_map_f = open(output_otu_map_fp, 'w') for line in open(input_otu_map_fp, 'U'): fields = line.strip().split('\t') sample_ids = set([e.split('_')[0] for e in fields[1:]]) # only write this line if the otu has more than n sequences (so # greater than n tab-separated fields including the otu identifier) if (len(fields) > min_count) and (len(sample_ids) >= min_sample_count): output_otu_map_f.write(line) results.add(fields[0].split('\t')[0]) output_otu_map_f.close() return results def filter_tree(tree, tips_to_keep): result = tree.copy() # don't use this, it doesn't eliminate tips! # result = tree.getSubTree(tips_to_keep,ignore_missing=True) def f(node): if node.istip() and\ node.Name is not None and\ node.Name not in tips_to_keep and\ node.Name.strip().strip('"').strip("'") not in tips_to_keep: return True return False result.removeDeleted(f) result.prune() return result
gpl-2.0
-5,360,955,507,929,916,000
-3,387,648,447,244,601,000
40.426984
86
0.60351
false
viaregio/cartridge
cartridge/shop/tests.py
2
20578
from datetime import timedelta from decimal import Decimal from operator import mul from django.core.urlresolvers import reverse from django.test import TestCase from django.test.client import RequestFactory from django.utils.timezone import now from django.utils.unittest import skipUnless from mezzanine.conf import settings from mezzanine.core.models import CONTENT_STATUS_PUBLISHED from mezzanine.utils.tests import run_pyflakes_for_package from mezzanine.utils.tests import run_pep8_for_package from cartridge.shop.models import Product, ProductOption, ProductVariation from cartridge.shop.models import Category, Cart, Order, DiscountCode from cartridge.shop.models import Sale from cartridge.shop.forms import OrderForm from cartridge.shop.checkout import CHECKOUT_STEPS TEST_STOCK = 5 TEST_PRICE = Decimal("20") class ShopTests(TestCase): def setUp(self): """ Set up test data - category, product and options. """ self._published = {"status": CONTENT_STATUS_PUBLISHED} self._category = Category.objects.create(**self._published) self._product = Product.objects.create(**self._published) for option_type in settings.SHOP_OPTION_TYPE_CHOICES: for i in range(10): name = "test%s" % i ProductOption.objects.create(type=option_type[0], name=name) self._options = ProductOption.objects.as_fields() def test_views(self): """ Test the main shop views for errors. """ # Category. response = self.client.get(self._category.get_absolute_url()) self.assertEqual(response.status_code, 200) # Product. response = self.client.get(self._product.get_absolute_url()) self.assertEqual(response.status_code, 200) # Cart. response = self.client.get(reverse("shop_cart")) self.assertEqual(response.status_code, 200) # Checkout. response = self.client.get(reverse("shop_checkout")) self.assertEqual(response.status_code, 200 if not settings.SHOP_CHECKOUT_ACCOUNT_REQUIRED else 302) def test_variations(self): """ Test creation of variations from options, and management of empty variations. """ total = reduce(mul, [len(v) for v in self._options.values()]) # Clear variations. self._product.variations.all().delete() self.assertEqual(self._product.variations.count(), 0) # Create single empty variation. self._product.variations.manage_empty() self.assertEqual(self._product.variations.count(), 1) # Create variations from all options. self._product.variations.create_from_options(self._options) # Should do nothing. self._product.variations.create_from_options(self._options) # All options plus empty. self.assertEqual(self._product.variations.count(), total + 1) # Remove empty. self._product.variations.manage_empty() self.assertEqual(self._product.variations.count(), total) def test_stock(self): """ Test stock checking on product variations. """ self._product.variations.all().delete() self._product.variations.manage_empty() variation = self._product.variations.all()[0] variation.num_in_stock = TEST_STOCK # Check stock field not in use. self.assertTrue(variation.has_stock()) # Check available and unavailable quantities. self.assertTrue(variation.has_stock(TEST_STOCK)) self.assertFalse(variation.has_stock(TEST_STOCK + 1)) # Check sold out. variation = self._product.variations.all()[0] variation.num_in_stock = 0 self.assertFalse(variation.has_stock()) def assertCategoryFilteredProducts(self, num_products): """ Tests the number of products returned by the category's current filters. 
""" products = Product.objects.filter(self._category.filters()) self.assertEqual(products.distinct().count(), num_products) def test_category_filters(self): """ Test the category filters returns expected results. """ self._product.variations.all().delete() self.assertCategoryFilteredProducts(0) # Test option filters - add a variation with one option, and # assign another option as a category filter. Check that no # products match the filters, then add the first option as a # category filter and check that the product is matched. option_field, options = self._options.items()[0] option1, option2 = options[:2] # Variation with the first option. self._product.variations.create_from_options({option_field: [option1]}) # Filter with the second option option = ProductOption.objects.get(type=option_field[-1], name=option2) self.assertCategoryFilteredProducts(0) # First option as a filter. option = ProductOption.objects.get(type=option_field[-1], name=option1) self._category.options.add(option) self.assertCategoryFilteredProducts(1) # Test price filters - add a price filter that when combined # with previously created filters, should match no products. # Update the variations to match the filter for a unit price, # then with sale prices, checking correct matches based on sale # dates. self._category.combined = True self._category.price_min = TEST_PRICE self.assertCategoryFilteredProducts(0) self._product.variations.all().update(unit_price=TEST_PRICE) self.assertCategoryFilteredProducts(1) n, d = now(), timedelta(days=1) tomorrow, yesterday = n + d, n - d self._product.variations.all().update(unit_price=0, sale_price=TEST_PRICE, sale_from=tomorrow) self.assertCategoryFilteredProducts(0) self._product.variations.all().update(sale_from=yesterday) self.assertCategoryFilteredProducts(1) # Clean up previously added filters and check that explicitly # assigned products match. for option in self._category.options.all(): self._category.options.remove(option) self._category.price_min = None self.assertCategoryFilteredProducts(0) self._category.products.add(self._product) self.assertCategoryFilteredProducts(1) # Test the ``combined`` field - create a variation which # matches a price filter, and a separate variation which # matches an option filter, and check that the filters # have no results when ``combined`` is set, and that the # product matches when ``combined`` is disabled. self._product.variations.all().delete() self._product.variations.create_from_options({option_field: [option1, option2]}) # Price variation and filter. variation = self._product.variations.get(**{option_field: option1}) variation.unit_price = TEST_PRICE variation.save() self._category.price_min = TEST_PRICE # Option variation and filter. option = ProductOption.objects.get(type=option_field[-1], name=option2) self._category.options.add(option) # Check ``combined``. self._category.combined = True self.assertCategoryFilteredProducts(0) self._category.combined = False self.assertCategoryFilteredProducts(1) def _add_to_cart(self, variation, quantity): """ Given a variation, creates the dict for posting to the cart form to add the variation, and posts it. """ field_names = [f.name for f in ProductVariation.option_fields()] data = dict(zip(field_names, variation.options())) data["quantity"] = quantity self.client.post(variation.product.get_absolute_url(), data) def _empty_cart(self, cart): """ Given a cart, creates the dict for posting to the cart form to remove all items from the cart, and posts it. 
""" data = {"items-INITIAL_FORMS": 0, "items-TOTAL_FORMS": 0, "update_cart": 1} for i, item in enumerate(cart): data["items-INITIAL_FORMS"] += 1 data["items-TOTAL_FORMS"] += 1 data["items-%s-id" % i] = item.id data["items-%s-DELETE" % i] = "on" self.client.post(reverse("shop_cart"), data) def _reset_variations(self): """ Recreates variations and sets up the first. """ self._product.variations.all().delete() self._product.variations.create_from_options(self._options) variation = self._product.variations.all()[0] variation.unit_price = TEST_PRICE variation.num_in_stock = TEST_STOCK * 2 variation.save() def test_cart(self): """ Test the cart object and cart add/remove forms. """ # Test initial cart. cart = Cart.objects.from_request(self.client) self.assertFalse(cart.has_items()) self.assertEqual(cart.total_quantity(), 0) self.assertEqual(cart.total_price(), Decimal("0")) # Add quantity and check stock levels / cart totals. self._reset_variations() variation = self._product.variations.all()[0] self._add_to_cart(variation, TEST_STOCK) cart = Cart.objects.from_request(self.client) variation = self._product.variations.all()[0] self.assertTrue(variation.has_stock(TEST_STOCK)) self.assertFalse(variation.has_stock(TEST_STOCK * 2)) self.assertTrue(cart.has_items()) self.assertEqual(cart.total_quantity(), TEST_STOCK) self.assertEqual(cart.total_price(), TEST_PRICE * TEST_STOCK) # Add remaining quantity and check again. self._add_to_cart(variation, TEST_STOCK) cart = Cart.objects.from_request(self.client) variation = self._product.variations.all()[0] self.assertFalse(variation.has_stock()) self.assertTrue(cart.has_items()) self.assertEqual(cart.total_quantity(), TEST_STOCK * 2) self.assertEqual(cart.total_price(), TEST_PRICE * TEST_STOCK * 2) # Remove from cart. self._empty_cart(cart) cart = Cart.objects.from_request(self.client) variation = self._product.variations.all()[0] self.assertTrue(variation.has_stock(TEST_STOCK * 2)) self.assertFalse(cart.has_items()) self.assertEqual(cart.total_quantity(), 0) self.assertEqual(cart.total_price(), Decimal("0")) def test_discount_codes(self): """ Test that all types of discount codes are applied. """ self._reset_variations() variation = self._product.variations.all()[0] invalid_product = Product.objects.create(**self._published) invalid_product.variations.create_from_options(self._options) invalid_variation = invalid_product.variations.all()[0] invalid_variation.unit_price = TEST_PRICE invalid_variation.num_in_stock = TEST_STOCK * 2 invalid_variation.save() discount_value = TEST_PRICE / 2 # Set up discounts with and without a specific product, for # each type of discount. for discount_target in ("cart", "item"): for discount_type in ("percent", "deduct"): code = "%s_%s" % (discount_target, discount_type) kwargs = { "code": code, "discount_%s" % discount_type: discount_value, "active": True, } cart = Cart.objects.from_request(self.client) self._empty_cart(cart) self._add_to_cart(variation, 1) self._add_to_cart(invalid_variation, 1) discount = DiscountCode.objects.create(**kwargs) if discount_target == "item": discount.products.add(variation.product) post_data = {"discount_code": code} self.client.post(reverse("shop_cart"), post_data) discount_total = self.client.session["discount_total"] if discount_type == "percent": expected = TEST_PRICE / Decimal("100") * discount_value if discount_target == "cart": # Excpected amount applies to entire cart. 
cart = Cart.objects.from_request(self.client) expected *= cart.items.count() elif discount_type == "deduct": expected = discount_value self.assertEqual(discount_total, expected) if discount_target == "item": # Test discount isn't applied for an invalid product. cart = Cart.objects.from_request(self.client) self._empty_cart(cart) self._add_to_cart(invalid_variation, 1) self.client.post(reverse("shop_cart"), post_data) discount_total = self.client.session.get("discount_total") self.assertEqual(discount_total, None) def test_order(self): """ Test that a completed order contains cart items and that they're removed from stock. """ # Add to cart. self._reset_variations() variation = self._product.variations.all()[0] self._add_to_cart(variation, TEST_STOCK) cart = Cart.objects.from_request(self.client) # Post order. data = { "step": len(CHECKOUT_STEPS), "billing_detail_email": "example@example.com", "discount_code": "", } for field_name, field in OrderForm(None, None).fields.items(): value = field.choices[-1][1] if hasattr(field, "choices") else "1" data.setdefault(field_name, value) self.client.post(reverse("shop_checkout"), data) try: order = Order.objects.from_request(self.client) except Order.DoesNotExist: self.fail("Couldn't create an order") items = order.items.all() variation = self._product.variations.all()[0] self.assertEqual(cart.total_quantity(), 0) self.assertEqual(len(items), 1) self.assertEqual(items[0].sku, variation.sku) self.assertEqual(items[0].quantity, TEST_STOCK) self.assertEqual(variation.num_in_stock, TEST_STOCK) self.assertEqual(order.item_total, TEST_PRICE * TEST_STOCK) def test_syntax(self): """ Run pyflakes/pep8 across the code base to check for potential errors. """ extra_ignore = ( "redefinition of unused 'digest'", "redefinition of unused 'OperationalError'", "'from mezzanine.project_template.settings import *' used", ) warnings = [] warnings.extend(run_pyflakes_for_package("cartridge", extra_ignore=extra_ignore)) warnings.extend(run_pep8_for_package("cartridge")) if warnings: self.fail("Syntax warnings!\n\n%s" % "\n".join(warnings)) class SaleTests(TestCase): def setUp(self): product1 = Product(unit_price="1.27") product1.save() ProductVariation(unit_price="1.27", product_id=product1.id).save() ProductVariation(unit_price="1.27", product_id=product1.id).save() product2 = Product(unit_price="1.27") product2.save() ProductVariation(unit_price="1.27", product_id=product2.id).save() ProductVariation(unit_price="1.27", product_id=product2.id).save() sale = Sale( title="30% OFF - Ken Bruce has gone mad!", discount_percent="30" ) sale.save() sale.products.add(product1) sale.products.add(product2) sale.save() def test_sale_save(self): """ Regression test for GitHub issue #24. Incorrect exception handle meant that in some cases (usually percentage discount) sale_prices were not being applied to all products and their varitations. Note: This issues was only relevant using MySQL and with exceptions turned on (which is the default when DEBUG=True). """ # Initially no sale prices will be set. for product in Product.objects.all(): self.assertFalse(product.sale_price) for variation in ProductVariation.objects.all(): self.assertFalse(variation.sale_price) # Activate the sale and verify the prices. sale = Sale.objects.all()[0] sale.active = True sale.save() # Afterward ensure that all the sale prices have been updated. 
for product in Product.objects.all(): self.assertTrue(product.sale_price) for variation in ProductVariation.objects.all(): self.assertTrue(variation.sale_price) try: __import__("stripe") import mock except ImportError: stripe_used = False else: stripe_handler = "cartridge.shop.payment.stripe_api.process" stripe_used = settings.SHOP_HANDLER_PAYMENT == stripe_handler if stripe_used: settings.STRIPE_API_KEY = "dummy" from cartridge.shop.payment import stripe_api class StripeTests(TestCase): """Test the Stripe payment backend""" def setUp(self): # Every test needs access to the request factory. self.factory = RequestFactory() def test_charge(self, mock_charge): # Create a fake request object with the test data request = self.factory.post("/shop/checkout/") request.POST["card_number"] = "4242424242424242" request.POST["card_expiry_month"] = "06" request.POST["card_expiry_year"] = "2014" request.POST["billing_detail_street"] = "123 Evergreen Terrace" request.POST["billing_detail_city"] = "Springfield" request.POST["billing_detail_state"] = "WA" request.POST["billing_detail_postcode"] = "01234" request.POST["billing_detail_country"] = "USA" # Order form isn't used by stripe backend order_form = None # Create an order order = Order.objects.create(total=Decimal("22.37")) # Code under test stripe_api.process(request, order_form, order) # Assertion mock_charge.create.assert_called_with( amount=2237, currency="usd", card={'number': "4242424242424242", 'exp_month': "06", 'exp_year': "14", 'address_line1': "123 Evergreen Terrace", 'address_city': "Springfield", 'address_state': "WA", 'address_zip': "01234", 'country': "USA"}) StripeTests = skipUnless(stripe_used, "Stripe not used")(StripeTests) if stripe_used: charge = "stripe.Charge" StripeTests.test_charge = mock.patch(charge)(StripeTests.test_charge) class TaxationTests(TestCase): def test_default_handler_exists(self): ''' Ensure that the handler specified in default settings exists as well as the default setting itself. ''' from mezzanine.utils.importing import import_dotted_path settings.use_editable() assert hasattr(settings, 'SHOP_HANDLER_TAX'), \ 'Setting SHOP_HANDLER_TAX not found.' handler = lambda s: import_dotted_path(s) if s else lambda *args: None tax_handler = handler(settings.SHOP_HANDLER_TAX) assert tax_handler is not None, \ 'Could not find default SHOP_HANDLER_TAX function.' def test_set_tax(self): ''' Regression test to ensure that set_tax still sets the appropriate session variables. ''' from cartridge.shop.utils import set_tax tax_type = 'Tax for Testing' tax_total = 56.65 class request: session = {} set_tax(request, tax_type, tax_total) assert request.session.get('tax_type') == tax_type, \ 'tax_type not set with set_tax' assert request.session.get('tax_total') == tax_total, \ 'tax_total not set with set_tax'
bsd-2-clause
-1,400,247,179,012,698,600
531,773,332,709,507,100
38.957282
79
0.61211
false
venkatant/msproject
flow_statistics.py
1
7329
__author__ = 'venkat' from header import * from json_http_handler import * class FlowWindow: bottom_frame = 0 bottom_row = 0 class FlowTable: def __init__(self): self.dest_ip = None self.dest_mask = None self.dest_mac = None self.dest_port = None self.dest_node = None return def updateflowtable(self, destIp, destMask, destMac, destPort, destNode): self.dest_ip = destIp self.dest_mask = destMask self.dest_mac = destMac self.dest_port = destPort self.dest_node = destNode return def displayflowtable(self): print(self.dest_ip, self.dest_mask, self.dest_mac, self.dest_port, self.dest_node) return class FlowStatistics: def __init__(self): self.listbox = None self.toplevel = None self.no_of_flows = 0 def CurSelet(self): print("Hello") switch = str((self.mylistbox.get(self.mylistbox.curselection()))) print(switch) def fillListWithNodesInfo(self): ''' Create an object of Http JSON Handler Class to receive resp from respective Rest URL's ''' http_obj = HttpJsonHandler() json_nodes = http_obj.getnodeinfo() for node in json_nodes['nodeProperties']: self.listbox.insert(END, node['node']['id']) def displayFlowTableTitle(self, bottom_frame, bottom_row): for column in range(5): if column == 0: label = Label(bottom_frame, text="Destination IP", borderwidth=0, width=15, fg="red") elif column == 1: label = Label(bottom_frame, text="Destination Mask", borderwidth=0, width=15, fg="red") elif column == 2: label = Label(bottom_frame, text="Output Mac", borderwidth=0, width=15, fg="red") elif column == 3: label = Label(bottom_frame, text="Output Port", borderwidth=0, width=15, fg="red") elif column == 4: label = Label(bottom_frame, text="Output Node", borderwidth=0, width=25, fg="red") label.configure(bg="white") label.grid(row=bottom_row, column=column, sticky="nsew", padx=1, pady=1) return def displayFlowTableContent(self, flow_list, flow_window_obj): bottom_frame = flow_window_obj.bottom_frame bottom_row = flow_window_obj.bottom_row #for row in range(4): for row in flow_list: current_row = [] for column in range(5): if column == 0: label = Label(bottom_frame, text="%s" % row.dest_ip, borderwidth=0, width=15) elif column == 1: label = Label(bottom_frame, text="%s" % row.dest_mask, borderwidth=0, width=15) elif column == 2: label = Label(bottom_frame, text="%s" % row.dest_mac, borderwidth=0, width=15) elif column == 3: label = Label(bottom_frame, text="%s" % row.dest_port, borderwidth=0, width=15) elif column == 4: label = Label(bottom_frame, text="%s" % row.dest_node, borderwidth=0, width=25) label.configure(bg="white") label.grid(row=bottom_row, column=column, sticky="nsew", padx=1, pady=1) current_row.append(label) bottom_row += 1 for column in range(5): bottom_frame.grid_columnconfigure(column, weight=1) return def CurListSelet(self, evt, flow_window_obj): #mylistbox = evt.widget switch=str((self.listbox.get(self.listbox.curselection()))) print(switch) ''' Create an object of Http JSON Handler Class to receive resp from respective Rest URL's ''' http_obj = HttpJsonHandler() json_flows = http_obj.getflowinfo(switch) no_of_flows = 0 flow_list = [] for flowCount in json_flows['flowStatistic']: destIp = json_flows['flowStatistic'][no_of_flows]['flow']['match']['matchField'][0]['value'] destMask = json_flows['flowStatistic'][no_of_flows]['flow']['match']['matchField'][0]['mask'] destPort = 0 destnode = '00:00:00:00:00:00:00:00' try: destMac = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][0]['address'] try: destPort = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][1]['port']['id'] 
destnode = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][1]['port']['node']['id'] except: print('') except KeyError: destPort = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][0]['port']['id'] destnode = json_flows['flowStatistic'][no_of_flows]['flow']['actions'][0]['port']['node']['id'] destMac = '000000000000' # destIp, destMask, destMac, destPort, destNode # Create an instance of FlowTable class flow_table_entry = FlowTable() flow_table_entry.updateflowtable(destIp, destMask, destMac, destPort, destnode) flow_list.append(flow_table_entry) no_of_flows += 1 flow_table_entry.displayflowtable() # sort the list with switch_is as Key flow_list.sort(key=lambda host:host.dest_ip) self.displayFlowTableContent(flow_list, flow_window_obj) def flowstatistics(): # Create an instance of FlowTable class #flow_table_entry = FlowTable() # Create an instance of FlowStatistics class obj = FlowStatistics() ''' scrollbar.config(command=obj.mylistbox.yview) submit = Button(obj.toplevel, text="Submit", command=obj.CurSelet) submit.pack() ''' toplevel = Toplevel() toplevel.title("Flow Monitoring") toplevel.geometry("750x250") top_row = 0 bottom_row = 0 top_frame = Frame(toplevel) top_frame.pack(side=TOP) top_label = Label(top_frame, text=" SELECT SWITCH TO GET FLOW ENTRIES", fg="red", borderwidth=0, width=40) top_label.grid(row=top_row, rowspan=1) top_row += 1 bottom_frame = Frame(toplevel) bottom_frame.pack(side=TOP) bottom_label = Label(bottom_frame, fg="green") bottom_label.grid(row=bottom_row) bottom_row += 1 scrollbar = Scrollbar(top_frame) obj.listbox = Listbox(top_frame, yscrollcommand=scrollbar.set) obj.listbox.config(height=4) # Fills the list of nodes in the List Box obj.fillListWithNodesInfo() obj.listbox.grid(row=top_row, column=0, sticky="nsew", padx=1, pady=1) scrollbar.grid(row=top_row, column=1, sticky="nsew", padx=1, pady=1) scrollbar.config(command=obj.listbox.yview) obj.displayFlowTableTitle(bottom_frame, bottom_row) bottom_row += 1 flow_window_obj = FlowWindow() flow_window_obj.bottom_row = bottom_row flow_window_obj.bottom_frame = bottom_frame # Below code to activate on selection of items in List Box obj.listbox.bind('<<ListboxSelect>>', lambda event, arg=flow_window_obj: obj.CurListSelet(event, flow_window_obj)) return
gpl-2.0
1,481,501,081,220,249,900
-6,796,980,140,957,840,000
32.167421
118
0.583163
false
ninotoshi/tensorflow
tensorflow/contrib/learn/python/learn/tests/test_custom_decay.py
7
2270
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import random from tensorflow.contrib.learn.python import learn from tensorflow.contrib.learn.python.learn import datasets from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split class CustomDecayTest(tf.test.TestCase): def testIrisExponentialDecay(self): random.seed(42) iris = datasets.load_iris() X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42) # setup exponential decay function def exp_decay(global_step): return tf.train.exponential_decay(learning_rate=0.1, global_step=global_step, decay_steps=100, decay_rate=0.001) classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3, steps=500, learning_rate=exp_decay) classifier.fit(X_train, y_train) score = accuracy_score(y_test, classifier.predict(X_test)) self.assertGreater(score, 0.65, "Failed with score = {0}".format(score)) if __name__ == "__main__": tf.test.main()
apache-2.0
-7,632,211,994,169,633,000
2,811,706,874,118,013,000
38.824561
86
0.606167
false
tracyjacks/PyMetWeather
pymetweather/pymetweather.py
1
13941
import curses from datetime import date, timedelta import locale from textwrap import fill from pymetweather.forecasts import WeatherForecast from pymetweather.get_args import get_command_line_args, get_config_args locale.setlocale(locale.LC_ALL, '') class WeatherPrinter(object): def __init__(self, forecast, screen_width): self.fcs = forecast self.cols = [ (['Time'], 5, '{$:02}:00'), (['Conditions'], 22, '{W}'), (['Precipitation', 'probability'], 15, '{Pp:>3} %'), (['Temperature', '(Feels Like)'], 14, '{T:>2} {F} °C'), (['Wind Speed', '(Gust)'], 16, '{S:>2} {G} mph'), (['Wind', 'Direction'], 12, '{D:>3}'), (['Relative', 'Humidity'], 10, '{H} %'), (['Visibility'], 12, '{V}'), (['UV', 'Index'], 7, '{U}')] self.daily_cols = [ (['Day'], 13, '{$}', '{$}'), (['Conditions'], 22, '{W}', '{W}'), (['Precipitation', 'probability'], 15, '{PPd:>3} %', '{PPn:>3} %'), (['Max day/', 'Min night', 'Temperature', '(Feels like)'], 14, '{Dm:>2} {FDm} °C', '{Nm:>2} {FNm} °C'), (['Wind Speed', '(Gust)'], 16, '{S:>2} {Gn} mph', '{S:>2} {Gm} mph'), (['Wind', 'Direction'], 12, '{D:>3}', '{D:>3}'), (['Relative', 'Humidity'], 10, '{Hn} %', '{Hm} %'), (['Visibility'], 12, '{V}', '{V}')] self.top_pad = curses.newpad(2000, 500) self.tab_pad = curses.newpad(2000, 500) self.bottom_bar = curses.newpad(1, 500) self.help_screen_pad = curses.newpad(500, 500) self.top_maxy = 0 self.tab_maxy = 0 self.tab_maxx = 0 self.screen_width = screen_width self.print_bottom_bar() self.setup_help() @staticmethod def addustr(win, text, *args): win.addstr(text.encode('utf-8'), *args) def print_help_screen(self, top_only): if not top_only: self.addustr(self.tab_pad, self.help_string) self.tab_maxy = self.help_maxy self.tab_maxx = self.help_maxx def setup_help(self): help = [ ('q', 'Quit'), ('?', 'Show this help'), ('t', "Today's weather"), ('d', 'Five day summary'), ('0', "Today's weather"), ('1', "Tomorrow's weather"), ('2', 'Weather for 2 days later'), ('3', 'Weather for 3 days later'), ('4', 'Weather for 4 days later'), ('5–9', 'UK outlook for the next month'), ('l', 'UK outlook for the next month'), ('left arrow', 'scroll left'), ('right arrow', 'scroll left'), ('up arrow', 'scroll up'), ('down arrow', 'scroll down'), ] c1width = max([len(k[0]) for k in help]) c2width = max([len(k[1]) for k in help]) self.help_string = '' for h in help: self.help_string += h[0].ljust(c1width + 1) + ' : ' + h[1] + '\n' self.help_string = self.help_string.strip('\n') self.help_maxy = len(help) - 1 self.help_maxx = c1width + c2width - 1 def print_bottom_bar(self): self.addustr( self.bottom_bar, '?: help q: quit t: today ' 'd: 5 day summary 1–4: days 1 to 4 ' 'l: longterm'.ljust(499), curses.A_REVERSE | curses.A_BOLD) def print_longer_term_weather(self): regf1 = self.fcs.reg_fcs[2]['Paragraph'] regf2 = self.fcs.reg_fcs[3]['Paragraph'] self.addustr( self.top_pad, self.wrap_text(regf1['title']), curses.A_BOLD) self.addustr(self.top_pad, '\n' + self.wrap_text(regf1['$']) + '\n\n') self.addustr( self.top_pad, self.wrap_text(regf2['title']), curses.A_BOLD) self.addustr(self.top_pad, '\n' + self.wrap_text(regf2['$'])) self.top_maxy = self.top_pad.getyx()[0] + 1 def wrap_text(self, text): return fill(text, self.screen_width) def print_hourly_top(self, n_day, day): title = 'Weather for {}, {}'.format( self.fcs.site_name, day.strftime('%A %d %B %Y')) self.addustr(self.top_pad, self.wrap_text(title) + '\n', curses.A_BOLD) regfindex = 0 regf = self.fcs.reg_fcs[0]['Paragraph'] if n_day == 0: if 'Headline' in regf[regfindex]['title']: self.addustr(self.top_pad, 
self.wrap_text(regf[regfindex]['$']) + '\n\n') regfindex += 1 if 'Today' in regf[regfindex]['title']: today_text = self.wrap_text('Today: ' + regf[regfindex]['$']) self.addustr(self.top_pad, today_text[:7], curses.A_BOLD) self.addustr(self.top_pad, today_text[7:] + '\n\n') regfindex += 1 if 'Tonight' in regf[regfindex]['title']: tonight_text = self.wrap_text(regf[regfindex]['title'] + ' ' + regf[regfindex]['$']) lent = len(regf[regfindex]['title']) self.addustr(self.top_pad, tonight_text[:lent], curses.A_BOLD) self.addustr(self.top_pad, tonight_text[lent:] + '\n\n') regfindex += 1 elif n_day == 1: for regfindex in range(len(regf)): if day.strftime('%A') in regf[regfindex]['title']: self.addustr( self.top_pad, self.wrap_text(regf[regfindex]['$']) + '\n\n') break else: regf = self.fcs.reg_fcs[1]['Paragraph'] outlook = self.wrap_text(regf['title'] + ' ' + regf['$']) lent = len(regf['title']) + 1 self.addustr(self.top_pad, '\n' + outlook[:lent], curses.A_BOLD) self.addustr(self.top_pad, outlook[lent:] + '\n\n') self.top_maxy = self.top_pad.getyx()[0] + 1 def print_hourly_tab(self, n_day, period): width_counter = 0 for c in self.cols: for i, head in enumerate(c[0]): head_text = '{:^{}}'.format(head, c[1]) self.tab_pad.move(i, width_counter) self.addustr(self.tab_pad, head_text, curses.A_BOLD) width_counter += c[1] top_row = ( self.tab_pad.getyx()[0] + max([len(c[0]) for c in self.cols]) - 1) for i, rep in enumerate(period['Rep']): width_counter = 0 for c in self.cols: cell_text = '{:^{}}'.format(c[2].format(**rep), c[1]) self.tab_pad.move(top_row + i, width_counter) self.addustr(self.tab_pad, cell_text) width_counter += c[1] self.tab_maxy = self.tab_pad.getyx()[0] self.tab_maxx = sum([c[1] for c in self.cols]) - 2 def print_hourly_weather(self, n_day, top_only=False): day = date.today() + timedelta(n_day) period = self.fcs.hourly_fcs['Period'][n_day] assert period['value'] == day.strftime('%Y-%m-%dZ') self.print_hourly_top(n_day, day) if not top_only: self.print_hourly_tab(n_day, period) def print_weather_brief(self, top_only=False): period = self.fcs.daily_fcs['Period'] width_counter = 0 for c in self.daily_cols: for i, head in enumerate(c[0]): head_text = '{:^{}}'.format(head, c[1]) self.tab_pad.move(i, width_counter) self.addustr(self.tab_pad, head_text, curses.A_BOLD) width_counter += c[1] top_row = ( self.tab_pad.getyx()[0] + max([len(c[0]) for c in self.daily_cols])) c = self.daily_cols[0] for i, rep in enumerate(period): cell_text = '{:<{}} '.format(rep['value'], c[1] - 3) self.tab_pad.move(top_row + i * 4, 0) self.addustr(self.tab_pad, cell_text) cell_text = '{:>{}} '.format( c[2].format(**rep['Rep'][0]), c[1] - 3) self.tab_pad.move(top_row + i * 4 + 1, 0) self.addustr(self.tab_pad, cell_text) cell_text = '{:>{}} '.format( c[3].format(**rep['Rep'][1]), c[1] - 3) self.tab_pad.move(top_row + i * 4 + 2, 0) self.addustr(self.tab_pad, cell_text) for i, rep in enumerate(period): rep = rep['Rep'] width_counter = self.daily_cols[0][1] for c in self.daily_cols[1:]: cell_text = '{:^{}}'.format(c[2].format(**rep[0]), c[1]) self.tab_pad.move(top_row + i * 4 + 1, width_counter) self.addustr(self.tab_pad, cell_text) cell_text = '{:^{}}'.format(c[3].format(**rep[1]), c[1]) self.tab_pad.move(top_row + i * 4 + 2, width_counter) self.addustr(self.tab_pad, cell_text) width_counter += c[1] self.tab_maxy = self.tab_pad.getyx()[0] self.tab_maxx = sum([c[1] for c in self.daily_cols]) - 2 def print_screen(self, screen, screen_width=None, top_only=False): if screen_width is not None: self.screen_width = 
screen_width self.top_pad.clear() self.top_maxy = 0 if not top_only: self.tab_maxy = 0 self.tab_maxx = 0 self.tab_pad.clear() if screen in range(0, 5): self.print_hourly_weather(screen, top_only) elif screen == 8: self.print_longer_term_weather() elif screen == 7: self.print_weather_brief(top_only) elif screen == 9: self.print_help_screen(top_only) class WeatherApp(object): key_map = { '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 8, '6': 8, '7': 8, '8': 8, '9': 9, 't': 0, 'l': 8, 'd': 7, 'b': 7, '?': 9} def __init__(self, stdscr, fcs, start_screen=0): self.stdscr = stdscr curses.curs_set(0) curses.use_default_colors() self.fcs = fcs self.scrolly = 0 self.scrollx = 0 self.maxy = 0 self.maxx = 0 self.y = self.stdscr.getmaxyx()[0] - 1 self.x = self.stdscr.getmaxyx()[1] - 1 self.printer = WeatherPrinter(self.fcs, self.x + 1) self.print_screen(start_screen) def print_resize(self): self.y = self.stdscr.getmaxyx()[0] - 1 self.x = self.stdscr.getmaxyx()[1] - 1 self.printer.print_screen(self.screen_showing, self.x + 1, True) self.maxx = max(self.printer.tab_maxx, self.x - 1) self.maxy = self.printer.tab_maxy + self.printer.top_maxy if self.y > (self.maxy - self.scrolly): self.scrolly = max(self.maxy - (self.y - 1), 0) if self.x > (self.maxx - self.scrollx): self.scrollx = max(self.maxx - (self.x - 1), 0) self.draw_screen() def print_screen(self, screen): self.screen_showing = screen self.scrolly = 0 self.scrollx = 0 self.printer.print_screen(self.screen_showing) self.maxy = self.printer.tab_maxy + self.printer.top_maxy self.maxx = max(self.printer.tab_maxx, self.x - 1) self.draw_screen() def draw_screen(self): self.stdscr.clear() self.stdscr.refresh() top_y = self.printer.top_maxy try: assert self.y == self.stdscr.getmaxyx()[0] - 1 assert self.x == self.stdscr.getmaxyx()[1] - 1 except AssertionError: self.print_resize() return self.printer.top_pad.noutrefresh( self.scrolly, 0, 0, 0, min(top_y, self.y), self.x) if self.y - (top_y - self.scrolly) > 1: self.printer.tab_pad.noutrefresh( max(0, self.scrolly - top_y), self.scrollx, top_y - self.scrolly, 0, self.y, self.x) self.printer.bottom_bar.noutrefresh( 0, 0, self.y, 0, self.y, self.x) try: assert self.y == self.stdscr.getmaxyx()[0] - 1 assert self.x == self.stdscr.getmaxyx()[1] - 1 except AssertionError: self.print_resize() return with open('/tmp/log', 'a') as f: f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format( self.maxy, self.y, self.scrolly, self.maxx, self.x, self.scrollx)) curses.doupdate() def main_loop(self): while True: c = self.stdscr.getkey() if c == 'q': return elif c in self.key_map and self.screen_showing != self.key_map[c]: self.print_screen(self.key_map[c]) elif c == 'KEY_RESIZE': self.print_resize() elif c == 'KEY_DOWN': if self.scrolly + self.y - 1 < self.maxy: self.scrolly += 1 self.draw_screen() elif c == 'KEY_UP' and self.scrolly != 0: self.scrolly -= 1 self.draw_screen() elif c == 'KEY_LEFT' and self.scrollx != 0: self.scrollx -= 1 self.draw_screen() elif c == 'KEY_RIGHT': if self.scrollx + self.x - 1 < self.maxx: self.scrollx += 1 self.draw_screen() def run_curses_app(screen, fcs): wap = WeatherApp(screen, fcs) wap.main_loop() def run_app(args): fcs = WeatherForecast(args['api_key'], args['location'], args['datadir']) if args['quiet_update']: fcs.load(True) return fcs.load(args['dont_update']) curses.wrapper(run_curses_app, fcs) def main(): args = get_config_args() args.update(get_command_line_args()) run_app(args)
gpl-2.0
-612,030,466,450,570,900
-5,169,283,271,661,724,000
35.413613
79
0.492955
false
ktnyt/chainer
chainer/testing/distribution_test.py
2
12804
import functools import unittest import numpy import chainer from chainer.backends import cuda from chainer.testing import array from chainer.testing import attr from chainer import utils def skip_not_in_test_target(test_target): def decorator(f): @functools.wraps(f) def new_f(self, *args, **kwargs): if test_target not in self.test_targets: self.skipTest( "\'%s\' is not exist in test_targets." % test_target) else: f(self, *args, **kwargs) return new_f return decorator class distribution_unittest(unittest.TestCase): scipy_onebyone = False def setUp(self): self.support = 'real' if not hasattr(self, 'event_shape'): self.event_shape = () self.continuous = True self.test_targets = set() self.options = {} self.setUp_configure() targets_not_found = self.test_targets - { "batch_shape", "cdf", "entropy", "event_shape", "icdf", "log_cdf", "log_prob", "log_survival", "mean", "prob", "sample", "stddev", "support", "survival", "variance"} if targets_not_found: raise ValueError( "invalid target(s): {}".format(targets_not_found)) if self.is_variable: self.params = {k: chainer.Variable(v) for k, v in self.params.items()} def scipy_onebyone_params_iter(self): for index in numpy.ndindex(self.shape): yield {k: v[index] for k, v in self.scipy_params.items()} @property def cpu_dist(self): params = self.params params.update(self.options) return self.dist(**params) @property def gpu_dist(self): if self.is_variable: gpu_params = {k: cuda.to_gpu(v.data) for k, v in self.params.items()} gpu_params = {k: chainer.Variable(v) for k, v in gpu_params.items()} else: gpu_params = {k: cuda.to_gpu(v) for k, v in self.params.items()} gpu_params.update(self.options) return self.dist(**gpu_params) @skip_not_in_test_target('batch_shape') def test_batch_shape_cpu(self): self.assertEqual(self.cpu_dist.batch_shape, self.shape) @attr.gpu @skip_not_in_test_target('batch_shape') def test_batch_shape_gpu(self): self.assertEqual(self.gpu_dist.batch_shape, self.shape) def check_cdf(self, is_gpu): smp = self.sample_for_test() if is_gpu: cdf1 = self.gpu_dist.cdf(cuda.to_gpu(smp)).data else: cdf1 = self.cpu_dist.cdf(smp).data cdf2 = self.scipy_dist.cdf(smp, **self.scipy_params) array.assert_allclose(cdf1, cdf2) @skip_not_in_test_target('cdf') def test_cdf_cpu(self): self.check_cdf(False) @attr.gpu @skip_not_in_test_target('cdf') def test_cdf_gpu(self): self.check_cdf(True) def check_entropy(self, is_gpu): if is_gpu: ent1 = self.gpu_dist.entropy.data else: ent1 = self.cpu_dist.entropy.data if self.scipy_onebyone: ent2 = [] for one_params in self.scipy_onebyone_params_iter(): ent2.append(self.scipy_dist.entropy(**one_params)) ent2 = numpy.vstack(ent2).reshape(self.shape) else: ent2 = self.scipy_dist.entropy(**self.scipy_params) array.assert_allclose(ent1, ent2) @skip_not_in_test_target('entropy') def test_entropy_cpu(self): self.check_entropy(False) @attr.gpu @skip_not_in_test_target('entropy') def test_entropy_gpu(self): self.check_entropy(True) @skip_not_in_test_target('event_shape') def test_event_shape_cpu(self): self.assertEqual(self.cpu_dist.event_shape, self.event_shape) @attr.gpu @skip_not_in_test_target('event_shape') def test_event_shape_gpu(self): self.assertEqual(self.gpu_dist.event_shape, self.event_shape) def check_icdf(self, is_gpu): smp = numpy.random.uniform( 1e-5, 1 - 1e-5, self.sample_shape + self.shape ).astype(numpy.float32) if is_gpu: icdf1 = self.gpu_dist.icdf(cuda.to_gpu(smp)).data else: icdf1 = self.cpu_dist.icdf(smp).data icdf2 = self.scipy_dist.ppf(smp, **self.scipy_params) array.assert_allclose(icdf1, icdf2) 
@skip_not_in_test_target('icdf') def test_icdf_cpu(self): self.check_icdf(False) @attr.gpu @skip_not_in_test_target('icdf') def test_icdf_gpu(self): self.check_icdf(True) def check_log_cdf(self, is_gpu): smp = self.sample_for_test() if is_gpu: log_cdf1 = self.gpu_dist.log_cdf(cuda.to_gpu(smp)).data else: log_cdf1 = self.cpu_dist.log_cdf(smp).data log_cdf2 = self.scipy_dist.logcdf(smp, **self.scipy_params) array.assert_allclose(log_cdf1, log_cdf2) @skip_not_in_test_target('log_cdf') def test_log_cdf_cpu(self): self.check_log_cdf(False) @attr.gpu @skip_not_in_test_target('log_cdf') def test_log_cdf_gpu(self): self.check_log_cdf(True) def check_log_prob(self, is_gpu): smp = self.sample_for_test() if is_gpu: log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data else: log_prob1 = self.cpu_dist.log_prob(smp).data if self.continuous: scipy_prob = self.scipy_dist.logpdf else: scipy_prob = self.scipy_dist.logpmf if self.scipy_onebyone: onebyone_smp = smp.reshape(*[ utils.size_of_shape(sh) for sh in [self.sample_shape, self.shape, self.event_shape]]) onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1) onebyone_smp = onebyone_smp.reshape((-1,) + self.sample_shape + self.event_shape) log_prob2 = [] for one_params, one_smp in zip( self.scipy_onebyone_params_iter(), onebyone_smp): log_prob2.append(scipy_prob(one_smp, **one_params)) log_prob2 = numpy.vstack(log_prob2) log_prob2 = log_prob2.reshape( utils.size_of_shape(self.shape), -1).T log_prob2 = log_prob2.reshape(self.sample_shape + self.shape) else: log_prob2 = scipy_prob(smp, **self.scipy_params) array.assert_allclose(log_prob1, log_prob2) @skip_not_in_test_target('log_prob') def test_log_prob_cpu(self): self.check_log_prob(False) @attr.gpu @skip_not_in_test_target('log_prob') def test_log_prob_gpu(self): self.check_log_prob(True) def check_log_survival(self, is_gpu): smp = self.sample_for_test() if is_gpu: log_survival1 = \ self.gpu_dist.log_survival_function(cuda.to_gpu(smp)).data else: log_survival1 = self.cpu_dist.log_survival_function(smp).data log_survival2 = self.scipy_dist.logsf(smp, **self.scipy_params) array.assert_allclose(log_survival1, log_survival2) @skip_not_in_test_target('log_survival') def test_log_survival_cpu(self): self.check_log_survival(False) @attr.gpu @skip_not_in_test_target('log_survival') def test_log_survival_gpu(self): self.check_log_survival(True) def check_mean(self, is_gpu): if is_gpu: mean1 = self.gpu_dist.mean.data else: mean1 = self.cpu_dist.mean.data if self.scipy_onebyone: mean2 = [] for one_params in self.scipy_onebyone_params_iter(): mean2.append(self.scipy_dist.mean(**one_params)) mean2 = numpy.vstack(mean2).reshape( self.shape + self.cpu_dist.event_shape) else: mean2 = self.scipy_dist.mean(**self.scipy_params) array.assert_allclose(mean1, mean2) @skip_not_in_test_target('mean') def test_mean_cpu(self): self.check_mean(False) @attr.gpu @skip_not_in_test_target('mean') def test_mean_gpu(self): self.check_mean(True) def check_prob(self, is_gpu): smp = self.sample_for_test() if is_gpu: prob1 = self.gpu_dist.prob(cuda.to_gpu(smp)).data else: prob1 = self.cpu_dist.prob(smp).data if self.continuous: prob2 = self.scipy_dist.pdf(smp, **self.scipy_params) else: prob2 = self.scipy_dist.pmf(smp, **self.scipy_params) array.assert_allclose(prob1, prob2) @skip_not_in_test_target('prob') def test_prob_cpu(self): self.check_prob(False) @attr.gpu @skip_not_in_test_target('prob') def test_prob_gpu(self): self.check_prob(True) def check_sample(self, is_gpu): if is_gpu: smp1 = self.gpu_dist.sample( 
sample_shape=(100000,)+self.sample_shape).data else: smp1 = self.cpu_dist.sample( sample_shape=(100000,)+self.sample_shape).data if self.scipy_onebyone: smp2 = [] for one_params in self.scipy_onebyone_params_iter(): smp2.append(self.scipy_dist.rvs( size=(100000,)+self.sample_shape, **one_params)) smp2 = numpy.vstack(smp2) smp2 = smp2.reshape((utils.size_of_shape(self.shape), 100000) + self.sample_shape + self.cpu_dist.event_shape) smp2 = numpy.rollaxis( smp2, 0, smp2.ndim-len(self.cpu_dist.event_shape)) smp2 = smp2.reshape((100000,) + self.sample_shape + self.shape + self.cpu_dist.event_shape) else: smp2 = self.scipy_dist.rvs( size=(100000,) + self.sample_shape + self.shape, **self.scipy_params) array.assert_allclose(smp1.mean(axis=0), smp2.mean(axis=0), atol=3e-2, rtol=3e-2) array.assert_allclose(smp1.std(axis=0), smp2.std(axis=0), atol=3e-2, rtol=3e-2) @skip_not_in_test_target('sample') def test_sample_cpu(self): self.check_sample(False) @attr.gpu @skip_not_in_test_target('sample') def test_sample_gpu(self): self.check_sample(True) def check_stddev(self, is_gpu): if is_gpu: stddev1 = self.gpu_dist.stddev.data else: stddev1 = self.cpu_dist.stddev.data stddev2 = self.scipy_dist.std(**self.scipy_params) array.assert_allclose(stddev1, stddev2) @skip_not_in_test_target('stddev') def test_stddev_cpu(self): self.check_stddev(False) @attr.gpu @skip_not_in_test_target('stddev') def test_stddev_gpu(self): self.check_stddev(True) @skip_not_in_test_target('support') def test_support_cpu(self): self.assertEqual(self.cpu_dist.support, self.support) @attr.gpu @skip_not_in_test_target('support') def test_support_gpu(self): self.assertEqual(self.gpu_dist.support, self.support) def check_survival(self, is_gpu): smp = self.sample_for_test() if is_gpu: survival1 = self.gpu_dist.survival_function( cuda.to_gpu(smp)).data else: survival1 = self.cpu_dist.survival_function(smp).data survival2 = self.scipy_dist.sf(smp, **self.scipy_params) array.assert_allclose(survival1, survival2) @skip_not_in_test_target('survival') def test_survival_cpu(self): self.check_survival(False) @attr.gpu @skip_not_in_test_target('survival') def test_survival_gpu(self): self.check_survival(True) def check_variance(self, is_gpu): if is_gpu: variance1 = self.gpu_dist.variance.data else: variance1 = self.cpu_dist.variance.data if self.scipy_onebyone: variance2 = [] for one_params in self.scipy_onebyone_params_iter(): variance2.append(self.scipy_dist.var(**one_params)) variance2 = numpy.vstack(variance2).reshape( self.shape + self.cpu_dist.event_shape) else: variance2 = self.scipy_dist.var(**self.scipy_params) array.assert_allclose(variance1, variance2) @skip_not_in_test_target('variance') def test_variance_cpu(self): self.check_variance(False) @attr.gpu @skip_not_in_test_target('variance') def test_variance_gpu(self): self.check_variance(True)
mit
6,881,877,651,316,173,000
2,103,061,140,500,510,200
32.34375
78
0.572243
false
jcarva/digital_image_processing_assignments
spatial_domain/python/task1_6.py
1
1722
# coding=UTF-8
# 1.6. Thresholding applied to Y, with threshold m and two options: a) m
# chosen by the user; b) m = mean of the values in the Y band;

import numpy as np

import utils
import color


def main():
    image = utils.load_image('lenna.png')
    yiq_image = color.rgb2yiq(image)
    grayscale_image = yiq_image[:, :, 2]  # Y

    threshold_value = 255 * 0.2
    mean_value = np.mean(grayscale_image)

    threshold_user_image = _segment(grayscale_image, threshold_value)
    original_threshold_user_image = np.copy(yiq_image)
    original_threshold_user_image[:, :, 2] = threshold_user_image
    original_threshold_user_image = color.yiq2rgb(original_threshold_user_image)

    threshold_mean_image = _segment(grayscale_image, mean_value)
    original_threshold_mean_image = np.copy(yiq_image)
    original_threshold_mean_image[:, :, 2] = threshold_mean_image
    original_threshold_mean_image = color.yiq2rgb(original_threshold_mean_image)

    utils.display_single_image('Original Image', image)
    utils.display_single_image('YIQ Image', yiq_image)
    utils.display_single_image('Y Channel', grayscale_image)
    utils.display_single_image('Y Threshold (User ' + str(threshold_value) + ')', threshold_user_image)
    utils.display_single_image('Back to Original (User ' + str(threshold_value) + ')', original_threshold_user_image)
    utils.display_single_image('Y Threshold (Mean ' + str(mean_value) + ')', threshold_mean_image)
    utils.display_single_image('Back to Original (Mean ' + str(mean_value) + ')', original_threshold_mean_image)

    utils.wait_key_and_destroy_windows()


def _segment(image, m):
    output = (image >= m) * 255

    return output


if __name__ == "__main__":
    main()
gpl-3.0
-2,468,379,439,826,412,500
-7,595,688,052,664,263,000
34.770833
117
0.689977
false
PXke/invenio
invenio/legacy/websubmit/functions/Create_Modify_Interface.py
1
12922
## This file is part of Invenio. ## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ This is the Create_Modify_Interface function (along with its helpers). It is used by WebSubmit for the "Modify Bibliographic Information" action. """ __revision__ = "$Id$" import os import re import time import pprint from invenio.legacy.dbquery import run_sql from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError from invenio.legacy.websubmit.functions.Retrieve_Data import Get_Field from invenio.ext.logging import register_exception def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""): """Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory. Delete the text file after having read-in its value. This function is called on the reload of the modify-record page. This way, the field in question can be populated with the value last entered by the user (before reload), instead of always being populated with the value still found in the DB. """ fld_val = "" if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK): fp = open( "%s/%s" % (cur_dir, fld), "r" ) fld_val = fp.read() fp.close() try: os.unlink("%s/%s"%(cur_dir, fld)) except OSError: # Cannot unlink file - ignore, let WebSubmit main handle this pass fld_val = fld_val.strip() return fld_val def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid): """Read a field's value from the record stored in the DB. This function is called when the Create_Modify_Interface function is called for the first time when modifying a given record, and field values must be retrieved from the database. """ fld_val = "" if fieldcode != "": for next_field_code in [x.strip() for x in fieldcode.split(",")]: fld_val += "%s\n" % Get_Field(next_field_code, recid) fld_val = fld_val.rstrip('\n') return fld_val def Create_Modify_Interface_transform_date(fld_val): """Accept a field's value as a string. If the value is a date in one of the following formats: DD Mon YYYY (e.g. 23 Apr 2005) YYYY-MM-DD (e.g. 2005-04-23) ...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005). """ if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None: try: fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y")) except (ValueError, TypeError): # bad date format: pass elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None: try: fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d")) except (ValueError,TypeError): # bad date format: pass return fld_val def Create_Modify_Interface(parameters, curdir, form, user_info=None): """ Create an interface for the modification of a document, based on the fields that the user has chosen to modify. 
This avoids having to redefine a submission page for the modifications, but relies on the elements already defined for the initial submission, i.e. the SBI action (the only page that needs to be built for the modification is the page letting the user specify a document to modify).

    This function should be added at step 1 of your modification workflow, after the functions that retrieve the report number and record id (Get_Report_Number, Get_Recid). Functions at step 2 are the ones executed upon successful submission of the form.

    Create_Modify_Interface expects the following parameters:

    * "fieldnameMBI" - the name of a text file in the submission working directory that contains a list of the names of the WebSubmit fields to include in the Modification interface. These field names are separated by "\n" or "+".

    Given the list of WebSubmit fields to be included in the modification interface, the values for each field are retrieved for the given record (by way of each WebSubmit field being configured with a MARC Code in the WebSubmit database). An HTML FORM is then created. This form allows a user to modify certain field values for a record.

    The file referenced by 'fieldnameMBI' is usually generated from a multiple select form field: users can then select one or several fields to modify.

    Note that the function will display WebSubmit Response elements, but will not be able to set an initial value: this must be done by the Response element itself.

    Additionally the function creates an internal field named 'Create_Modify_Interface_DONE' on the interface, which can be retrieved in curdir after the form has been submitted. This flag is an indicator for the function that displayed values should not be retrieved from the database, but from the submitted values (in case the page is reloaded). You can also rely on this value when building your WebSubmit Response element in order to retrieve values either from the record, or from the submission directory.
""" global sysno,rn t = "" # variables declaration fieldname = parameters['fieldnameMBI'] # Path of file containing fields to modify the_globals = { 'doctype' : doctype, 'action' : action, 'act' : action, ## for backward compatibility 'step' : step, 'access' : access, 'ln' : ln, 'curdir' : curdir, 'uid' : user_info['uid'], 'uid_email' : user_info['email'], 'rn' : rn, 'last_step' : last_step, 'action_score' : action_score, '__websubmit_in_jail__' : True, 'form': form, 'sysno': sysno, 'user_info' : user_info, '__builtins__' : globals()['__builtins__'], 'Request_Print': Request_Print } if os.path.exists("%s/%s" % (curdir, fieldname)): fp = open( "%s/%s" % (curdir, fieldname), "r" ) fieldstext = fp.read() fp.close() fieldstext = re.sub("\+","\n", fieldstext) fields = fieldstext.split("\n") else: res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,)) if len(res) == 1: fields = res[0][0].replace(" ", "") fields = re.findall("<optionvalue=.*>", fields) regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""") fields = [regexp.search(x) for x in fields] fields = [x.group("value") for x in fields if x is not None] fields = [x for x in fields if x not in ("Select", "select")] else: raise InvenioWebSubmitFunctionError("cannot find fields to modify") #output some text t = t+"<CENTER bgcolor=\"white\">The document <B>%s</B> has been found in the database.</CENTER><br />Please modify the following fields:<br />Then press the 'END' button at the bottom of the page<br />\n" % rn for field in fields: subfield = "" value = "" marccode = "" text = "" # retrieve and display the modification text t = t + "<FONT color=\"darkblue\">\n" res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,)) if len(res)>0: t = t + "<small>%s</small> </FONT>\n" % res[0][0] # retrieve the marc code associated with the field res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,)) if len(res) > 0: marccode = res[0][0] # then retrieve the previous value of the field if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")): # Page has been reloaded - get field value from text file on server, not from DB record value = Create_Modify_Interface_getfieldval_fromfile(curdir, field) else: # First call to page - get field value from DB record value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno) # If field is a date value, transform date into format DD/MM/YYYY: value = Create_Modify_Interface_transform_date(value) res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,)) if len(res) > 0: element_type = res[0][3] numcols = res[0][6] numrows = res[0][5] size = res[0][4] maxlength = res[0][7] val = res[0][8] fidesc = res[0][9] if element_type == "T": text = "<TEXTAREA name=\"%s\" rows=%s cols=%s wrap>%s</TEXTAREA>" % (field, numrows, numcols, value) elif element_type == "F": text = "<INPUT TYPE=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength) elif element_type == "I": value = re.sub("[\n\r\t]+", "", value) text = "<INPUT name=\"%s\" size=%s value=\"%s\"> " % (field, size, val) text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value) elif element_type == "H": text = "<INPUT type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val) text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value) elif element_type == "S": values = re.split("[\n\r]+", value) text = fidesc if re.search("%s\[\]" % field, fidesc): multipletext = "[]" else: multipletext = 
"" if len(values) > 0 and not(len(values) == 1 and values[0] == ""): text += "<SCRIPT>\n" text += "var i = 0;\n" text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext) text += "max = el.length;\n" for val in values: text += "var found = 0;\n" text += "var i=0;\n" text += "while (i != max) {\n" text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % (val, val) text += " el.options[i].selected = true;\n" text += " found = 1;\n" text += " }\n" text += " i=i+1;\n" text += "}\n" #text += "if (found == 0) {\n" #text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n" #text += "}\n" text += "</SCRIPT>\n" elif element_type == "D": text = fidesc elif element_type == "R": try: co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec") ## Note this exec is safe WRT global variable because the ## Create_Modify_Interface has already been parsed by ## execfile within a protected environment. the_globals['text'] = '' exec co in the_globals text = the_globals['text'] except: msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals())) register_exception(req=None, alert_admin=True, prefix=msg) raise InvenioWebSubmitFunctionError(msg) else: text = "%s: unknown field type" % field t = t + "<small>%s</small>" % text # output our flag field t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />' # output some more text t = t + "<br /><br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"END\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>" return t
gpl-2.0
17,712,845,061,356,868
7,297,461,086,660,370,000
46.682657
252
0.580019
false
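The Create_Modify_Interface record above notes that the function writes a 'Create_Modify_Interface_DONE' flag that Response elements can use to decide whether to read a field's displayed value from the record or from the submission directory. Below is a minimal sketch of that decision, assuming the usual WebSubmit convention that each submitted form field is stored as a file named after the field in curdir; the helper name and the fetch_from_record callable are illustrative, not part of the record above.

import os

def get_initial_value(curdir, field, fetch_from_record):
    # If the modification form has already been submitted once, the DONE
    # flag file exists in curdir and the submitted value should be reused;
    # otherwise fall back to the value stored in the bibliographic record.
    flag_path = os.path.join(curdir, "Create_Modify_Interface_DONE")
    field_path = os.path.join(curdir, field)
    if os.path.exists(flag_path) and os.path.exists(field_path):
        with open(field_path) as f:
            return f.read().strip()
    return fetch_from_record(field)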
quonb/atom-generator
atom_generator/video.py
1
2028
import re class YouTube(object): def __init__(self, url=None): self._video_id = self._extract_id(url) def __call__(self, url=False): if url is None or url: self._video_id = self._extract_id(url) return self._video_id def _extract_id(self, url=None): """Extract youtube video ID Based on `youtube_dl` code """ if not url: return None YOUTUBE_URL = r"""^ (?: (?:https?://)? # http(s):// (optional) (?:(?:(?: (?:\w+\.)?youtube(?:-nocookie)?\.com/| tube\.majestyc\.net/| youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: (?:(?:v|embed|e)/)| # v/ or embed/ or e/ (?: # or the v= param in all its forms (?: (?:watch|movie)(?:_popup)?(?:\.php)? )? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx) v= ) ))| youtu\.be/ # just youtu.be/xxxx ) )? # all until now is optional -> you can pass the naked ID ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?(1).+)? # if we found the ID, everything can follow $""" video_id = re.match(YOUTUBE_URL, str(url), re.VERBOSE) return video_id and video_id.group(1) def thumbnail(self): return self._video_id and "http://i.ytimg.com/vi/%s/0.jpg" % self._video_id def video(self): return self._video_id and "http://www.youtube.com/watch?v=%s" % self._video_id
apache-2.0
7,273,609,063,929,546,000
-4,938,316,731,173,279,000
37.264151
97
0.446746
false
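A brief usage sketch for the YouTube helper defined in atom_generator/video.py above, assuming the package is importable; the URL is illustrative only.

from atom_generator.video import YouTube

yt = YouTube("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
print(yt())            # 'dQw4w9WgXcQ' -- the extracted 11-character video ID
print(yt.thumbnail())  # 'http://i.ytimg.com/vi/dQw4w9WgXcQ/0.jpg'
print(yt.video())      # 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'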
readevalprint/mezzanine
mezzanine/utils/cache.py
5
3908
from __future__ import unicode_literals from hashlib import md5 from time import time from django.core.cache import cache from django.utils.lru_cache import lru_cache from django.utils.cache import _i18n_cache_key_suffix from mezzanine.conf import settings from mezzanine.utils.sites import current_site_id from mezzanine.utils.conf import middlewares_or_subclasses_installed def _hashed_key(key): """ Hash keys when talking directly to the cache API, to avoid keys longer than the backend supports (eg memcache limit is 255) """ return md5(key.encode("utf-8")).hexdigest() def cache_set(key, value, timeout=None, refreshed=False): """ Wrapper for ``cache.set``. Stores the cache entry packed with the desired cache expiry time. When the entry is retrieved from cache, the packed expiry time is also checked, and if past, the stale cache entry is stored again with an expiry that has ``CACHE_SET_DELAY_SECONDS`` added to it. In this case the entry is not returned, so that a cache miss occurs and the entry should be set by the caller, but all other callers will still get the stale entry, so no real cache misses ever occur. """ if timeout is None: timeout = settings.CACHE_MIDDLEWARE_SECONDS refresh_time = timeout + time() real_timeout = timeout + settings.CACHE_SET_DELAY_SECONDS packed = (value, refresh_time, refreshed) return cache.set(_hashed_key(key), packed, real_timeout) def cache_get(key): """ Wrapper for ``cache.get``. The expiry time for the cache entry is stored with the entry. If the expiry time has past, put the stale entry back into cache, and don't return it to trigger a fake cache miss. """ packed = cache.get(_hashed_key(key)) if packed is None: return None value, refresh_time, refreshed = packed if (time() > refresh_time) and not refreshed: cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True) return None return value @lru_cache(maxsize=None) def cache_installed(): """ Returns ``True`` if a cache backend is configured, and the cache middleware classes or subclasses thereof are present. This will be evaluated once per run, and then cached. """ has_key = bool(getattr(settings, "NEVERCACHE_KEY", "")) return (has_key and settings.CACHES and not settings.TESTING and middlewares_or_subclasses_installed([ "mezzanine.core.middleware.UpdateCacheMiddleware", "mezzanine.core.middleware.FetchFromCacheMiddleware", ])) def cache_key_prefix(request): """ Cache key for Mezzanine's cache middleware. Adds the current site ID. """ cache_key = "%s.%s.%s" % ( settings.CACHE_MIDDLEWARE_KEY_PREFIX, current_site_id(), # This last part used to indicate the device type for the request, # but device detection was removed in Mezzanine 4.3. # The "default" value was kept to maintain existing cache keys. # See: https://github.com/stephenmcd/mezzanine/pull/1783 "default", ) return _i18n_cache_key_suffix(request, cache_key) def nevercache_token(): """ Returns the secret token that delimits content wrapped in the ``nevercache`` template tag. """ return "nevercache." + settings.NEVERCACHE_KEY def add_cache_bypass(url): """ Adds the current time to the querystring of the URL to force a cache reload. Used for when a form post redirects back to a page that should display updated content, such as new comments or ratings. """ if not cache_installed(): return url hash_str = "" if "#" in url: url, hash_str = url.split("#", 1) hash_str = "#" + hash_str url += "?" if "?" not in url else "&" return url + "t=" + str(time()).replace(".", "") + hash_str
bsd-2-clause
-5,506,201,475,160,155,000
9,046,627,836,681,373,000
33.280702
74
0.668628
false
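A hedged usage sketch of the stale-entry scheme implemented by cache_set/cache_get in mezzanine/utils/cache.py above. It assumes a configured Django cache backend and Mezzanine settings; the key, value and timeout are illustrative.

from mezzanine.utils.cache import cache_set, cache_get

cache_set("homepage-fragment", "<rendered html>", timeout=60)
value = cache_get("homepage-fragment")
if value is None:
    # Either a genuine miss, or the packed expiry has passed and this caller
    # was elected to rebuild the entry while other callers keep receiving the
    # stale copy for CACHE_SET_DELAY_SECONDS.
    value = "<rendered html>"  # rebuild the expensive content here
    cache_set("homepage-fragment", value, timeout=60)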
kc-lab/dms2dfe
dms2dfe/lib/io_data_files.py
2
14758
#!usr/bin/python # Copyright 2016, Rohan Dandage <rraadd_8@hotmail.com,rohan@igib.in> # This program is distributed under General Public License v. 3. """ ================================ ``io_data_files`` ================================ """ import sys import pandas as pd from os.path import exists,basename,abspath,dirname,expanduser import logging from glob import glob import numpy as np from dms2dfe.lib.io_seq_files import get_fsta_feats logging.basicConfig(format='[%(asctime)s] %(levelname)s\tfrom %(filename)s in %(funcName)s(..): %(message)s',level=logging.DEBUG) # filename=cfg_xls_fh+'.log' import pickle ## DEFS def is_cfg_ok(cfg_dh,cfgs) : """ Checks if the required files are present in given directory. :param cfg_dh: path to directory. :param cfgs: list of names of files. """ cfg_dh_cfgs=glob(cfg_dh+"/*") cfg_dh_cfgs=[basename(cfg_dh_cfg) for cfg_dh_cfg in cfg_dh_cfgs] for cfg in cfgs : # check if required sheets are present if not cfg in cfg_dh_cfgs : logging.error("%s does not exist" % cfg) return False break return True def auto_find_missing_paths(prj_dh): """ Finds the missing paths in the configuration given in cfg/ directory :param prj_dh: path to the project directory """ info=pd.read_csv(prj_dh+"/cfg/info") info_path_vars=[varn for varn in info['varname'] if ("_fh" in varn) or ("_dh" in varn)] info=info.set_index("varname") #find pdb_fh and fsta_fh in prj_dh if pd.isnull(info.loc["pdb_fh","input"]): try: info.loc["pdb_fh","input"]=glob("%s/*.pdb" % prj_dh)[0] except: logging.error("can not find .pdb file") if pd.isnull(info.loc["fsta_fh","input"]): try: fsta_fhs=glob("%s/*.fasta" % prj_dh) for fsta_fh in fsta_fhs: if not (('prt' in fsta_fh) or ('_cctmr1.' in fsta_fh)): info.loc["fsta_fh","input"]=fsta_fh break except: logging.error("could not find .fasta file") info_paths=[info.loc[info_path_var,"input"] for info_path_var in info_path_vars] info.reset_index().to_csv(prj_dh+"/cfg/info",index=False) # if any(pd.isnull(info_paths)): info_paths_missing=[v for v in info_path_vars if (pd.isnull(info.loc[v,"input"]) and info.loc[v,"default"])] if len(info_paths_missing)>0: logging.error("Values for following variables are missing in 'project_dir/cfg/info' file.") # print [p for p in info_paths if pd.isnull(p)] print info_paths_missing sys.exit() def get_raw_input(info,var): """ Get intearactive inputs from user :param info: dict, with information about experiment :param var: variable whose value is obtained from interactive shell """ # from dms2dfe.lib.io_dfs import set_index # info=set_index(info,'var') val=raw_input("%s: %s (default: %s) =" % (var,info.loc[var, "description"],info.loc[var, "default"])) return val from dms2dfe.lib.io_seq_files import cctmr_fasta2ref_fasta def info2src(prj_dh): """ This converts `.csv` configuration file to `.py` source file saved in `/tmp/`. 
:param prj_dh: path to project directory """ import subprocess from dms2dfe.lib.io_seq_files import fasta_nts2prt csv2src("%s/../cfg/info" % abspath(dirname(__file__)),"%s/../tmp/info.py" % (abspath(dirname(__file__)))) auto_find_missing_paths(prj_dh) info=pd.read_csv(prj_dh+"/cfg/info") # info=auto_find_missing_paths(prj_dh) info_path_vars=[varn for varn in info['varname'] if ("_fh" in varn) or ("_dh" in varn)] info=info.set_index("varname") # find still missing paths ones info_paths=[info.loc[info_path_var,"input"] for info_path_var in info_path_vars] for info_path_var,info_path in zip(info_path_vars,info_paths): # if not exists(info_path): if not ('bowtie' in info_path): if not exists(info_path): if info_path_var=='rscript_fh': info_path = subprocess.check_output(["which", "Rscript"]).replace('\n','') # print info_path while not exists(info_path): logging.error('Path to files do not exist. Include correct path in cfg/info. %s : %s' % (info_path_var,info_path)) info_path=get_raw_input(info,info_path_var) info.loc[info_path_var,'input']=info_path if not pd.isnull(info.loc['cctmr','input']): cctmr=info.loc['cctmr','input'] cctmr=[int("%s" % i) for i in cctmr.split(" ")] fsta_fh=cctmr_fasta2ref_fasta(info.loc['fsta_fh','input'],cctmr) else: fsta_fh=info.loc['fsta_fh','input'] info.loc['prj_dh','input']=abspath(prj_dh) info.loc['fsta_id','input'],info.loc['fsta_seq','input'],info.loc['fsta_len','input']=get_fsta_feats(fsta_fh) host=info.loc['host','input'] if pd.isnull(host): host=info.loc['host','default'] info.loc['prt_seq','input']=fasta_nts2prt(fsta_fh,host=host).replace('*','X') info.reset_index().to_csv(prj_dh+"/cfg/info",index=False) csv2src(prj_dh+"/cfg/info","%s/../tmp/info.py" % (abspath(dirname(__file__)))) csv2src(prj_dh+"/cfg/info",prj_dh+"/cfg/info.py") logging.info("configuration compiled: %s/cfg/info" % prj_dh) def csv2src(csv_fh,src_fh): """ This writes `.csv` to `.py` source file. :param csv_fh: path to input `.csv` file. :param src_fh: path to output `.py` source file. """ info=pd.read_csv(csv_fh) info=info.set_index('varname') src_f=open(src_fh,'w') src_f.write("#!usr/bin/python\n") src_f.write("\n") src_f.write("# source file for dms2dfe's configuration \n") src_f.write("\n") for var in info.iterrows() : val=info['input'][var[0]] if pd.isnull(val): val=info['default'][var[0]] src_f.write("%s='%s' #%s\n" % (var[0],val,info["description"][var[0]])) src_f.close() def raw_input2info(prj_dh,inputORdefault): """ This writes configuration `.csv` file from `raw_input` from prompt. :param prj_dh: path to project directory. :param inputORdefault: column name "input" or "default". """ info=pd.read_csv(prj_dh+"/cfg/info") info=info.set_index("varname",drop=True) for var in info.index.values: val=raw_input("%s (default: %s) =" % (info.loc[var, "description"],info.loc[var, "default"])) if not val=='': info.loc[var, inputORdefault]=val info.reset_index().to_csv("%s/cfg/info" % prj_dh, index=False) def is_xls_ok(cfg_xls,cfg_xls_sheetnames_required) : """ Checks if the required sheets are present in the configuration excel file. 
:param cfg_xls: path to configuration excel file """ cfg_xls_sheetnames=cfg_xls.sheet_names cfg_xls_sheetnames= [str(x) for x in cfg_xls_sheetnames]# unicode to str for qry_sheet_namei in cfg_xls_sheetnames_required : # check if required sheets are present #qry_sheet_namei=str(qry_sheet_namei) if not qry_sheet_namei in cfg_xls_sheetnames : logging.error("pipeline : sheetname '%s' does not exist" % qry_sheet_namei) return False break return True def is_info_ok(xls_fh): """ This checks the sanity of info sheet in the configuration excel file. For example if the files exists or not. :param cfg_xls: path to configuration excel file """ info=pd.read_excel(xls_fh,'info') info_path_vars=[varn for varn in info['varname'] if ("_fh" in varn) or ("_dh" in varn)] info=info.set_index("varname") info_paths=[info.loc[info_path_var,"input"] for info_path_var in info_path_vars] for info_path in info_paths: if not pd.isnull(info_path): if not exists(info_path): return False #(info_path_vars[info_paths.index(info_path)],info_path) break return True def xls2h5(cfg_xls,cfg_h5,cfg_xls_sheetnames_required) : """ Converts configuration excel file to HDF5(h5) file. Here sheets in excel files are converted to groups in HDF5 file. :param cfg_xls: path to configuration excel file """ for qry_sheet_namei in cfg_xls_sheetnames_required: qry_sheet_df=cfg_xls.parse(qry_sheet_namei) qry_sheet_df=qry_sheet_df.astype(str) # suppress unicode error qry_sheet_df.columns=[col.replace(" ","_") for col in qry_sheet_df.columns] cfg_h5.put("cfg/"+qry_sheet_namei,convert2h5form(qry_sheet_df), format='table', data_columns=True) return cfg_h5 def xls2csvs(cfg_xls,cfg_xls_sheetnames_required,output_dh): """ Converts configuration excel file to HDF5(h5) file. Here sheets in excel files are converted to groups in HDF5 file. :param cfg_xls: path to configuration excel file """ for qry_sheet_namei in cfg_xls_sheetnames_required: qry_sheet_df=cfg_xls.parse(qry_sheet_namei) qry_sheet_df=qry_sheet_df.astype(str) # suppress unicode error qry_sheet_df.to_csv("%s/%s" % (output_dh,qry_sheet_namei)) # print "%s/%s" % (output_dh,qry_sheet_namei) def convert2h5form(df): """ Convert dataframe compatible to Hdf5 format :param df: pandas dataframe """ from dms2dfe.lib.io_strs import convertstr2format df.columns=[convertstr2format(col,"^[a-zA-Z0-9_]*$") for col in df.columns.tolist()] return df def csvs2h5(dh,sub_dh_list,fn_list,output_dh,cfg_h5): """ This converts the csv files to tables in HDF5. :param dh: path to the directory with csv files :param fn_list: list of filenames of the csv files """ for fn in fn_list: for sub_dh in sub_dh_list : # get aas or cds fh=output_dh+"/"+dh+"/"+sub_dh+"/"+fn+"" df=pd.read_csv(fh) # get mat to df df=df.loc[:,[col.replace(" ","_") for col in list(df.columns) if not (('index' in col) or ('Unnamed' in col)) ]] exec("cfg_h5.put('%s/%s/%s',df, format='table', data_columns=True)" % (dh,sub_dh,str(fn)),locals(), globals()) # store the otpts in h5 eg. cds/N/lbl # print("cfg_h5.put('%s/%s/%s',df.convert_objects(), format='table', data_columns=True)" % (dh,sub_dh,str(fn))) # store the otpts in h5 eg. cds/N/lbl def csvs2h5(dh,sub_dh_list,fn_list): """ This converts csvs into HDF5 tables. 
:param dh: path to the directory with csv files :param fn_list: list of filenames of the csv files """ for fn in fn_list: for sub_dh in sub_dh_list : # get aas or cds fh=output_dh+"/"+dh+"/"+sub_dh+"/"+fn+"" key=dh+"/"+sub_dh+"/"+fn if (exists(fh)) and (key in cfg_h5): df=pd.read_csv(fh) # get mat to df key=key+"2" cfg_h5.put(key,df.convert_objects(), format='table', data_columns=True) # store the otpts in h5 eg. cds/N/lbl #mut_lbl_fit_comparison def getusable_lbls_list(prj_dh): """ This detects the samples that can be processed. :param prj_dh: path to project directory. :returns lbls_list: list of names of samples that can be processed. """ lbls=pd.read_csv(prj_dh+'/cfg/lbls') lbls=lbls.set_index('varname') lbls_list=[] #data_lbl cols: NiA mutids NiS NiN NiNcut NiNcutlog NiScut NiScutlog NiAcut NiAcutlog for lbli,lbl in lbls.iterrows() : # print "%s/data_lbl/%s/%s" % (prj_dh,'aas',str(lbli)) if (not exists("%s/data_lbl/%s/%s" % (prj_dh,'aas',str(lbli)))): fh_1=expanduser(str(lbl['fhs_1'])) lbl_mat_mut_cds_fh=[fh for fh in glob(fh_1+"*") if '.mat_mut_cds' in fh] if len(lbl_mat_mut_cds_fh)!=0: lbl_mat_mut_cds_fh=lbl_mat_mut_cds_fh[0] lbls_list.append([lbli,lbl_mat_mut_cds_fh]) else : fh_1="%s/data_mutmat/%s" % (prj_dh,basename(fh_1)) # print fh_1 lbl_mat_mut_cds_fh=[fh for fh in glob(fh_1+"*") if '.mat_mut_cds' in fh] if len(lbl_mat_mut_cds_fh)!=0: lbl_mat_mut_cds_fh=lbl_mat_mut_cds_fh[0] lbls_list.append([lbli,lbl_mat_mut_cds_fh]) else: logging.warning("can not find: %s" % fh_1) # else: # logging.info("already processed: %s" % (str(lbli))) return lbls_list def getusable_fits_list(prj_dh,data_fit_dh='data_fit'): """ This gets the list of samples that can be processed for fitness estimations. :param prj_dh: path to project directory. :returns fits_pairs_list: list of tuples with names of input and selected samples. """ if exists('%s/cfg/fit'% (prj_dh)): fits=pd.read_csv(prj_dh+'/cfg/fit') if "Unnamed: 0" in fits.columns: fits=fits.drop("Unnamed: 0", axis=1) fits_pairs_list=[] sel_cols=[col for col in fits.columns.tolist() if "sel_" in col] for pairi in fits.index.values : unsel_lbl=fits.loc[pairi,"unsel"] sels=list(fits.loc[pairi,sel_cols]) # print sels for sel_lbl in sels : if not pd.isnull(sel_lbl): fit_lbl=sel_lbl+"_WRT_"+unsel_lbl if (not exists("%s/%s/%s/%s" % (prj_dh,data_fit_dh,'aas',fit_lbl))): fits_pairs_list.append([unsel_lbl,sel_lbl]) else : logging.info("already processed: %s" % (fit_lbl)) return fits_pairs_list else: logging.warning("ana3_mutmat2fit : getusable_fits_list : not fits in cfg/fit") return [] def getusable_comparison_list(prj_dh): """ This converts the table of tests and controls in configuration file into tuples of test and control. :param prj_dh: path to project directory. """ comparisons=pd.read_csv(prj_dh+'/cfg/comparison') comparisons=comparisons.set_index('ctrl') comparison_list=[] for ctrl,row in comparisons.iterrows() : row=row[~row.isnull()] for test in row[0:] : comparison_list.append([ctrl,test]) return comparison_list def to_pkl(data,fh): """ Saves a dict in pkl format :param data: dict, containing data :param fh: path to the output pkl file """ if not fh is None: with open(fh, 'wb') as f: pickle.dump(data, f, -1) def read_pkl(fh): """ Reads a file in pkl format :param fh: path to the pkl file :returns data: dict, containing data """ with open(fh,'rb') as f: return pickle.load(f)
gpl-3.0
-6,398,850,885,917,340,000
-5,767,345,043,204,563,000
38.778976
169
0.596151
false
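A short usage sketch for the pickle helpers defined at the end of io_data_files.py above, assuming the dms2dfe package is importable; the file name and payload are illustrative.

from dms2dfe.lib.io_data_files import to_pkl, read_pkl

data = {"fsta_len": 1200, "host": "coli"}
to_pkl(data, "example_cfg.pkl")
assert read_pkl("example_cfg.pkl") == data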
citrix-openstack-build/neutron-vpnaas
tools/install_venv.py
102
2304
#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh <your command> Also, make test will automatically use the virtualenv. """ print(help) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': main(sys.argv)
apache-2.0
-6,310,816,014,373,771,000
-2,329,746,108,414,587,400
31
79
0.711806
false
jsjohnst/tornado
tornado/httpserver.py
96
11915
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded HTTP server. Typical applications have little direct interaction with the `HTTPServer` class except to start a server at the beginning of the process (and even that is often done indirectly via `tornado.web.Application.listen`). .. versionchanged:: 4.0 The ``HTTPRequest`` class that used to live in this module has been moved to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. """ from __future__ import absolute_import, division, print_function, with_statement import socket from tornado.escape import native_str from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters from tornado import gen from tornado import httputil from tornado import iostream from tornado import netutil from tornado.tcpserver import TCPServer from tornado.util import Configurable class HTTPServer(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate): r"""A non-blocking, single-threaded HTTP server. A server is defined by a subclass of `.HTTPServerConnectionDelegate`, or, for backwards compatibility, a callback that takes an `.HTTPServerRequest` as an argument. The delegate is usually a `tornado.web.Application`. `HTTPServer` supports keep-alive connections by default (automatically for HTTP/1.1, or for HTTP/1.0 when the client requests ``Connection: keep-alive``). If ``xheaders`` is ``True``, we support the ``X-Real-Ip``/``X-Forwarded-For`` and ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the remote IP and URI scheme/protocol for all requests. These headers are useful when running Tornado behind a reverse proxy or load balancer. The ``protocol`` argument can also be set to ``https`` if Tornado is run behind an SSL-decoding proxy that does not set one of the supported ``xheaders``. To make this server serve SSL traffic, send the ``ssl_options`` keyword argument with an `ssl.SSLContext` object. For compatibility with older versions of Python ``ssl_options`` may also be a dictionary of keyword arguments for the `ssl.wrap_socket` method.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), os.path.join(data_dir, "mydomain.key")) HTTPServer(applicaton, ssl_options=ssl_ctx) `HTTPServer` initialization follows one of three patterns (the initialization methods are defined on `tornado.tcpserver.TCPServer`): 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process:: server = HTTPServer(app) server.listen(8888) IOLoop.current().start() In many cases, `tornado.web.Application.listen` can be used to avoid the need to explicitly create the `HTTPServer`. 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`: simple multi-process:: server = HTTPServer(app) server.bind(8888) server.start(0) # Forks multiple sub-processes IOLoop.current().start() When using this interface, an `.IOLoop` must *not* be passed to the `HTTPServer` constructor. 
`~.TCPServer.start` will always start the server on the default singleton `.IOLoop`. 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process:: sockets = tornado.netutil.bind_sockets(8888) tornado.process.fork_processes(0) server = HTTPServer(app) server.add_sockets(sockets) IOLoop.current().start() The `~.TCPServer.add_sockets` interface is more complicated, but it can be used with `tornado.process.fork_processes` to give you more flexibility in when the fork happens. `~.TCPServer.add_sockets` can also be used in single-process servers if you want to create your listening sockets in some way other than `tornado.netutil.bind_sockets`. .. versionchanged:: 4.0 Added ``decompress_request``, ``chunk_size``, ``max_header_size``, ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` arguments. Added support for `.HTTPServerConnectionDelegate` instances as ``request_callback``. .. versionchanged:: 4.1 `.HTTPServerConnectionDelegate.start_request` is now called with two arguments ``(server_conn, request_conn)`` (in accordance with the documentation) instead of one ``(request_conn)``. .. versionchanged:: 4.2 `HTTPServer` is now a subclass of `tornado.util.Configurable`. """ def __init__(self, *args, **kwargs): # Ignore args to __init__; real initialization belongs in # initialize since we're Configurable. (there's something # weird in initialization order between this class, # Configurable, and TCPServer so we can't leave __init__ out # completely) pass def initialize(self, request_callback, no_keep_alive=False, io_loop=None, xheaders=False, ssl_options=None, protocol=None, decompress_request=False, chunk_size=None, max_header_size=None, idle_connection_timeout=None, body_timeout=None, max_body_size=None, max_buffer_size=None): self.request_callback = request_callback self.no_keep_alive = no_keep_alive self.xheaders = xheaders self.protocol = protocol self.conn_params = HTTP1ConnectionParameters( decompress=decompress_request, chunk_size=chunk_size, max_header_size=max_header_size, header_timeout=idle_connection_timeout or 3600, max_body_size=max_body_size, body_timeout=body_timeout) TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, max_buffer_size=max_buffer_size, read_chunk_size=chunk_size) self._connections = set() @classmethod def configurable_base(cls): return HTTPServer @classmethod def configurable_default(cls): return HTTPServer @gen.coroutine def close_all_connections(self): while self._connections: # Peek at an arbitrary element of the set conn = next(iter(self._connections)) yield conn.close() def handle_stream(self, stream, address): context = _HTTPRequestContext(stream, address, self.protocol) conn = HTTP1ServerConnection( stream, self.conn_params, context) self._connections.add(conn) conn.start_serving(self) def start_request(self, server_conn, request_conn): return _ServerRequestAdapter(self, server_conn, request_conn) def on_close(self, server_conn): self._connections.remove(server_conn) class _HTTPRequestContext(object): def __init__(self, stream, address, protocol): self.address = address # Save the socket's address family now so we know how to # interpret self.address even after the stream is closed # and its socket attribute replaced with None. if stream.socket is not None: self.address_family = stream.socket.family else: self.address_family = None # In HTTPServerRequest we want an IP, not a full socket address. 
if (self.address_family in (socket.AF_INET, socket.AF_INET6) and address is not None): self.remote_ip = address[0] else: # Unix (or other) socket; fake the remote address. self.remote_ip = '0.0.0.0' if protocol: self.protocol = protocol elif isinstance(stream, iostream.SSLIOStream): self.protocol = "https" else: self.protocol = "http" self._orig_remote_ip = self.remote_ip self._orig_protocol = self.protocol def __str__(self): if self.address_family in (socket.AF_INET, socket.AF_INET6): return self.remote_ip elif isinstance(self.address, bytes): # Python 3 with the -bb option warns about str(bytes), # so convert it explicitly. # Unix socket addresses are str on mac but bytes on linux. return native_str(self.address) else: return str(self.address) def _apply_xheaders(self, headers): """Rewrite the ``remote_ip`` and ``protocol`` fields.""" # Squid uses X-Forwarded-For, others use X-Real-Ip ip = headers.get("X-Forwarded-For", self.remote_ip) ip = ip.split(',')[-1].strip() ip = headers.get("X-Real-Ip", ip) if netutil.is_valid_ip(ip): self.remote_ip = ip # AWS uses X-Forwarded-Proto proto_header = headers.get( "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)) if proto_header in ("http", "https"): self.protocol = proto_header def _unapply_xheaders(self): """Undo changes from `_apply_xheaders`. Xheaders are per-request so they should not leak to the next request on the same connection. """ self.remote_ip = self._orig_remote_ip self.protocol = self._orig_protocol class _ServerRequestAdapter(httputil.HTTPMessageDelegate): """Adapts the `HTTPMessageDelegate` interface to the interface expected by our clients. """ def __init__(self, server, server_conn, request_conn): self.server = server self.connection = request_conn self.request = None if isinstance(server.request_callback, httputil.HTTPServerConnectionDelegate): self.delegate = server.request_callback.start_request( server_conn, request_conn) self._chunks = None else: self.delegate = None self._chunks = [] def headers_received(self, start_line, headers): if self.server.xheaders: self.connection.context._apply_xheaders(headers) if self.delegate is None: self.request = httputil.HTTPServerRequest( connection=self.connection, start_line=start_line, headers=headers) else: return self.delegate.headers_received(start_line, headers) def data_received(self, chunk): if self.delegate is None: self._chunks.append(chunk) else: return self.delegate.data_received(chunk) def finish(self): if self.delegate is None: self.request.body = b''.join(self._chunks) self.request._parse_body() self.server.request_callback(self.request) else: self.delegate.finish() self._cleanup() def on_connection_close(self): if self.delegate is None: self._chunks = None else: self.delegate.on_connection_close() self._cleanup() def _cleanup(self): if self.server.xheaders: self.connection.context._unapply_xheaders() HTTPRequest = httputil.HTTPServerRequest
apache-2.0
541,733,646,049,881,150
-4,319,623,675,377,579,000
38.194079
84
0.641376
false
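A minimal, hedged example of the single-process pattern described in the HTTPServer docstring above; the handler, port and xheaders choice are illustrative, not prescribed by the module.

import tornado.httpserver
import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello")

app = tornado.web.Application([(r"/", MainHandler)])
server = tornado.httpserver.HTTPServer(app, xheaders=True)
server.listen(8888)  # equivalent to bind(8888) followed by start(1)
tornado.ioloop.IOLoop.current().start()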
helifu/kudu
python/kudu/tests/test_scanner.py
2
14089
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import division from kudu.compat import unittest from kudu.tests.util import TestScanBase from kudu.tests.common import KuduTestBase, TimeoutError import kudu import datetime import time import pytest class TestScanner(TestScanBase): @classmethod def setUpClass(self): super(TestScanner, self).setUpClass() def setUp(self): pass def test_scan_rows_basic(self): # Let's scan with no predicates scanner = self.table.scanner().open() tuples = scanner.read_all_tuples() self.assertEqual(sorted(tuples), self.tuples) def test_scan_rows_simple_predicate(self): key = self.table['key'] preds = [key > 19, key < 50] def _read_predicates(preds): scanner = self.table.scanner() scanner.add_predicates(preds) scanner.open() return scanner.read_all_tuples() tuples = _read_predicates(preds) self.assertEqual(sorted(tuples), self.tuples[20:50]) # verify predicates reusable tuples = _read_predicates(preds) self.assertEqual(sorted(tuples), self.tuples[20:50]) def test_scan_limit(self): # Set limits both below and above the max number of rows. 
limits = [self.nrows - 1, self.nrows, self.nrows + 1] for limit in limits: scanner = self.table.scanner() scanner.set_limit(limit) tuples = scanner.read_all_tuples() self.assertEqual(len(tuples), min(limit, self.nrows)) def test_scan_rows_string_predicate_and_projection(self): scanner = self.table.scanner() scanner.set_projected_column_names(['key', 'string_val']) sv = self.table['string_val'] scanner.add_predicates([sv >= 'hello_20', sv <= 'hello_22']) scanner.set_fault_tolerant() scanner.open() tuples = scanner.read_all_tuples() self.assertEqual(sorted(tuples), [(20, 'hello_20'), (22, 'hello_22')]) def test_scan_rows_in_list_predicate(self): """ Test scanner with an InList predicate and a string comparison predicate """ key_list = [2, 98] scanner = self.table.scanner() scanner.set_fault_tolerant()\ .add_predicates([ self.table[0].in_list(key_list), self.table['string_val'] >= 'hello_9' ]) scanner.open() tuples = scanner.read_all_tuples() self.assertEqual(tuples, [self.tuples[98]]) def test_scan_rows_is_not_null_predicate(self): """ Test scanner with an IsNotNull predicate on string_val column """ pred = self.table['string_val'].is_not_null() scanner = self.table.scanner() scanner.add_predicate(pred) scanner.open() tuples = scanner.read_all_tuples() rows = [i for i in range(100) if i % 2 == 0] self.assertEqual(sorted(tuples), [self.tuples[i] for i in rows]) def test_scan_rows_is_null_predicate(self): """ Test scanner with an IsNull predicate on string_val column """ pred = self.table['string_val'].is_null() scanner = self.table.scanner() scanner.add_predicate(pred) scanner.open() tuples = scanner.read_all_tuples() rows = [i for i in range(100) if i % 2 != 0] self.assertEqual(sorted(tuples), [self.tuples[i] for i in rows]) def test_index_projection_with_schema(self): scanner = self.table.scanner() scanner.set_projected_column_indexes([0, 1]) scanner.set_fault_tolerant() scanner.open() tuples = scanner.read_all_tuples() # Build schema to check against builder = kudu.schema_builder() builder.add_column('key', kudu.int32, nullable=False) builder.add_column('int_val', kudu.int32) builder.set_primary_keys(['key']) expected_schema = builder.build() # Build new schema from projection schema builder = kudu.schema_builder() for col in scanner.get_projection_schema(): builder.copy_column(col) builder.set_primary_keys(['key']) new_schema = builder.build() self.assertEqual(tuples, [t[0:2] for t in self.tuples]) self.assertTrue(expected_schema.equals(new_schema)) def test_scan_with_bounds(self): scanner = self.table.scanner() scanner.set_fault_tolerant()\ .add_lower_bound({'key': 50})\ .add_exclusive_upper_bound({'key': 55}) scanner.open() tuples = scanner.read_all_tuples() self.assertEqual(sorted(tuples), self.tuples[50:55]) def test_scan_invalid_predicates(self): scanner = self.table.scanner() sv = self.table['string_val'] with self.assertRaises(TypeError): scanner.add_predicates([sv >= None]) with self.assertRaises(TypeError): scanner.add_predicates([sv >= 1]) with self.assertRaises(TypeError): scanner.add_predicates([sv.in_list(['testing', datetime.datetime.utcnow()])]) with self.assertRaises(TypeError): scanner.add_predicates([sv.in_list([ 'hello_20', 120 ])]) def test_scan_batch_by_batch(self): scanner = self.table.scanner() scanner.set_fault_tolerant() lower_bound = scanner.new_bound() lower_bound['key'] = 10 scanner.add_lower_bound(lower_bound) upper_bound = scanner.new_bound() upper_bound['key'] = 90 scanner.add_exclusive_upper_bound(upper_bound) scanner.open() tuples = [] while 
scanner.has_more_rows(): batch = scanner.next_batch() tuples.extend(batch.as_tuples()) self.assertEqual(sorted(tuples), self.tuples[10:90]) def test_unixtime_micros(self): """ Test setting and getting unixtime_micros fields """ # Insert new rows self.insert_new_unixtime_micros_rows() # Validate results scanner = self.table.scanner() scanner.set_fault_tolerant().open() self.assertEqual(sorted(self.tuples), scanner.read_all_tuples()) def test_read_mode(self): """ Test scanning in latest, snapshot and read_your_writes read modes. """ # Delete row self.delete_insert_row_for_read_test() # Check scanner results prior to delete scanner = self.table.scanner() scanner.set_read_mode('snapshot')\ .set_snapshot(self.snapshot_timestamp)\ .open() self.assertEqual(sorted(self.tuples[1:]), sorted(scanner.read_all_tuples())) # Check scanner results after delete with latest mode timeout = time.time() + 10 check_tuples = [] while check_tuples != sorted(self.tuples): if time.time() > timeout: raise TimeoutError("Could not validate results in allocated" + "time.") scanner = self.table.scanner() scanner.set_read_mode(kudu.READ_LATEST)\ .open() check_tuples = sorted(scanner.read_all_tuples()) # Avoid tight looping time.sleep(0.05) # Check scanner results after delete with read_your_writes mode scanner = self.table.scanner() scanner.set_read_mode('read_your_writes')\ .open() self.assertEqual(sorted(self.tuples), sorted(scanner.read_all_tuples())) def test_resource_metrics_and_cache_blocks(self): """ Test getting the resource metrics after scanning and setting the scanner to not cache blocks. """ # Build scanner and read through all batches and retrieve metrics. scanner = self.table.scanner() scanner.set_fault_tolerant().set_cache_blocks(False).open() scanner.read_all_tuples() metrics = scanner.get_resource_metrics() # Confirm that the scanner returned cache hit and miss values. self.assertTrue('cfile_cache_hit_bytes' in metrics) self.assertTrue('cfile_cache_miss_bytes' in metrics) def verify_pred_type_scans(self, preds, row_indexes, count_only=False): # Using the incoming list of predicates, verify that the row returned # matches the inserted tuple at the row indexes specified in a # slice object scanner = self.type_table.scanner() scanner.set_fault_tolerant() scanner.add_predicates(preds) scanner.set_projected_column_names(self.projected_names_w_o_float) tuples = scanner.open().read_all_tuples() # verify rows if count_only: self.assertEqual(len(self.type_test_rows[row_indexes]), len(tuples)) else: self.assertEqual(sorted(self.type_test_rows[row_indexes]), tuples) def test_unixtime_micros_pred(self): # Test unixtime_micros value predicate self._test_unixtime_micros_pred() def test_bool_pred(self): # Test a boolean value predicate self._test_bool_pred() def test_double_pred(self): # Test a double precision float predicate self._test_double_pred() def test_float_pred(self): # Test a single precision float predicate # Does a row check count only self._test_float_pred() def test_decimal_pred(self): if kudu.CLIENT_SUPPORTS_DECIMAL: # Test a decimal predicate self._test_decimal_pred() def test_binary_pred(self): # Test a binary predicate self._test_binary_pred() def test_scan_selection(self): """ This test confirms that setting the scan selection policy on the scanner does not cause any errors. There is no way to confirm that the policy was actually set. This functionality is tested in the C++ test: ClientTest.TestReplicatedMultiTabletTableFailover. 
""" for policy in ['leader', kudu.CLOSEST_REPLICA, 2]: scanner = self.table.scanner() scanner.set_selection(policy) scanner.open() self.assertEqual(sorted(scanner.read_all_tuples()), sorted(self.tuples)) @pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS), reason="Pandas required to run this test.") def test_scanner_to_pandas_types(self): """ This test confirms that data types are converted as expected to Pandas. """ import numpy as np scanner = self.type_table.scanner() df = scanner.to_pandas() types = df.dtypes if kudu.CLIENT_SUPPORTS_DECIMAL: self.assertEqual(types[0], np.int64) self.assertEqual(types[1], 'datetime64[ns, UTC]') self.assertEqual(types[2], np.object) self.assertEqual(types[3], np.object) self.assertEqual(types[4], np.bool) self.assertEqual(types[5], np.float64) self.assertEqual(types[6], np.int8) self.assertEqual(types[7], np.object) self.assertEqual(types[8], np.float32) else: self.assertEqual(types[0], np.int64) self.assertEqual(types[1], 'datetime64[ns, UTC]') self.assertEqual(types[2], np.object) self.assertEqual(types[3], np.bool) self.assertEqual(types[4], np.float64) self.assertEqual(types[5], np.int8) self.assertEqual(types[6], np.object) self.assertEqual(types[7], np.float32) @pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS), reason="Pandas required to run this test.") def test_scanner_to_pandas_row_count(self): """ This test confirms that the record counts match between Pandas and the scanner. """ scanner = self.type_table.scanner() scanner_count = len(scanner.read_all_tuples()) scanner = self.type_table.scanner() df = scanner.to_pandas() self.assertEqual(scanner_count, df.shape[0]) @pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS), reason="Pandas required to run this test.") def test_scanner_to_pandas_index(self): """ This test confirms that an index is correctly applied. """ scanner = self.type_table.scanner() df = scanner.to_pandas(index='key') self.assertEqual(df.index.name, 'key') self.assertEqual(list(df.index), [1, 2]) @pytest.mark.skipif((not(kudu.CLIENT_SUPPORTS_PANDAS) or (not(kudu.CLIENT_SUPPORTS_DECIMAL))), reason="Pandas and Decimal support required to run this test.") def test_scanner_to_pandas_index(self): """ This test confirms that a decimal column is coerced to a double when specified. """ import numpy as np scanner = self.type_table.scanner() df = scanner.to_pandas(coerce_float=True) types = df.dtypes self.assertEqual(types[2], np.float64)
apache-2.0
-276,991,004,454,077,860
5,055,410,154,559,136,000
34.578283
87
0.605153
false
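A hedged sketch of the scanner calls exercised by the tests above, written against the kudu-python client; the master address, table name and column names are assumptions, not taken from the test file.

import kudu

client = kudu.connect(host="kudu-master.example.com", port=7051)
table = client.table("my_table")
scanner = table.scanner()
scanner.set_fault_tolerant()
scanner.add_predicates([table["key"] > 19, table["key"] < 50])
scanner.set_projected_column_names(["key", "string_val"])
scanner.open()
rows = scanner.read_all_tuples()  # list of tuples, one per matching row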
nikolhm/Pokus
knownpaths.py
1
9583
import ctypes, sys from ctypes import windll, wintypes from uuid import UUID class GUID(ctypes.Structure): # [1] _fields_ = [ ("Data1", wintypes.DWORD), ("Data2", wintypes.WORD), ("Data3", wintypes.WORD), ("Data4", wintypes.BYTE * 8) ] def __init__(self, uuid_): ctypes.Structure.__init__(self) self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest = uuid_.fields for i in range(2, 8): self.Data4[i] = rest>>(8 - i - 1)*8 & 0xff class FOLDERID: # [2] AccountPictures = UUID('{008ca0b1-55b4-4c56-b8a8-4de4b299d3be}') AdminTools = UUID('{724EF170-A42D-4FEF-9F26-B60E846FBA4F}') ApplicationShortcuts = UUID('{A3918781-E5F2-4890-B3D9-A7E54332328C}') CameraRoll = UUID('{AB5FB87B-7CE2-4F83-915D-550846C9537B}') CDBurning = UUID('{9E52AB10-F80D-49DF-ACB8-4330F5687855}') CommonAdminTools = UUID('{D0384E7D-BAC3-4797-8F14-CBA229B392B5}') CommonOEMLinks = UUID('{C1BAE2D0-10DF-4334-BEDD-7AA20B227A9D}') CommonPrograms = UUID('{0139D44E-6AFE-49F2-8690-3DAFCAE6FFB8}') CommonStartMenu = UUID('{A4115719-D62E-491D-AA7C-E74B8BE3B067}') CommonStartup = UUID('{82A5EA35-D9CD-47C5-9629-E15D2F714E6E}') CommonTemplates = UUID('{B94237E7-57AC-4347-9151-B08C6C32D1F7}') Contacts = UUID('{56784854-C6CB-462b-8169-88E350ACB882}') Cookies = UUID('{2B0F765D-C0E9-4171-908E-08A611B84FF6}') Desktop = UUID('{B4BFCC3A-DB2C-424C-B029-7FE99A87C641}') DeviceMetadataStore = UUID('{5CE4A5E9-E4EB-479D-B89F-130C02886155}') Documents = UUID('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}') DocumentsLibrary = UUID('{7B0DB17D-9CD2-4A93-9733-46CC89022E7C}') Downloads = UUID('{374DE290-123F-4565-9164-39C4925E467B}') Favorites = UUID('{1777F761-68AD-4D8A-87BD-30B759FA33DD}') Fonts = UUID('{FD228CB7-AE11-4AE3-864C-16F3910AB8FE}') GameTasks = UUID('{054FAE61-4DD8-4787-80B6-090220C4B700}') History = UUID('{D9DC8A3B-B784-432E-A781-5A1130A75963}') ImplicitAppShortcuts = UUID('{BCB5256F-79F6-4CEE-B725-DC34E402FD46}') InternetCache = UUID('{352481E8-33BE-4251-BA85-6007CAEDCF9D}') Libraries = UUID('{1B3EA5DC-B587-4786-B4EF-BD1DC332AEAE}') Links = UUID('{bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968}') LocalAppData = UUID('{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}') LocalAppDataLow = UUID('{A520A1A4-1780-4FF6-BD18-167343C5AF16}') LocalizedResourcesDir = UUID('{2A00375E-224C-49DE-B8D1-440DF7EF3DDC}') Music = UUID('{4BD8D571-6D19-48D3-BE97-422220080E43}') MusicLibrary = UUID('{2112AB0A-C86A-4FFE-A368-0DE96E47012E}') NetHood = UUID('{C5ABBF53-E17F-4121-8900-86626FC2C973}') OriginalImages = UUID('{2C36C0AA-5812-4b87-BFD0-4CD0DFB19B39}') PhotoAlbums = UUID('{69D2CF90-FC33-4FB7-9A0C-EBB0F0FCB43C}') PicturesLibrary = UUID('{A990AE9F-A03B-4E80-94BC-9912D7504104}') Pictures = UUID('{33E28130-4E1E-4676-835A-98395C3BC3BB}') Playlists = UUID('{DE92C1C7-837F-4F69-A3BB-86E631204A23}') PrintHood = UUID('{9274BD8D-CFD1-41C3-B35E-B13F55A758F4}') Profile = UUID('{5E6C858F-0E22-4760-9AFE-EA3317B67173}') ProgramData = UUID('{62AB5D82-FDC1-4DC3-A9DD-070D1D495D97}') ProgramFiles = UUID('{905e63b6-c1bf-494e-b29c-65b732d3d21a}') ProgramFilesX64 = UUID('{6D809377-6AF0-444b-8957-A3773F02200E}') ProgramFilesX86 = UUID('{7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E}') ProgramFilesCommon = UUID('{F7F1ED05-9F6D-47A2-AAAE-29D317C6F066}') ProgramFilesCommonX64 = UUID('{6365D5A7-0F0D-45E5-87F6-0DA56B6A4F7D}') ProgramFilesCommonX86 = UUID('{DE974D24-D9C6-4D3E-BF91-F4455120B917}') Programs = UUID('{A77F5D77-2E2B-44C3-A6A2-ABA601054A51}') Public = UUID('{DFDF76A2-C82A-4D63-906A-5644AC457385}') PublicDesktop = UUID('{C4AA340D-F20F-4863-AFEF-F87EF2E6BA25}') PublicDocuments = 
UUID('{ED4824AF-DCE4-45A8-81E2-FC7965083634}') PublicDownloads = UUID('{3D644C9B-1FB8-4f30-9B45-F670235F79C0}') PublicGameTasks = UUID('{DEBF2536-E1A8-4c59-B6A2-414586476AEA}') PublicLibraries = UUID('{48DAF80B-E6CF-4F4E-B800-0E69D84EE384}') PublicMusic = UUID('{3214FAB5-9757-4298-BB61-92A9DEAA44FF}') PublicPictures = UUID('{B6EBFB86-6907-413C-9AF7-4FC2ABF07CC5}') PublicRingtones = UUID('{E555AB60-153B-4D17-9F04-A5FE99FC15EC}') PublicUserTiles = UUID('{0482af6c-08f1-4c34-8c90-e17ec98b1e17}') PublicVideos = UUID('{2400183A-6185-49FB-A2D8-4A392A602BA3}') QuickLaunch = UUID('{52a4f021-7b75-48a9-9f6b-4b87a210bc8f}') Recent = UUID('{AE50C081-EBD2-438A-8655-8A092E34987A}') RecordedTVLibrary = UUID('{1A6FDBA2-F42D-4358-A798-B74D745926C5}') ResourceDir = UUID('{8AD10C31-2ADB-4296-A8F7-E4701232C972}') Ringtones = UUID('{C870044B-F49E-4126-A9C3-B52A1FF411E8}') RoamingAppData = UUID('{3EB685DB-65F9-4CF6-A03A-E3EF65729F3D}') RoamedTileImages = UUID('{AAA8D5A5-F1D6-4259-BAA8-78E7EF60835E}') RoamingTiles = UUID('{00BCFC5A-ED94-4e48-96A1-3F6217F21990}') SampleMusic = UUID('{B250C668-F57D-4EE1-A63C-290EE7D1AA1F}') SamplePictures = UUID('{C4900540-2379-4C75-844B-64E6FAF8716B}') SamplePlaylists = UUID('{15CA69B3-30EE-49C1-ACE1-6B5EC372AFB5}') SampleVideos = UUID('{859EAD94-2E85-48AD-A71A-0969CB56A6CD}') SavedGames = UUID('{4C5C32FF-BB9D-43b0-B5B4-2D72E54EAAA4}') SavedSearches = UUID('{7d1d3a04-debb-4115-95cf-2f29da2920da}') Screenshots = UUID('{b7bede81-df94-4682-a7d8-57a52620b86f}') SearchHistory = UUID('{0D4C3DB6-03A3-462F-A0E6-08924C41B5D4}') SearchTemplates = UUID('{7E636BFE-DFA9-4D5E-B456-D7B39851D8A9}') SendTo = UUID('{8983036C-27C0-404B-8F08-102D10DCFD74}') SidebarDefaultParts = UUID('{7B396E54-9EC5-4300-BE0A-2482EBAE1A26}') SidebarParts = UUID('{A75D362E-50FC-4fb7-AC2C-A8BEAA314493}') SkyDrive = UUID('{A52BBA46-E9E1-435f-B3D9-28DAA648C0F6}') SkyDriveCameraRoll = UUID('{767E6811-49CB-4273-87C2-20F355E1085B}') SkyDriveDocuments = UUID('{24D89E24-2F19-4534-9DDE-6A6671FBB8FE}') SkyDrivePictures = UUID('{339719B5-8C47-4894-94C2-D8F77ADD44A6}') StartMenu = UUID('{625B53C3-AB48-4EC1-BA1F-A1EF4146FC19}') Startup = UUID('{B97D20BB-F46A-4C97-BA10-5E3608430854}') System = UUID('{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}') SystemX86 = UUID('{D65231B0-B2F1-4857-A4CE-A8E7C6EA7D27}') Templates = UUID('{A63293E8-664E-48DB-A079-DF759E0509F7}') UserPinned = UUID('{9E3995AB-1F9C-4F13-B827-48B24B6C7174}') UserProfiles = UUID('{0762D272-C50A-4BB0-A382-697DCD729B80}') UserProgramFiles = UUID('{5CD7AEE2-2219-4A67-B85D-6C9CE15660CB}') UserProgramFilesCommon = UUID('{BCBD3057-CA5C-4622-B42D-BC56DB0AE516}') Videos = UUID('{18989B1D-99B5-455B-841C-AB7C74E4DDFC}') VideosLibrary = UUID('{491E922F-5643-4AF4-A7EB-4E7A138D8174}') Windows = UUID('{F38BF404-1D43-42F2-9305-67DE0B28FC23}') class UserHandle: # [3] current = wintypes.HANDLE(0) common = wintypes.HANDLE(-1) _CoTaskMemFree = windll.ole32.CoTaskMemFree # [4] _CoTaskMemFree.restype= None _CoTaskMemFree.argtypes = [ctypes.c_void_p] _SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath # [5] [3] _SHGetKnownFolderPath.argtypes = [ ctypes.POINTER(GUID), wintypes.DWORD, wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p) ] class PathNotFoundException(Exception): pass def get_path(folderid, user_handle=UserHandle.common): fid = GUID(folderid) pPath = ctypes.c_wchar_p() S_OK = 0 if _SHGetKnownFolderPath(ctypes.byref(fid), 0, user_handle, ctypes.byref(pPath)) != S_OK: raise PathNotFoundException() path = pPath.value _CoTaskMemFree(pPath) return path if __name__ == '__main__': if 
len(sys.argv) < 2 or sys.argv[1] in ['-?', '/?']: print('python knownpaths.py FOLDERID {current|common}') sys.exit(0) try: folderid = getattr(FOLDERID, sys.argv[1]) except AttributeError: print('Unknown folder id "%s"' % sys.argv[1], file=sys.stderr) sys.exit(1) try: if len(sys.argv) == 2: print(get_path(folderid)) else: print(get_path(folderid, getattr(UserHandle, sys.argv[2]))) except PathNotFoundException: print('Folder not found "%s"' % ' '.join(sys.argv[1:]), file=sys.stderr) sys.exit(1) # [1] http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx # [2] http://msdn.microsoft.com/en-us/library/windows/desktop/dd378457.aspx # [3] http://msdn.microsoft.com/en-us/library/windows/desktop/bb762188.aspx # [4] http://msdn.microsoft.com/en-us/library/windows/desktop/ms680722.aspx # [5] http://www.themacaque.com/?p=954
mit
-5,245,068,061,770,229,000
6,029,275,330,665,242,000
57.432927
93
0.627883
false
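A short usage sketch for get_path() defined in knownpaths.py above (Windows only, since it calls SHGetKnownFolderPath); the folder ids shown are examples.

from knownpaths import FOLDERID, UserHandle, get_path

print(get_path(FOLDERID.Downloads, UserHandle.current))  # current user's Downloads folder
print(get_path(FOLDERID.ProgramData))                    # defaults to the common (all-users) handle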
Mustard-Systems-Ltd/pyzmq
perf/perf.py
6
5316
#!/usr/bin/env python # coding: utf-8 # Copyright (C) PyZMQ Developers # Distributed under the terms of the Modified BSD License. # # Some original test code Copyright (c) 2007-2010 iMatix Corporation, # Used under LGPLv3 import argparse import time from multiprocessing import Process import zmq def parse_args(argv=None): parser = argparse.ArgumentParser(description='Run a zmq performance test') parser.add_argument('-p', '--poll', action='store_true', help='use a zmq Poller instead of raw send/recv') parser.add_argument('-c', '--copy', action='store_true', help='copy messages instead of using zero-copy') parser.add_argument('-s', '--size', type=int, default=10240, help='size (in bytes) of the test message') parser.add_argument('-n', '--count', type=int, default=10240, help='number of test messages to send') parser.add_argument('--url', dest='url', type=str, default='tcp://127.0.0.1:5555', help='the zmq URL on which to run the test') parser.add_argument(dest='test', type=str, default='lat', choices=['lat', 'thr'], help='which test to run') return parser.parse_args(argv) def latency_echo(url, count, poll, copy): """echo messages on a REP socket Should be started before `latency` """ ctx = zmq.Context() s = ctx.socket(zmq.REP) if poll: p = zmq.Poller() p.register(s) s.bind(url) block = zmq.NOBLOCK if poll else 0 for i in range(count): if poll: res = p.poll() msg = s.recv(block, copy=copy) if poll: res = p.poll() s.send(msg, block, copy=copy) msg = s.recv() assert msg == b'done' s.close() ctx.term() def latency(url, count, size, poll, copy): """Perform a latency test""" ctx = zmq.Context() s = ctx.socket(zmq.REQ) s.setsockopt(zmq.LINGER, -1) s.connect(url) if poll: p = zmq.Poller() p.register(s) msg = b' ' * size watch = zmq.Stopwatch() block = zmq.NOBLOCK if poll else 0 time.sleep(1) watch.start() for i in range (0, count): if poll: res = p.poll() assert(res[0][1] & zmq.POLLOUT) s.send(msg, block, copy=copy) if poll: res = p.poll() assert(res[0][1] & zmq.POLLIN) msg = s.recv(block, copy=copy) assert len(msg) == size elapsed = watch.stop() s.send(b'done') latency = elapsed / (count * 2.) print ("message size : %8i [B]" % (size, )) print ("roundtrip count: %8i [msgs]" % (count, )) print ("mean latency : %12.3f [µs]" % (latency, )) print ("test time : %12.3f [s]" % (elapsed * 1e-6, )) def pusher(url, count, size, copy, poll): """send a bunch of messages on a PUSH socket""" ctx = zmq.Context() s = ctx.socket(zmq.PUSH) # Add your socket options here. # For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM. if poll: p = zmq.Poller() p.register(s) s.connect(url) msg = zmq.Message(b' ' * size) block = zmq.NOBLOCK if poll else 0 for i in range(count): if poll: res = p.poll() assert(res[0][1] & zmq.POLLOUT) s.send(msg, block, copy=copy) s.close() ctx.term() def throughput(url, count, size, poll, copy): """recv a bunch of messages on a PULL socket Should be started before `pusher` """ ctx = zmq.Context() s = ctx.socket(zmq.PULL) # Add your socket options here. # For example ZMQ_RATE, ZMQ_RECOVERY_IVL and ZMQ_MCAST_LOOP for PGM. if poll: p = zmq.Poller() p.register(s) s.bind(url) watch = zmq.Stopwatch() block = zmq.NOBLOCK if poll else 0 # Wait for the other side to connect. 
msg = s.recv() assert len (msg) == size watch.start() for i in range (count-1): if poll: res = p.poll() msg = s.recv(block, copy=copy) elapsed = watch.stop() if elapsed == 0: elapsed = 1 throughput = (1e6 * float(count)) / float(elapsed) megabits = float(throughput * size * 8) / 1e6 print ("message size : %8i [B]" % (size, )) print ("message count : %8i [msgs]" % (count, )) print ("mean throughput: %8.0f [msg/s]" % (throughput, )) print ("mean throughput: %12.3f [Mb/s]" % (megabits, )) print ("test time : %12.3f [s]" % (elapsed * 1e-6, )) def main(): args = parse_args() tic = time.time() if args.test == 'lat': bg = Process(target=latency_echo, args=(args.url, args.count, args.poll, args.copy)) bg.start() latency(args.url, args.count, args.size, args.poll, args.copy) elif args.test == 'thr': bg = Process(target=throughput, args=(args.url, args.count, args.size, args.poll, args.copy)) bg.start() pusher(args.url, args.count, args.size, args.poll, args.copy) bg.join() toc = time.time() if (toc - tic) < 3: print ("For best results, tests should take at least a few seconds.") if __name__ == '__main__': main()
bsd-3-clause
359,265,347,042,992,900
4,857,381,420,009,428,000
26.53886
101
0.555221
false
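A hedged sketch of driving the latency test in perf/perf.py above programmatically instead of through its command-line interface, assuming perf.py is on the import path; the URL, message count and size are illustrative.

from multiprocessing import Process
from perf import latency, latency_echo

url, count, size = "tcp://127.0.0.1:5555", 1000, 1024
echo = Process(target=latency_echo, args=(url, count, False, False))
echo.start()
latency(url, count, size, poll=False, copy=False)  # prints mean roundtrip latency stats
echo.join()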
martinbuc/missionplanner
Lib/lib2to3/pgen2/parse.py
68
8254
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Parser engine for the grammar tables generated by pgen. The grammar table must be loaded first. See Parser/parser.c in the Python distribution for additional info on how this parsing engine works. """ # Local imports from . import token class ParseError(Exception): """Exception to signal the parser is stuck.""" def __init__(self, msg, type, value, context): Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)) self.msg = msg self.type = type self.value = value self.context = context class Parser(object): """Parser engine. The proper usage sequence is: p = Parser(grammar, [converter]) # create instance p.setup([start]) # prepare for parsing <for each input token>: if p.addtoken(...): # parse a token; may raise ParseError break root = p.rootnode # root of abstract syntax tree A Parser instance may be reused by calling setup() repeatedly. A Parser instance contains state pertaining to the current token sequence, and should not be used concurrently by different threads to parse separate token sequences. See driver.py for how to get input tokens by tokenizing a file or string. Parsing is complete when addtoken() returns True; the root of the abstract syntax tree can then be retrieved from the rootnode instance variable. When a syntax error occurs, addtoken() raises the ParseError exception. There is no error recovery; the parser cannot be used after a syntax error was reported (but it can be reinitialized by calling setup()). """ def __init__(self, grammar, convert=None): """Constructor. The grammar argument is a grammar.Grammar instance; see the grammar module for more information. The parser is not ready yet for parsing; you must call the setup() method to get it started. The optional convert argument is a function mapping concrete syntax tree nodes to abstract syntax tree nodes. If not given, no conversion is done and the syntax tree produced is the concrete syntax tree. If given, it must be a function of two arguments, the first being the grammar (a grammar.Grammar instance), and the second being the concrete syntax tree node to be converted. The syntax tree is converted from the bottom up. A concrete syntax tree node is a (type, value, context, nodes) tuple, where type is the node type (a token or symbol number), value is None for symbols and a string for tokens, context is None or an opaque value used for error reporting (typically a (lineno, offset) pair), and nodes is a list of children for symbols, and None for tokens. An abstract syntax tree node may be anything; this is entirely up to the converter function. """ self.grammar = grammar self.convert = convert or (lambda grammar, node: node) def setup(self, start=None): """Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar's start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (implicit or explicit) start symbol. """ if start is None: start = self.grammar.start # Each stack entry is a tuple: (dfa, state, node). # A node is a tuple: (type, value, context, children), # where children is a list of nodes or None, and context may be None. 
newnode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] self.rootnode = None self.used_names = set() # Aliased to self.rootnode.used_names in pop() def addtoken(self, type, value, context): """Add a token; return True iff this is the end of the program.""" # Map from token to label ilabel = self.classify(type, value, context) # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t, v = self.grammar.labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token return False elif t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, self.grammar.dfas[t], newstate, context) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError("too much input", type, value, context) else: # No success finding a transition raise ParseError("bad input", type, value, context) def classify(self, type, value, context): """Turn a token into a label. (Internal)""" if type == token.NAME: # Keep a listing of all used names self.used_names.add(value) # Check for reserved words ilabel = self.grammar.keywords.get(value) if ilabel is not None: return ilabel ilabel = self.grammar.tokens.get(type) if ilabel is None: raise ParseError("bad token", type, value, context) return ilabel def shift(self, type, value, newstate, context): """Shift a token. (Internal)""" dfa, state, node = self.stack[-1] newnode = (type, value, context, None) newnode = self.convert(self.grammar, newnode) if newnode is not None: node[-1].append(newnode) self.stack[-1] = (dfa, newstate, node) def push(self, type, newdfa, newstate, context): """Push a nonterminal. (Internal)""" dfa, state, node = self.stack[-1] newnode = (type, None, context, []) self.stack[-1] = (dfa, newstate, node) self.stack.append((newdfa, 0, newnode)) def pop(self): """Pop a nonterminal. (Internal)""" popdfa, popstate, popnode = self.stack.pop() newnode = self.convert(self.grammar, popnode) if newnode is not None: if self.stack: dfa, state, node = self.stack[-1] node[-1].append(newnode) else: self.rootnode = newnode self.rootnode.used_names = self.used_names
gpl-3.0
-360,290,537,955,004,000
-8,000,978,913,638,609,000
39.064677
78
0.561425
false
AndreaCrotti/offlineimap
docs/dev-doc-src/conf.py
11
6621
# -*- coding: utf-8 -*- # # pyDNS documentation build configuration file, created by # sphinx-quickstart on Tue Feb 2 10:00:47 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0,os.path.abspath('../..')) from offlineimap import __version__,__author__ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo'] autoclass_content = "both" # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'OfflineImap' copyright = u'2002-2010, ' + __author__ # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['html'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'dev-doc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'offlineimap.tex', u'OfflineImap Documentation', u'OfflineImap contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
gpl-2.0
-1,489,587,785,708,765,700
6,146,140,061,475,513,000
32.105
102
0.713789
false
jaysonkelly/Marlin
buildroot/share/scripts/createTemperatureLookupMarlin.py
89
6252
#!/usr/bin/python """Thermistor Value Lookup Table Generator Generates lookup to temperature values for use in a microcontroller in C format based on: http://en.wikipedia.org/wiki/Steinhart-Hart_equation The main use is for Arduino programs that read data from the circuit board described here: http://reprap.org/wiki/Temperature_Sensor_v2.0 Usage: python createTemperatureLookup.py [options] Options: -h, --help show this help --rp=... pull-up resistor --t1=ttt:rrr low temperature temperature:resistance point (around 25 degC) --t2=ttt:rrr middle temperature temperature:resistance point (around 150 degC) --t3=ttt:rrr high temperature temperature:resistance point (around 250 degC) --num-temps=... the number of temperature points to calculate (default: 36) """ from math import * import sys import getopt "Constants" ZERO = 273.15 # zero point of Kelvin scale VADC = 5 # ADC voltage VCC = 5 # supply voltage ARES = pow(2,10) # 10 Bit ADC resolution VSTEP = VADC / ARES # ADC voltage resolution TMIN = 0 # lowest temperature in table TMAX = 350 # highest temperature in table class Thermistor: "Class to do the thermistor maths" def __init__(self, rp, t1, r1, t2, r2, t3, r3): l1 = log(r1) l2 = log(r2) l3 = log(r3) y1 = 1.0 / (t1 + ZERO) # adjust scale y2 = 1.0 / (t2 + ZERO) y3 = 1.0 / (t3 + ZERO) x = (y2 - y1) / (l2 - l1) y = (y3 - y1) / (l3 - l1) c = (y - x) / ((l3 - l2) * (l1 + l2 + l3)) b = x - c * (l1**2 + l2**2 + l1*l2) a = y1 - (b + l1**2 *c)*l1 if c < 0: print "//////////////////////////////////////////////////////////////////////////////////////" print "// WARNING: negative coefficient 'c'! Something may be wrong with the measurements! //" print "//////////////////////////////////////////////////////////////////////////////////////" c = -c self.c1 = a # Steinhart-Hart coefficients self.c2 = b self.c3 = c self.rp = rp # pull-up resistance def resol(self, adc): "Convert ADC reading into a resolution" res = self.temp(adc)-self.temp(adc+1) return res def voltage(self, adc): "Convert ADC reading into a Voltage" return adc * VSTEP # convert the 10 bit ADC value to a voltage def resist(self, adc): "Convert ADC reading into a resistance in Ohms" r = self.rp * self.voltage(adc) / (VCC - self.voltage(adc)) # resistance of thermistor return r def temp(self, adc): "Convert ADC reading into a temperature in Celcius" l = log(self.resist(adc)) Tinv = self.c1 + self.c2*l + self.c3* l**3 # inverse temperature return (1/Tinv) - ZERO # temperature def adc(self, temp): "Convert temperature into a ADC reading" x = (self.c1 - (1.0 / (temp+ZERO))) / (2*self.c3) y = sqrt((self.c2 / (3*self.c3))**3 + x**2) r = exp((y-x)**(1.0/3) - (y+x)**(1.0/3)) return (r / (self.rp + r)) * ARES def main(argv): "Default values" t1 = 25 # low temperature in Kelvin (25 degC) r1 = 100000 # resistance at low temperature (10 kOhm) t2 = 150 # middle temperature in Kelvin (150 degC) r2 = 1641.9 # resistance at middle temperature (1.6 KOhm) t3 = 250 # high temperature in Kelvin (250 degC) r3 = 226.15 # resistance at high temperature (226.15 Ohm) rp = 4700; # pull-up resistor (4.7 kOhm) num_temps = 36; # number of entries for look-up table try: opts, args = getopt.getopt(argv, "h", ["help", "rp=", "t1=", "t2=", "t3=", "num-temps="]) except getopt.GetoptError as err: print str(err) usage() sys.exit(2) for opt, arg in opts: if opt in ("-h", "--help"): usage() sys.exit() elif opt == "--rp": rp = int(arg) elif opt == "--t1": arg = arg.split(':') t1 = float(arg[0]) r1 = float(arg[1]) elif opt == "--t2": arg = arg.split(':') t2 = float(arg[0]) r2 = 
float(arg[1]) elif opt == "--t3": arg = arg.split(':') t3 = float(arg[0]) r3 = float(arg[1]) elif opt == "--num-temps": num_temps = int(arg) t = Thermistor(rp, t1, r1, t2, r2, t3, r3) increment = int((ARES-1)/(num_temps-1)); step = (TMIN-TMAX) / (num_temps-1) low_bound = t.temp(ARES-1); up_bound = t.temp(1); min_temp = int(TMIN if TMIN > low_bound else low_bound) max_temp = int(TMAX if TMAX < up_bound else up_bound) temps = range(max_temp, TMIN+step, step); print "// Thermistor lookup table for Marlin" print "// ./createTemperatureLookupMarlin.py --rp=%s --t1=%s:%s --t2=%s:%s --t3=%s:%s --num-temps=%s" % (rp, t1, r1, t2, r2, t3, r3, num_temps) print "// Steinhart-Hart Coefficients: a=%.15g, b=%.15g, c=%.15g " % (t.c1, t.c2, t.c3) print "// Theoretical limits of termistor: %.2f to %.2f degC" % (low_bound, up_bound) print print "#define NUMTEMPS %s" % (len(temps)) print "const short temptable[NUMTEMPS][2] PROGMEM = {" for temp in temps: adc = t.adc(temp) print " { (short) (%7.2f * OVERSAMPLENR ), %4s }%s // v=%.3f\tr=%.3f\tres=%.3f degC/count" % (adc , temp, \ ',' if temp != temps[-1] else ' ', \ t.voltage(adc), \ t.resist( adc), \ t.resol( adc) \ ) print "};" def usage(): print __doc__ if __name__ == "__main__": main(sys.argv[1:])
gpl-3.0
-537,075,940,209,219,650
5,179,414,662,470,698,000
39.076923
147
0.485925
false
tanghaibao/jcvi
jcvi/projects/vanilla.py
1
11915
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Plotting scripts for the vanilla genome paper. """ import logging import sys from jcvi.apps.base import ActionDispatcher, OptionParser from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.base import get_number from jcvi.formats.bed import Bed from jcvi.graphics.base import normalize_axes, panel_labels, plt, savefig from jcvi.graphics.glyph import TextCircle from jcvi.graphics.synteny import Synteny, draw_gene_legend def main(): actions = ( # Chromosome painting since WGD ("ancestral", "paint 14 chromosomes following alpha WGD (requires data)"), # main figures in text ("ploidy", "plot vanilla synteny (requires data)"), # Composite phylogeny - tree and ks ("phylogeny", "create a composite figure with tree and ks"), ("tree", "create a separate figure with tree"), ("ks", "create a separate figure with ks"), # Composite synteny - wgd and microsynteny ("synteny", "create a composite figure with wgd and microsynteny"), ("wgd", "create separate figures with wgd"), ("microsynteny", "create separate figures with microsynteny"), ) p = ActionDispatcher(actions) p.dispatch(globals()) def phylogeny(args): """ %prog phylogeny treefile ks.layout Create a composite figure with (A) tree and (B) ks. """ from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree p = OptionParser(phylogeny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x12") (datafile, layoutfile) = args logging.debug("Load tree file `{0}`".format(datafile)) t, hpd = parse_tree(datafile) fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) ax1 = fig.add_axes([0, 0.4, 1, 0.6]) ax2 = fig.add_axes([0.12, 0.065, 0.8, 0.3]) margin, rmargin = 0.1, 0.2 # Left and right margin leafinfo = LeafInfoFile("leafinfo.csv").cache wgdinfo = WGDInfoFile("wgdinfo.csv").cache outgroup = "ginkgo" # Panel A draw_tree( ax1, t, hpd=hpd, margin=margin, rmargin=rmargin, supportcolor=None, internal=False, outgroup=outgroup, reroot=False, leafinfo=leafinfo, wgdinfo=wgdinfo, geoscale=True, ) from jcvi.apps.ks import Layout, KsPlot, KsFile # Panel B ks_min = 0.0 ks_max = 3.0 bins = 60 fill = False layout = Layout(layoutfile) print(layout, file=sys.stderr) kp = KsPlot(ax2, ks_max, bins, legendp="upper right") for lo in layout: data = KsFile(lo.ksfile) data = [x.ng_ks for x in data] data = [x for x in data if ks_min <= x <= ks_max] kp.add_data( data, lo.components, label=lo.label, color=lo.color, marker=lo.marker, fill=fill, fitted=False, kde=True, ) kp.draw(filename=None) normalize_axes([root, ax1]) labels = ((0.05, 0.95, "A"), (0.05, 0.4, "B")) panel_labels(root, labels) image_name = "phylogeny.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def tree(args): """ %prog tree treefile Create a tree figure. 
""" from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree p = OptionParser(tree.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x8") (datafile,) = args logging.debug("Load tree file `{0}`".format(datafile)) t, hpd = parse_tree(datafile) fig = plt.figure(1, (iopts.w, iopts.h)) ax1 = fig.add_axes([0, 0, 1, 1]) margin, rmargin = 0.1, 0.2 # Left and right margin leafinfo = LeafInfoFile("leafinfo.csv").cache wgdinfo = WGDInfoFile("wgdinfo.csv").cache outgroup = "ginkgo" # Panel A draw_tree( ax1, t, hpd=hpd, margin=margin, rmargin=rmargin, supportcolor=None, internal=False, outgroup=outgroup, reroot=False, leafinfo=leafinfo, wgdinfo=wgdinfo, geoscale=True, ) normalize_axes([ax1]) image_name = "tree.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def ks(args): """ %prog ks ks.layout Create a ks figure. """ p = OptionParser(ks.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x4") (layoutfile,) = args from jcvi.apps.ks import Layout, KsPlot, KsFile fig = plt.figure(1, (iopts.w, iopts.h)) ax2 = fig.add_axes([0.12, 0.12, 0.8, 0.8]) # Panel B ks_min = 0.0 ks_max = 3.0 bins = 60 fill = False layout = Layout(layoutfile) print(layout, file=sys.stderr) kp = KsPlot(ax2, ks_max, bins, legendp="upper right") for lo in layout: data = KsFile(lo.ksfile) data = [x.ng_ks for x in data] data = [x for x in data if ks_min <= x <= ks_max] kp.add_data( data, lo.components, label=lo.label, color=lo.color, marker=lo.marker, fill=fill, fitted=False, kde=True, ) kp.draw(filename=None) image_name = "ks.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def synteny(args): """ %prog synteny vplanifoliaA_blocks.bed vplanifoliaA.sizes \ b1.blocks all.bed b1.layout Create a composite figure with (A) wgd and (B) microsynteny. """ from jcvi.graphics.chromosome import draw_chromosomes p = OptionParser(synteny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x12") (bedfile, sizesfile, blocksfile, allbedfile, blockslayout) = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) ax1 = fig.add_axes([0, 0.5, 1, 0.5]) ax2 = fig.add_axes([0.02, 0, 0.98, 0.5]) # Panel A title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$" draw_chromosomes( ax1, bedfile, sizes=sizesfile, iopts=iopts, mergedist=200000, winsize=50000, imagemap=False, gauge=True, legend=False, title=title, ) # Panel B draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout) normalize_axes([root, ax1, ax2]) labels = ((0.05, 0.95, "A"), (0.05, 0.5, "B")) panel_labels(root, labels) image_name = "synteny.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def wgd(args): """ %prog wgd vplanifoliaA_blocks.bed vplanifoliaA.sizes Create a wgd figure. """ from jcvi.graphics.chromosome import draw_chromosomes p = OptionParser(synteny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x5") (bedfile, sizesfile) = args fig = plt.figure(1, (iopts.w, iopts.h)) ax1 = fig.add_axes([0, 0, 1, 1]) title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$" draw_chromosomes( ax1, bedfile, sizes=sizesfile, iopts=iopts, mergedist=200000, winsize=50000, imagemap=False, gauge=True, legend=False, title=title, ) normalize_axes([ax1]) image_name = "wgd.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def microsynteny(args): """ %prog microsynteny b1.blocks all.bed b1.layout Create a microsynteny figure. 
""" p = OptionParser(synteny.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x6") (blocksfile, allbedfile, blockslayout) = args fig = plt.figure(1, (iopts.w, iopts.h)) ax2 = fig.add_axes([0, 0, 1, 1]) draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout) normalize_axes([ax2]) image_name = "microsynteny.pdf" savefig(image_name, dpi=iopts.dpi, iopts=iopts) def ancestral(args): """ %prog ancestral vplanifoliaA.vplanifoliaA.anchors > vplanifoliaA_blocks.bed Paint 14 chromosomes following alpha WGD. """ p = OptionParser(ancestral.__doc__) p.set_beds() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (anchorsfile,) = args qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) # We focus on the following chromosome pairs target_pairs = { (1, 1), (1, 6), (1, 8), (1, 13), (2, 4), (3, 12), (3, 14), (5, 6), (5, 8), (7, 9), (7, 11), (9, 10), (10, 11), } def get_target(achr, bchr): if "chr" not in achr and "chr" not in bchr: return None achr, bchr = get_number(achr), get_number(bchr) if achr > bchr: achr, bchr = bchr, achr if (achr, bchr) in target_pairs: return achr, bchr return None def build_bedline(astart, aend, target_pair): # target_name = "{:02d}-{:02d}".format(*target_pair) target_name = [str(x) for x in target_pair if x in (1, 2, 3, 5, 7, 10)][0] return "\t".join( str(x) for x in (astart.seqid, astart.start, aend.end, target_name) ) # Iterate through the blocks, store any regions that has hits to one of the # target_pairs ac = AnchorFile(anchorsfile) blocks = ac.blocks outbed = Bed() for i, block in enumerate(blocks): a, b, scores = zip(*block) a = [qorder[x] for x in a] b = [sorder[x] for x in b] astart, aend = min(a)[1], max(a)[1] bstart, bend = min(b)[1], max(b)[1] # Now convert to BED lines with new accn achr, bchr = astart.seqid, bstart.seqid target = get_target(achr, bchr) if target is None: continue outbed.add(build_bedline(astart, aend, target)) outbed.add(build_bedline(bstart, bend, target)) outbed.print_to_file(sorted=True) def ploidy(args): """ %prog ploidy b1.blocks all.bed b1.layout Build a figure that illustrates the WGD history of the vanilla genome. """ p = OptionParser(ploidy.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x6") if len(args) != 3: sys.exit(not p.print_help()) blocksfile, bedfile, blockslayout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) draw_ploidy(fig, root, blocksfile, bedfile, blockslayout) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "vanilla-karyotype" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts) def draw_ploidy(fig, root, blocksfile, bedfile, blockslayout): switchidsfile = "switch.ids" Synteny( fig, root, blocksfile, bedfile, blockslayout, scalebar=True, switch=switchidsfile, ) # Legend showing the orientation of the genes draw_gene_legend(root, 0.2, 0.3, 0.53) # WGD labels radius = 0.025 tau_color = "#bebada" alpha_color = "#bc80bd" label_color = "k" pad = 0.05 for y in (0.74 + 1.5 * pad, 0.26 - 1.5 * pad): TextCircle( root, 0.25, y, r"$\alpha^{O}$", radius=radius, fc=alpha_color, color=label_color, fontweight="bold", ) TextCircle( root, 0.75, y, r"$\alpha^{O}$", radius=radius, fc=alpha_color, color=label_color, fontweight="bold", ) for y in (0.74 + 3 * pad, 0.26 - 3 * pad): TextCircle( root, 0.5, y, r"$\tau$", radius=radius, fc=tau_color, color=label_color ) if __name__ == "__main__": main()
bsd-2-clause
2,732,049,669,058,160,000
-2,058,317,397,100,800,300
25.07221
83
0.573059
false
xuegang/gpdb
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/gpstart/test_gpstart.py
9
8432
""" Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import socket from time import sleep import unittest2 as unittest import tinctest from gppylib.commands.base import Command from mpp.models import MPPTestCase from mpp.lib.PSQL import PSQL from mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility from mpp.gpdb.tests.storage.walrepl.gpactivatestandby import GpactivateStandby class GpstartTestCase(MPPTestCase): ''' testcase for gpstart gpstart may return status code 1 as well as 0 in the success case. The difference is whether it produces WARNING or not, but here we don't care. ''' origin_mdd = os.environ.get('MASTER_DATA_DIRECTORY') def __init__(self,methodName): self.gputil = GpUtility() self.stdby = StandbyVerify() super(GpstartTestCase,self).__init__(methodName) def setUp(self): self.gputil.check_and_start_gpdb() stdby_presence = self.gputil.check_standby_presence() # We should forcibly recreate standby, as it might has been promoted. if stdby_presence: self.gputil.remove_standby() self.gputil.install_standby() def tearDown(self): self.gputil.remove_standby() """ Gpstart test cases in recovery mode """ def test_gpstart_from_master(self): """ tag """ self.gputil.check_and_stop_gpdb() (rc, stdout) = self.gputil.run('gpstart -a ') self.assertIn(rc, (0, 1)) self.assertTrue(self.gputil.gpstart_and_verify()) sleep(2) self.assertTrue(self.stdby.check_gp_segment_config(),'standby master not cofigured') self.assertTrue(self.stdby.check_pg_stat_replication(),'standby not in replication status') self.assertTrue(self.stdby.check_standby_processes(), 'standby processes not running') (rc, output) = self.gputil.run(command = 'ps -ef|grep "wal sender "|grep -v grep') self.assertIsNotNone(output) def test_gpstart_master_only(self): """ tag """ self.gputil.check_and_stop_gpdb() (rc, stdout) = self.gputil.run('export GPSTART_INTERNAL_MASTER_ONLY=1; ' 'gpstart -a -m ') self.assertIn(rc, (0, 1)) self.assertTrue(self.gputil.gpstart_and_verify()) (rc,output) = self.gputil.run('PGDATABASE=template1 ' "PGOPTIONS='-c gp_session_role=utility' " 'psql') self.assertEqual(rc, 0) (rc, output) = self.gputil.run('psql template1') # should fail due to master only mode self.assertEqual(rc, 2) self.gputil.run('gpstop -a -m') self.gputil.run('gpstart -a') def test_gpstart_restricted_mode_master(self): """Test -R option with standby.""" self.gputil.check_and_stop_gpdb() (rc, stdout) = self.gputil.run('gpstart -a -R') self.assertIn(rc, (0, 1)) self.assertTrue(self.gputil.gpstart_and_verify()) (rc,output) = self.gputil.run(command = 'psql template1') self.assertIn(rc, (0, 1)) self.gputil.run('gpstop -ar') def test_gpstart_master_w_timeout(self): """Test -t option with standby.""" self.gputil.check_and_stop_gpdb() (rc, output) = self.gputil.run('gpstart -a -t 30') 
self.assertIn(rc, (0, 1)) self.assertTrue(self.gputil.gpstart_and_verify()) self.gputil.run('gpstop -ar') def test_gpstart_no_standby(self): """Test -y with standby configured.""" self.gputil.check_and_stop_gpdb() (rc, stdout) = self.gputil.run('gpstart -a -y') self.assertIn(rc, (0, 1)) self.assertTrue(self.gputil.gpstart_and_verify()) self.assertFalse(self.stdby.check_standby_processes(), 'gpstart without standby failed, standby was running') self.gputil.run('gpstop -ar') def test_gpstart_wo_standby(self): """Test -y without standby configured.""" self.gputil.remove_standby() self.gputil.check_and_stop_gpdb() (rc, stdout) = self.gputil.run('gpstart -a -y') self.assertIn(rc, (0, 1)) self.assertTrue(self.gputil.gpstart_and_verify()) self.assertFalse(self.stdby.check_standby_processes(), 'standby processes presented') self.gputil.run('gpstop -ar') """ Gpstart, test case in failover mode """ def test_gpstart_master_only_after_failover(self): """ for test purpose, failing back to old master should remove standby from primary after activate standby """ tinctest.logger.info("start master only with -m option after failover") activatestdby = GpactivateStandby() standby_host = activatestdby.get_current_standby() standby_mdd = activatestdby.get_standby_dd() standby_port = activatestdby.get_standby_port() activatestdby.activate() self.stdby._run_remote_command(standby_host,command = 'gpstop -a') stdout = self.stdby._run_remote_command(standby_host,command = 'export GPSTART_INTERNAL_MASTER_ONLY=1; gpstart -a -m') self.assertNotRegexpMatches(stdout,"ERROR","Start master only after failover failed") self.assertTrue(self.gputil.gpstart_and_verify(master_dd = standby_mdd, host = standby_host)) self.stdby._run_remote_command(standby_host,command = 'gpstop -a -m') self.gputil.run(command = 'gpstop -ar') self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port) def test_gpstart_master_after_failover(self): """ failover, start from new master, then recover the cluster back to have the old master active. """ tinctest.logger.info("failover, and run gpstart master test") self.gputil.check_and_start_gpdb() activatestdby = GpactivateStandby() standby_host = activatestdby.get_current_standby() standby_mdd = activatestdby.get_standby_dd() standby_port = activatestdby.get_standby_port() activatestdby.activate() self.stdby._run_remote_command(standby_host, command = 'gpstop -a') stdout = self.stdby._run_remote_command(standby_host,command = 'gpstart -a') self.assertNotRegexpMatches(stdout,"FATAL","ERROR") self.assertTrue(self.gputil.gpstart_and_verify(master_dd = standby_mdd, host = standby_host)) self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port) def test_gpstart_original_master_after_promote(self): """ failover, start from new master, then recover the cluster back to have the old master active. """ tinctest.logger.info("activate and run gpstart for original master") activatestdby = GpactivateStandby() standby_host = activatestdby.get_current_standby() standby_mdd = activatestdby.get_standby_dd() standby_port = activatestdby.get_standby_port() activatestdby.activate() (rc, stdout) = self.gputil.run('gpstart -a -v') self.gputil.run('pg_controldata %s' % self.origin_mdd) self.stdby._run_remote_command(standby_host, command = 'pg_controldata %s' % standby_mdd) self.assertNotEqual(rc, 0) # This below error message comes from gpstart product code (if its modified change it here as well.) 
self.assertRegexpMatches(stdout,"Standby activated, this node no more can act as master.") self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)
apache-2.0
-3,012,234,842,850,448,000
-3,240,106,993,287,997,000
41.585859
127
0.657258
false
floraXiao/gooderp_addons
buy/wizard/buy_order_track_wizard.py
6
4873
# -*- coding: utf-8 -*- from datetime import date from odoo import models, fields, api from odoo.exceptions import UserError class BuyOrderTrackWizard(models.TransientModel): _name = 'buy.order.track.wizard' _description = u'采购订单跟踪表向导' @api.model def _default_date_start(self): return self.env.user.company_id.start_date @api.model def _default_date_end(self): return date.today() date_start = fields.Date(u'开始日期', default=_default_date_start, help=u'报表汇总的开始日期,默认为公司启用日期') date_end = fields.Date(u'结束日期', default=_default_date_end, help=u'报表汇总的结束日期,默认为当前日期') partner_id = fields.Many2one('partner', u'供应商', help=u'只统计选定的供应商') goods_id = fields.Many2one('goods', u'商品', help=u'只统计选定的商品') order_id = fields.Many2one('buy.order', u'订单号', help=u'只统计选定的订单号') warehouse_dest_id = fields.Many2one('warehouse', u'仓库', help=u'只统计选定的仓库') company_id = fields.Many2one( 'res.company', string=u'公司', change_default=True, default=lambda self: self.env['res.company']._company_default_get()) def _get_domain(self): '''返回wizard界面上条件''' domain = [ ('order_id.date', '>=', self.date_start), ('order_id.date', '<=', self.date_end) ] if self.goods_id: domain.append(('goods_id', '=', self.goods_id.id)) if self.partner_id: domain.append(('order_id.partner_id', '=', self.partner_id.id)) if self.order_id: domain.append(('order_id.id', '=', self.order_id.id)) if self.warehouse_dest_id: domain.append(('order_id.warehouse_dest_id', '=', self.warehouse_dest_id.id)) return domain def _get_wh_in_date(self, line): '''对于一个buy order line,返回一个入库日期''' wh_in_date = None move_line = self.env['wh.move.line'] wh_move_line = move_line.search([ ('buy_line_id', '=', line.id), ('state', '=', 'done') ]) if len(wh_move_line) > 1: # 如果是分批入库,则入库单明细行上的buy_line_id相同 wh_in_date = wh_move_line[0].date else: wh_in_date = wh_move_line.date return wh_in_date def _prepare_track_line(self, line, qty, amount, qty_not_in): '''返回跟踪表明细行(非小计行)''' return { 'goods_code': line.goods_id.code, 'goods_id': line.goods_id.id, 'attribute': line.attribute_id.name, 'uom': line.uom_id.name, 'date': line.order_id.date, 'order_name': line.order_id.name, 'partner_id': line.order_id.partner_id.id, 'warehouse_dest_id': line.order_id.warehouse_dest_id.id, 'goods_state': line.order_id.goods_state, 'qty': qty, 'amount': amount, 'qty_not_in': qty_not_in, 'planned_date': line.order_id.planned_date, 'wh_in_date': self._get_wh_in_date(line), # 入库日期 'note': line.note, 'type': line.order_id.type, } @api.multi def button_ok(self): self.ensure_one() res = [] if self.date_end < self.date_start: raise UserError(u'开始日期不能大于结束日期!') buy_order_line = self.env['buy.order.line'] for line in buy_order_line.search(self._get_domain(), order='goods_id'): is_buy = line.order_id.type == 'buy' and 1 or -1 # 是否购货订单 # 以下分别为明细行上数量、采购额、未入库数量,退货时均取反 qty = is_buy * line.quantity amount = is_buy * line.subtotal qty_not_in = is_buy * (line.quantity - line.quantity_in) # 创建跟踪表明细行(非小计行) track = self.env['buy.order.track'].create( self._prepare_track_line(line, qty, amount, qty_not_in)) res.append(track.id) view = self.env.ref('buy.buy_order_track_tree') return { 'name': u'采购订单跟踪表', 'view_type': 'form', 'view_mode': 'tree', 'view_id': False, 'views': [(view.id, 'tree')], 'res_model': 'buy.order.track', 'type': 'ir.actions.act_window', 'domain': [('id', 'in', res)], 'limit': 65535, }
agpl-3.0
-2,842,513,450,933,140,000
3,915,305,321,583,227,400
35.875
80
0.51887
false
octavioturra/aritial
google_appengine/google/appengine/tools/dev_appserver_upload.py
5
10654
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Helper CGI for POST uploads. Utility library contains the main logic behind simulating the blobstore uploading mechanism. Contents: GenerateBlobKey: Function for generation unique blob-keys. UploadCGIHandler: Main CGI handler class for post uploads. """ import base64 import cStringIO import datetime import md5 import random import time from google.appengine.api import datastore from google.appengine.api import datastore_errors from google.appengine.api.blobstore import blobstore try: from email.mime import base from email.mime import multipart from email import generator except ImportError: from email import Generator as generator from email import MIMEBase as base from email import MIMEMultipart as multipart STRIPPED_HEADERS = frozenset(('content-length', 'content-md5', 'content-type', )) class Error(Exception): """Base class for upload processing errors.""" class InvalidMIMETypeFormatError(Error): """MIME type was formatted incorrectly.""" def GenerateBlobKey(time_func=time.time, random_func=random.random): """Generate a unique BlobKey. BlobKey is generated using the current time stamp combined with a random number. The two values are subject to an md5 digest and base64 url-safe encoded. The new key is checked against the possibility of existence within the datastore and the random number is regenerated until there is no match. Args: time_func: Function used for generating the timestamp. Used for dependency injection. Allows for predictable results during tests. Must return a floating point UTC timestamp. random_func: Function used for generating the random number. Used for dependency injection. Allows for predictable results during tests. Returns: String version of BlobKey that is unique within the BlobInfo datastore. None if there are too many name conflicts. """ timestamp = str(time_func()) tries = 0 while tries < 10: number = str(random_func()) digester = md5.md5() digester.update(timestamp) digester.update(number) blob_key = base64.urlsafe_b64encode(digester.digest()) datastore_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND, blob_key, namespace='') try: datastore.Get(datastore_key) tries += 1 except datastore_errors.EntityNotFoundError: return blob_key return None def _SplitMIMEType(mime_type): """Split MIME-type in to main and sub type. Args: mime_type: full MIME type string. Returns: (main, sub): main: Main part of mime type (application, image, text, etc). sub: Subtype part of mime type (pdf, png, html, etc). Raises: InvalidMIMETypeFormatError: If form item has incorrectly formatted MIME type. 
""" if mime_type: mime_type_array = mime_type.split('/') if len(mime_type_array) == 1: raise InvalidMIMETypeFormatError('Missing MIME sub-type.') elif len(mime_type_array) == 2: main_type, sub_type = mime_type_array if not(main_type and sub_type): raise InvalidMIMETypeFormatError( 'Incorrectly formatted MIME type: %s' % mime_type) return main_type, sub_type else: raise InvalidMIMETypeFormatError( 'Incorrectly formatted MIME type: %s' % mime_type) else: return 'application', 'octet-stream' class UploadCGIHandler(object): """Class used for handling an upload post. The main interface to this class is the UploadCGI method. This will recieve the upload form, store the blobs contained in the post and rewrite the blobs to contain BlobKeys instead of blobs. """ def __init__(self, blob_storage, generate_blob_key=GenerateBlobKey, now_func=datetime.datetime.now): """Constructor. Args: blob_storage: BlobStorage instance where actual blobs are stored. generate_blob_key: Function used for generating unique blob keys. now_func: Function that returns the current timestamp. """ self.__blob_storage = blob_storage self.__generate_blob_key = generate_blob_key self.__now_func = now_func def StoreBlob(self, form_item, creation): """Store form-item to blob storage. Args: form_item: FieldStorage instance that represents a specific form field. This instance should have a non-empty filename attribute, meaning that it is an uploaded blob rather than a normal form field. creation: Timestamp to associate with new blobs creation time. This parameter is provided so that all blobs in the same upload form can have the same creation date. Returns: datastore.Entity('__BlobInfo__') associated with the upload. """ main_type, sub_type = _SplitMIMEType(form_item.type) blob_key = self.__generate_blob_key() self.__blob_storage.StoreBlob(blob_key, form_item.file) content_type_formatter = base.MIMEBase(main_type, sub_type, **form_item.type_options) blob_entity = datastore.Entity('__BlobInfo__', name=str(blob_key), namespace='') blob_entity['content_type'] = ( content_type_formatter['content-type'].decode('utf-8')) blob_entity['creation'] = creation blob_entity['filename'] = form_item.filename.decode('utf-8') form_item.file.seek(0, 2) size = form_item.file.tell() form_item.file.seek(0) blob_entity['size'] = size datastore.Put(blob_entity) return blob_entity def _GenerateMIMEMessage(self, form, boundary=None): """Generate a new post from original form. Also responsible for storing blobs in the datastore. Args: form: Instance of cgi.FieldStorage representing the whole form derived from original post data. boundary: Boundary to use for resulting form. Used only in tests so that the boundary is always consistent. Returns: A MIMEMultipart instance representing the new HTTP post which should be forwarded to the developers actual CGI handler. DO NOT use the return value of this method to generate a string unless you know what you're doing and properly handle folding whitespace (from rfc822) properly. """ message = multipart.MIMEMultipart('form-data', boundary) for name, value in form.headers.items(): if name.lower() not in STRIPPED_HEADERS: message.add_header(name, value) def IterateForm(): """Flattens form in to single sequence of cgi.FieldStorage instances. The resulting cgi.FieldStorage objects are a little bit irregular in their structure. A single name can have mulitple sub-items. In this case, the root FieldStorage object has a list associated with that field name. 
Otherwise, the root FieldStorage object just refers to a single nested instance. Lists of FieldStorage instances occur when a form has multiple values for the same name. Yields: cgi.FieldStorage irrespective of their nesting level. """ for key in sorted(form): form_item = form[key] if isinstance(form_item, list): for list_item in form_item: yield list_item else: yield form_item creation = self.__now_func() for form_item in IterateForm(): disposition_parameters = {'name': form_item.name} if form_item.filename is None: variable = base.MIMEBase('text', 'plain') variable.set_payload(form_item.value) else: if not form_item.filename: continue disposition_parameters['filename'] = form_item.filename main_type, sub_type = _SplitMIMEType(form_item.type) blob_entity = self.StoreBlob(form_item, creation) variable = base.MIMEBase('message', 'external-body', access_type=blobstore.BLOB_KEY_HEADER, blob_key=blob_entity.key().name()) form_item.file.seek(0, 2) content_length = form_item.file.tell() form_item.file.seek(0) external = base.MIMEBase(main_type, sub_type, **form_item.type_options) headers = dict(form_item.headers) headers['Content-Length'] = str(content_length) headers[blobstore.UPLOAD_INFO_CREATION_HEADER] = ( blobstore._format_creation(creation)) for key, value in headers.iteritems(): external.add_header(key, value) external_disposition_parameters = dict(disposition_parameters) external_disposition_parameters['filename'] = form_item.filename if not external.get('Content-Disposition'): external.add_header('Content-Disposition', 'form-data', **external_disposition_parameters) variable.set_payload([external]) variable.add_header('Content-Disposition', 'form-data', **disposition_parameters) message.attach(variable) return message def GenerateMIMEMessageString(self, form, boundary=None): """Generate a new post string from original form. Args: form: Instance of cgi.FieldStorage representing the whole form derived from original post data. boundary: Boundary to use for resulting form. Used only in tests so that the boundary is always consistent. Returns: A string rendering of a MIMEMultipart instance. """ message = self._GenerateMIMEMessage(form, boundary=boundary) message_out = cStringIO.StringIO() gen = generator.Generator(message_out, maxheaderlen=0) gen.flatten(message, unixfrom=False) return message_out.getvalue()
apache-2.0
-6,862,574,835,745,717,000
2,776,853,001,405,299,700
33.816993
80
0.660128
false
VasuAgrawal/tartanHacks2015
site/flask/lib/python2.7/site-packages/pbr/tests/test_version.py
41
1137
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # Copyright 2012-2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pbr.tests import base from pbr import version class DeferredVersionTestCase(base.BaseTestCase): def test_cached_version(self): class MyVersionInfo(version.VersionInfo): def _get_version_from_pkg_resources(self): return "5.5.5.5" deferred_string = MyVersionInfo("openstack").\ cached_version_string() self.assertEqual("5.5.5.5", deferred_string)
mit
-3,838,392,497,600,488,000
5,339,382,691,152,342,000
35.677419
78
0.703606
false
dataxu/ansible
lib/ansible/modules/network/f5/bigip_virtual_server.py
25
53942
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: bigip_virtual_server short_description: Manage LTM virtual servers on a BIG-IP description: - Manage LTM virtual servers on a BIG-IP. version_added: "2.1" options: state: description: - The virtual server state. If C(absent), delete the virtual server if it exists. C(present) creates the virtual server and enable it. If C(enabled), enable the virtual server if it exists. If C(disabled), create the virtual server if needed, and set state to C(disabled). default: present choices: - present - absent - enabled - disabled name: description: - Virtual server name. required: True aliases: - vs destination: description: - Destination IP of the virtual server. - Required when C(state) is C(present) and virtual server does not exist. required: True aliases: - address - ip source: description: - Specifies an IP address or network from which the virtual server accepts traffic. - The virtual server accepts clients only from one of these IP addresses. - For this setting to function effectively, specify a value other than 0.0.0.0/0 or ::/0 (that is, any/0, any6/0). - In order to maximize utility of this setting, specify the most specific address prefixes covering all customer addresses and no others. - Specify the IP address in Classless Inter-Domain Routing (CIDR) format; address/prefix, where the prefix length is in bits. For example, for IPv4, 10.0.0.1/32 or 10.0.0.0/24, and for IPv6, ffe1::0020/64 or 2001:ed8:77b5:2:10:10:100:42/64. version_added: 2.5 port: description: - Port of the virtual server. Required when C(state) is C(present) and virtual server does not exist. - If you do not want to specify a particular port, use the value C(0). The result is that the virtual server will listen on any port. profiles: description: - List of profiles (HTTP, ClientSSL, ServerSSL, etc) to apply to both sides of the connection (client-side and server-side). - If you only want to apply a particular profile to the client-side of the connection, specify C(client-side) for the profile's C(context). - If you only want to apply a particular profile to the server-side of the connection, specify C(server-side) for the profile's C(context). - If C(context) is not provided, it will default to C(all). suboptions: name: description: - Name of the profile. - If this is not specified, then it is assumed that the profile item is only a name of a profile. - This must be specified if a context is specified. required: false context: description: - The side of the connection on which the profile should be applied. choices: - all - server-side - client-side default: all aliases: - all_profiles irules: version_added: "2.2" description: - List of rules to be applied in priority order. - If you want to remove existing iRules, specify a single empty value; C(""). See the documentation for an example. aliases: - all_rules enabled_vlans: version_added: "2.2" description: - List of VLANs to be enabled. When a VLAN named C(all) is used, all VLANs will be allowed. VLANs can be specified with or without the leading partition. If the partition is not specified in the VLAN, then the C(partition) option of this module will be used. 
- This parameter is mutually exclusive with the C(disabled_vlans) parameter. disabled_vlans: version_added: 2.5 description: - List of VLANs to be disabled. If the partition is not specified in the VLAN, then the C(partition) option of this module will be used. - This parameter is mutually exclusive with the C(enabled_vlans) parameter. pool: description: - Default pool for the virtual server. - If you want to remove the existing pool, specify an empty value; C(""). See the documentation for an example. policies: description: - Specifies the policies for the virtual server. aliases: - all_policies snat: description: - Source network address policy. required: false choices: - None - Automap - Name of a SNAT pool (eg "/Common/snat_pool_name") to enable SNAT with the specific pool default_persistence_profile: description: - Default Profile which manages the session persistence. - If you want to remove the existing default persistence profile, specify an empty value; C(""). See the documentation for an example. description: description: - Virtual server description. fallback_persistence_profile: description: - Specifies the persistence profile you want the system to use if it cannot use the specified default persistence profile. - If you want to remove the existing fallback persistence profile, specify an empty value; C(""). See the documentation for an example. version_added: 2.3 partition: description: - Device partition to manage resources on. default: Common version_added: 2.5 metadata: description: - Arbitrary key/value pairs that you can attach to a pool. This is useful in situations where you might want to annotate a virtual to be managed by Ansible. - Key names will be stored as strings; this includes names that are numbers. - Values for all of the keys will be stored as strings; this includes values that are numbers. - Data will be persisted, not ephemeral. version_added: 2.5 notes: - Requires BIG-IP software version >= 11 - Requires the netaddr Python package on the host. This is as easy as pip install netaddr.
requirements: - netaddr extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Modify Port of the Virtual Server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret state: present partition: Common name: my-virtual-server port: 8080 delegate_to: localhost - name: Delete virtual server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret state: absent partition: Common name: my-virtual-server delegate_to: localhost - name: Add virtual server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret state: present partition: Common name: my-virtual-server destination: 10.10.10.10 port: 443 pool: my-pool snat: Automap description: Test Virtual Server profiles: - http - fix - name: clientssl context: server-side - name: ilx context: client-side policies: - my-ltm-policy-for-asm - ltm-uri-policy - ltm-policy-2 - ltm-policy-3 enabled_vlans: - /Common/vlan2 delegate_to: localhost - name: Add FastL4 virtual server bigip_virtual_server: destination: 1.1.1.1 name: fastl4_vs port: 80 profiles: - fastL4 state: present - name: Add iRules to the Virtual Server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret name: my-virtual-server irules: - irule1 - irule2 delegate_to: localhost - name: Remove one iRule from the Virtual Server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret name: my-virtual-server irules: - irule2 delegate_to: localhost - name: Remove all iRules from the Virtual Server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret name: my-virtual-server irules: "" delegate_to: localhost - name: Remove pool from the Virtual Server bigip_virtual_server: server: lb.mydomain.net user: admin password: secret name: my-virtual-server pool: "" delegate_to: localhost - name: Add metadata to virtual bigip_pool: server: lb.mydomain.com user: admin password: secret state: absent name: my-pool partition: Common metadata: ansible: 2.4 updated_at: 2017-12-20T17:50:46Z delegate_to: localhost ''' RETURN = r''' description: description: New description of the virtual server. returned: changed type: string sample: This is my description default_persistence_profile: description: Default persistence profile set on the virtual server. returned: changed type: string sample: /Common/dest_addr destination: description: Destination of the virtual server. returned: changed type: string sample: 1.1.1.1 disabled: description: Whether the virtual server is disabled, or not. returned: changed type: bool sample: True disabled_vlans: description: List of VLANs that the virtual is disabled for. returned: changed type: list sample: ['/Common/vlan1', '/Common/vlan2'] enabled: description: Whether the virtual server is enabled, or not. returned: changed type: bool sample: False enabled_vlans: description: List of VLANs that the virtual is enabled for. returned: changed type: list sample: ['/Common/vlan5', '/Common/vlan6'] fallback_persistence_profile: description: Fallback persistence profile set on the virtual server. returned: changed type: string sample: /Common/source_addr irules: description: iRules set on the virtual server. returned: changed type: list sample: ['/Common/irule1', '/Common/irule2'] pool: description: Pool that the virtual server is attached to. returned: changed type: string sample: /Common/my-pool policies: description: List of policies attached to the virtual. 
returned: changed type: list sample: ['/Common/policy1', '/Common/policy2'] port: description: Port that the virtual server is configured to listen on. returned: changed type: int sample: 80 profiles: description: List of profiles set on the virtual server. returned: changed type: list sample: [{'name': 'tcp', 'context': 'server-side'}, {'name': 'tcp-legacy', 'context': 'client-side'}] snat: description: SNAT setting of the virtual server. returned: changed type: string sample: Automap source: description: Source address, in CIDR form, set on the virtual server. returned: changed type: string sample: 1.2.3.4/32 metadata: description: The new value of the virtual. returned: changed type: dict sample: {'key1': 'foo', 'key2': 'bar'} ''' import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from ansible.module_utils.six import iteritems from collections import namedtuple try: # Sideband repository used for dev from library.module_utils.network.f5.bigip import HAS_F5SDK from library.module_utils.network.f5.bigip import F5Client from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fqdn_name from library.module_utils.network.f5.common import f5_argument_spec try: from library.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False HAS_DEVEL_IMPORTS = True except ImportError: # Upstream Ansible from ansible.module_utils.network.f5.bigip import HAS_F5SDK from ansible.module_utils.network.f5.bigip import F5Client from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import fqdn_name from ansible.module_utils.network.f5.common import f5_argument_spec try: from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False try: import netaddr HAS_NETADDR = True except ImportError: HAS_NETADDR = False class Parameters(AnsibleF5Parameters): api_map = { 'sourceAddressTranslation': 'snat', 'fallbackPersistence': 'fallback_persistence_profile', 'persist': 'default_persistence_profile', 'vlansEnabled': 'vlans_enabled', 'vlansDisabled': 'vlans_disabled', 'profilesReference': 'profiles', 'policiesReference': 'policies', 'rules': 'irules' } api_attributes = [ 'description', 'destination', 'disabled', 'enabled', 'fallbackPersistence', 'metadata', 'persist', 'policies', 'pool', 'profiles', 'rules', 'source', 'sourceAddressTranslation', 'vlans', 'vlansEnabled', 'vlansDisabled', ] updatables = [ 'description', 'default_persistence_profile', 'destination', 'disabled_vlans', 'enabled', 'enabled_vlans', 'fallback_persistence_profile', 'irules', 'metadata', 'pool', 'policies', 'port', 'profiles', 'snat', 'source' ] returnables = [ 'description', 'default_persistence_profile', 'destination', 'disabled', 'disabled_vlans', 'enabled', 'enabled_vlans', 'fallback_persistence_profile', 'irules', 'metadata', 'pool', 'policies', 'port', 'profiles', 'snat', 'source', 'vlans', 'vlans_enabled', 'vlans_disabled' ] profiles_mutex = [ 'sip', 'sipsession', 'iiop', 'rtsp', 'http', 'diameter', 'diametersession', 'radius', 'ftp', 'tftp', 'dns', 'pptp', 'fix' ] def to_return(self): result = {} for 
returnable in self.returnables: try: result[returnable] = getattr(self, returnable) except Exception as ex: pass result = self._filter_params(result) return result def _fqdn_name(self, value): if value is not None and not value.startswith('/'): return '/{0}/{1}'.format(self.partition, value) return value def is_valid_ip(self, value): try: netaddr.IPAddress(value) return True except (netaddr.core.AddrFormatError, ValueError): return False def _format_port_for_destination(self, ip, port): addr = netaddr.IPAddress(ip) if addr.version == 6: if port == 0: result = '.any' else: result = '.{0}'.format(port) else: result = ':{0}'.format(port) return result def _format_destination(self, address, port, route_domain): if port is None: if route_domain is None: result = '{0}'.format( self._fqdn_name(address) ) else: result = '{0}%{1}'.format( self._fqdn_name(address), route_domain ) else: port = self._format_port_for_destination(address, port) if route_domain is None: result = '{0}{1}'.format( self._fqdn_name(address), port ) else: result = '{0}%{1}{2}'.format( self._fqdn_name(address), route_domain, port ) return result class ApiParameters(Parameters): @property def destination(self): if self._values['destination'] is None: return None destination = self.destination_tuple result = self._format_destination(destination.ip, destination.port, destination.route_domain) return result @property def source(self): if self._values['source'] is None: return None try: addr = netaddr.IPNetwork(self._values['source']) result = '{0}/{1}'.format(str(addr.ip), addr.prefixlen) return result except netaddr.core.AddrFormatError: raise F5ModuleError( "The source IP address must be specified in CIDR format: address/prefix" ) @property def destination_tuple(self): Destination = namedtuple('Destination', ['ip', 'port', 'route_domain']) # Remove the partition if self._values['destination'] is None: result = Destination(ip=None, port=None, route_domain=None) return result destination = re.sub(r'^/[a-zA-Z0-9_.-]+/', '', self._values['destination']) if self.is_valid_ip(destination): result = Destination( ip=destination, port=None, route_domain=None ) return result # Covers the following examples # # /Common/2700:bc00:1f10:101::6%2.80 # 2700:bc00:1f10:101::6%2.80 # 1.1.1.1%2:80 # /Common/1.1.1.1%2:80 # /Common/2700:bc00:1f10:101::6%2.any # pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)[:.](?P<port>[0-9]+|any)' matches = re.search(pattern, destination) if matches: try: port = int(matches.group('port')) except ValueError: # Can be a port of "any". 
This only happens with IPv6 port = matches.group('port') if port == 'any': port = 0 ip = matches.group('ip') if not self.is_valid_ip(ip): raise F5ModuleError( "The provided destination is not a valid IP address" ) result = Destination( ip=matches.group('ip'), port=port, route_domain=int(matches.group('route_domain')) ) return result pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)' matches = re.search(pattern, destination) if matches: ip = matches.group('ip') if not self.is_valid_ip(ip): raise F5ModuleError( "The provided destination is not a valid IP address" ) result = Destination( ip=matches.group('ip'), port=None, route_domain=int(matches.group('route_domain')) ) return result parts = destination.split('.') if len(parts) == 4: # IPv4 ip, port = destination.split(':') if not self.is_valid_ip(ip): raise F5ModuleError( "The provided destination is not a valid IP address" ) result = Destination( ip=ip, port=int(port), route_domain=None ) return result elif len(parts) == 2: # IPv6 ip, port = destination.split('.') try: port = int(port) except ValueError: # Can be a port of "any". This only happens with IPv6 if port == 'any': port = 0 if not self.is_valid_ip(ip): raise F5ModuleError( "The provided destination is not a valid IP address" ) result = Destination( ip=ip, port=port, route_domain=None ) return result else: result = Destination(ip=None, port=None, route_domain=None) return result @property def port(self): destination = self.destination_tuple self._values['port'] = destination.port return destination.port @property def route_domain(self): destination = self.destination_tuple self._values['route_domain'] = destination.route_domain return destination.route_domain @property def profiles(self): if 'items' not in self._values['profiles']: return None result = [] for item in self._values['profiles']['items']: context = item['context'] name = item['name'] if context in ['all', 'serverside', 'clientside']: result.append(dict(name=name, context=context, fullPath=item['fullPath'])) else: raise F5ModuleError( "Unknown profile context found: '{0}'".format(context) ) return result @property def policies(self): if 'items' not in self._values['policies']: return None result = [] for item in self._values['policies']['items']: name = item['name'] partition = item['partition'] result.append(dict(name=name, partition=partition)) return result @property def default_persistence_profile(self): if self._values['default_persistence_profile'] is None: return None # These persistence profiles are always lists when we get them # from the REST API even though there can only be one. We'll # make it a list again when we get to the Difference engine. return self._values['default_persistence_profile'][0] @property def enabled(self): if 'enabled' in self._values: return True else: return False @property def disabled(self): if 'disabled' in self._values: return True return False @property def metadata(self): if self._values['metadata'] is None: return None result = [] for md in self._values['metadata']: tmp = dict(name=str(md['name'])) if 'value' in md: tmp['value'] = str(md['value']) else: tmp['value'] = '' result.append(tmp) return result class ModuleParameters(Parameters): def _handle_profile_context(self, tmp): if 'context' not in tmp: tmp['context'] = 'all' else: if 'name' not in tmp: raise F5ModuleError( "A profile name must be specified when a context is specified." 
) tmp['context'] = tmp['context'].replace('server-side', 'serverside') tmp['context'] = tmp['context'].replace('client-side', 'clientside') def _handle_clientssl_profile_nuances(self, profile): if profile['name'] != 'clientssl': return if profile['context'] != 'clientside': profile['context'] = 'clientside' @property def destination(self): addr = self._values['destination'].split("%")[0] if not self.is_valid_ip(addr): raise F5ModuleError( "The provided destination is not a valid IP address" ) result = self._format_destination(addr, self.port, self.route_domain) return result @property def destination_tuple(self): Destination = namedtuple('Destination', ['ip', 'port', 'route_domain']) if self._values['destination'] is None: result = Destination(ip=None, port=None, route_domain=None) return result addr = self._values['destination'].split("%")[0] result = Destination(ip=addr, port=self.port, route_domain=self.route_domain) return result @property def source(self): if self._values['source'] is None: return None try: addr = netaddr.IPNetwork(self._values['source']) result = '{0}/{1}'.format(str(addr.ip), addr.prefixlen) return result except netaddr.core.AddrFormatError: raise F5ModuleError( "The source IP address must be specified in CIDR format: address/prefix" ) @property def port(self): if self._values['port'] is None: return None if self._values['port'] in ['*', 'any']: return 0 self._check_port() return int(self._values['port']) def _check_port(self): try: port = int(self._values['port']) except ValueError: raise F5ModuleError( "The specified port was not a valid integer" ) if 0 <= port <= 65535: return port raise F5ModuleError( "Valid ports must be in range 0 - 65535" ) @property def irules(self): results = [] if self._values['irules'] is None: return None if len(self._values['irules']) == 1 and self._values['irules'][0] == '': return '' for irule in self._values['irules']: result = self._fqdn_name(irule) results.append(result) return results @property def profiles(self): if self._values['profiles'] is None: return None if len(self._values['profiles']) == 1 and self._values['profiles'][0] == '': return '' result = [] for profile in self._values['profiles']: tmp = dict() if isinstance(profile, dict): tmp.update(profile) self._handle_profile_context(tmp) if 'name' not in profile: tmp['name'] = profile tmp['fullPath'] = self._fqdn_name(tmp['name']) self._handle_clientssl_profile_nuances(tmp) else: tmp['name'] = profile tmp['context'] = 'all' tmp['fullPath'] = self._fqdn_name(tmp['name']) self._handle_clientssl_profile_nuances(tmp) result.append(tmp) mutually_exclusive = [x['name'] for x in result if x in self.profiles_mutex] if len(mutually_exclusive) > 1: raise F5ModuleError( "Profiles {0} are mutually exclusive".format( ', '.join(self.profiles_mutex).strip() ) ) return result @property def policies(self): if self._values['policies'] is None: return None if len(self._values['policies']) == 1 and self._values['policies'][0] == '': return '' result = [] policies = [self._fqdn_name(p) for p in self._values['policies']] policies = set(policies) for policy in policies: parts = policy.split('/') if len(parts) != 3: raise F5ModuleError( "The specified policy '{0}' is malformed".format(policy) ) tmp = dict( name=parts[2], partition=parts[1] ) result.append(tmp) return result @property def pool(self): if self._values['pool'] is None: return None if self._values['pool'] == '': return '' return self._fqdn_name(self._values['pool']) @property def vlans_enabled(self): if self._values['enabled_vlans'] is 
None: return None elif self._values['vlans_enabled'] is False: # This is a special case for "all" enabled VLANs return False if self._values['disabled_vlans'] is None: return True return False @property def vlans_disabled(self): if self._values['disabled_vlans'] is None: return None elif self._values['vlans_disabled'] is True: # This is a special case for "all" enabled VLANs return True elif self._values['enabled_vlans'] is None: return True return False @property def enabled_vlans(self): if self._values['enabled_vlans'] is None: return None elif any(x.lower() for x in self._values['enabled_vlans'] if x.lower() in ['all', '*']): result = [self._fqdn_name('all')] if result[0].endswith('/all'): if self._values['__warnings'] is None: self._values['__warnings'] = [] self._values['__warnings'].append( dict( msg="Usage of the 'ALL' value for 'enabled_vlans' parameter is deprecated. Use '*' instead", version='2.5' ) ) return result results = list(set([self._fqdn_name(x) for x in self._values['enabled_vlans']])) results.sort() return results @property def disabled_vlans(self): if self._values['disabled_vlans'] is None: return None elif any(x.lower() for x in self._values['disabled_vlans'] if x.lower() in ['all', '*']): raise F5ModuleError( "You cannot disable all VLANs. You must name them individually." ) results = list(set([self._fqdn_name(x) for x in self._values['disabled_vlans']])) results.sort() return results @property def vlans(self): disabled = self.disabled_vlans if disabled: return self.disabled_vlans return self.enabled_vlans @property def state(self): if self._values['state'] == 'present': return 'enabled' return self._values['state'] @property def snat(self): if self._values['snat'] is None: return None lowercase = self._values['snat'].lower() if lowercase in ['automap', 'none']: return dict(type=lowercase) snat_pool = self._fqdn_name(self._values['snat']) return dict(pool=snat_pool, type='snat') @property def default_persistence_profile(self): if self._values['default_persistence_profile'] is None: return None if self._values['default_persistence_profile'] == '': return '' profile = self._fqdn_name(self._values['default_persistence_profile']) parts = profile.split('/') if len(parts) != 3: raise F5ModuleError( "The specified 'default_persistence_profile' is malformed" ) result = dict( name=parts[2], partition=parts[1] ) return result @property def fallback_persistence_profile(self): if self._values['fallback_persistence_profile'] is None: return None if self._values['fallback_persistence_profile'] == '': return '' result = self._fqdn_name(self._values['fallback_persistence_profile']) return result @property def enabled(self): if self._values['state'] == 'enabled': return True elif self._values['state'] == 'disabled': return False else: return None @property def disabled(self): if self._values['state'] == 'enabled': return False elif self._values['state'] == 'disabled': return True else: return None @property def metadata(self): if self._values['metadata'] is None: return None if self._values['metadata'] == '': return [] result = [] try: for k, v in iteritems(self._values['metadata']): tmp = dict(name=str(k)) if v: tmp['value'] = str(v) else: tmp['value'] = '' result.append(tmp) except AttributeError: raise F5ModuleError( "The 'metadata' parameter must be a dictionary of key/value pairs." 
) return result class Changes(Parameters): pass class UsableChanges(Changes): @property def vlans(self): if self._values['vlans'] is None: return None elif len(self._values['vlans']) == 0: return [] elif any(x for x in self._values['vlans'] if x.lower() in ['/common/all', 'all']): return [] return self._values['vlans'] class ReportableChanges(Changes): @property def snat(self): if self._values['snat'] is None: return None result = self._values['snat'].get('type', None) if result == 'automap': return 'Automap' elif result == 'none': return 'none' result = self._values['snat'].get('pool', None) return result @property def destination(self): params = ApiParameters(params=dict(destination=self._values['destination'])) result = params.destination_tuple.ip return result @property def port(self): params = ApiParameters(params=dict(destination=self._values['destination'])) result = params.destination_tuple.port return result @property def default_persistence_profile(self): if len(self._values['default_persistence_profile']) == 0: return [] profile = self._values['default_persistence_profile'][0] result = '/{0}/{1}'.format(profile['partition'], profile['name']) return result @property def policies(self): if len(self._values['policies']) == 0: return [] result = ['/{0}/{1}'.format(x['partition'], x['name']) for x in self._values['policies']] return result @property def enabled_vlans(self): if len(self._values['vlans']) == 0 and self._values['vlans_disabled'] is True: return 'all' elif len(self._values['vlans']) > 0 and self._values['vlans_enabled'] is True: return self._values['vlans'] @property def disabled_vlans(self): if len(self._values['vlans']) > 0 and self._values['vlans_disabled'] is True: return self._values['vlans'] class Difference(object): def __init__(self, want, have=None): self.have = have self.want = want def compare(self, param): try: result = getattr(self, param) return result except AttributeError: result = self.__default(param) return result def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 def to_tuple(self, items): result = [] for x in items: tmp = [(str(k), str(v)) for k, v in iteritems(x)] result += tmp return result def _diff_complex_items(self, want, have): if want == [] and have is None: return None if want is None: return None w = self.to_tuple(want) h = self.to_tuple(have) if set(w).issubset(set(h)): return None else: return want def _update_vlan_status(self, result): if self.want.vlans_disabled is not None: if self.want.vlans_disabled != self.have.vlans_disabled: result['vlans_disabled'] = self.want.vlans_disabled result['vlans_enabled'] = not self.want.vlans_disabled elif self.want.vlans_enabled is not None: if any(x.lower().endswith('/all') for x in self.want.vlans): if self.have.vlans_enabled is True: result['vlans_disabled'] = True result['vlans_enabled'] = False elif self.want.vlans_enabled != self.have.vlans_enabled: result['vlans_disabled'] = not self.want.vlans_enabled result['vlans_enabled'] = self.want.vlans_enabled @property def destination(self): addr_tuple = [self.want.destination, self.want.port, self.want.route_domain] if all(x for x in addr_tuple if x is None): return None have = self.have.destination_tuple if self.want.port is None: self.want.update({'port': have.port}) if self.want.route_domain is None: self.want.update({'route_domain': have.route_domain}) if self.want.destination_tuple.ip is None: address = have.ip else: address = 
self.want.destination_tuple.ip want = self.want._format_destination(address, self.want.port, self.want.route_domain) if want != self.have.destination: return self.want._fqdn_name(want) @property def source(self): if self.want.source is None: return None want = netaddr.IPNetwork(self.want.source) have = netaddr.IPNetwork(self.have.destination_tuple.ip) if want.version != have.version: raise F5ModuleError( "The source and destination addresses for the virtual server must be be the same type (IPv4 or IPv6)." ) if self.want.source != self.have.source: return self.want.source @property def vlans(self): if self.want.vlans is None: return None elif self.want.vlans == [] and self.have.vlans is None: return None elif self.want.vlans == self.have.vlans: return None # Specifically looking for /all because the vlans return value will be # an FQDN list. This means that "all" will be returned as "/partition/all", # ex, /Common/all. # # We do not want to accidentally match values that would end with the word # "all", like "vlansall". Therefore we look for the forward slash because this # is a path delimiter. elif any(x.lower().endswith('/all') for x in self.want.vlans): if self.have.vlans is None: return None else: return [] else: return self.want.vlans @property def enabled_vlans(self): return self.vlan_status @property def disabled_vlans(self): return self.vlan_status @property def vlan_status(self): result = dict() vlans = self.vlans if vlans is not None: result['vlans'] = vlans self._update_vlan_status(result) return result @property def port(self): result = self.destination if result is not None: return dict( destination=result ) @property def profiles(self): if self.want.profiles is None: return None if self.want.profiles == '' and len(self.have.profiles) > 0: have = set([(p['name'], p['context'], p['fullPath']) for p in self.have.profiles]) if len(self.have.profiles) == 1: if not any(x[0] in ['tcp', 'udp', 'sctp'] for x in have): return [] else: return None else: return [] if self.want.profiles == '' and len(self.have.profiles) == 0: return None want = set([(p['name'], p['context'], p['fullPath']) for p in self.want.profiles]) have = set([(p['name'], p['context'], p['fullPath']) for p in self.have.profiles]) if len(have) == 0: return self.want.profiles elif len(have) == 1: if want != have: return self.want.profiles else: if not any(x[0] == 'tcp' for x in want): have = set([x for x in have if x[0] != 'tcp']) if not any(x[0] == 'udp' for x in want): have = set([x for x in have if x[0] != 'udp']) if not any(x[0] == 'sctp' for x in want): have = set([x for x in have if x[0] != 'sctp']) want = set([(p[2], p[1]) for p in want]) have = set([(p[2], p[1]) for p in have]) if want != have: return self.want.profiles @property def fallback_persistence_profile(self): if self.want.fallback_persistence_profile is None: return None if self.want.fallback_persistence_profile == '' and self.have.fallback_persistence_profile is not None: return "" if self.want.fallback_persistence_profile == '' and self.have.fallback_persistence_profile is None: return None if self.want.fallback_persistence_profile != self.have.fallback_persistence_profile: return self.want.fallback_persistence_profile @property def default_persistence_profile(self): if self.want.default_persistence_profile is None: return None if self.want.default_persistence_profile == '' and self.have.default_persistence_profile is not None: return [] if self.want.default_persistence_profile == '' and self.have.default_persistence_profile is None: return None if 
self.have.default_persistence_profile is None: return [self.want.default_persistence_profile] w_name = self.want.default_persistence_profile.get('name', None) w_partition = self.want.default_persistence_profile.get('partition', None) h_name = self.have.default_persistence_profile.get('name', None) h_partition = self.have.default_persistence_profile.get('partition', None) if w_name != h_name or w_partition != h_partition: return [self.want.default_persistence_profile] @property def policies(self): if self.want.policies is None: return None if self.want.policies == '' and self.have.policies is None: return None if self.want.policies == '' and len(self.have.policies) > 0: return [] if not self.have.policies: return self.want.policies want = set([(p['name'], p['partition']) for p in self.want.policies]) have = set([(p['name'], p['partition']) for p in self.have.policies]) if not want == have: return self.want.policies @property def snat(self): if self.want.snat is None: return None if self.want.snat['type'] != self.have.snat['type']: result = dict(snat=self.want.snat) return result if self.want.snat.get('pool', None) is None: return None if self.want.snat['pool'] != self.have.snat['pool']: result = dict(snat=self.want.snat) return result @property def enabled(self): if self.want.state == 'enabled' and self.have.disabled: result = dict( enabled=True, disabled=False ) return result elif self.want.state == 'disabled' and self.have.enabled: result = dict( enabled=False, disabled=True ) return result @property def irules(self): if self.want.irules is None: return None if self.want.irules == '' and len(self.have.irules) > 0: return [] if self.want.irules == '' and len(self.have.irules) == 0: return None if sorted(set(self.want.irules)) != sorted(set(self.have.irules)): return self.want.irules @property def pool(self): if self.want.pool is None: return None if self.want.pool == '' and self.have.pool is not None: return "" if self.want.pool == '' and self.have.pool is None: return None if self.want.pool != self.have.pool: return self.want.pool @property def metadata(self): if self.want.metadata is None: return None elif len(self.want.metadata) == 0 and self.have.metadata is None: return None elif len(self.want.metadata) == 0: return [] elif self.have.metadata is None: return self.want.metadata result = self._diff_complex_items(self.want.metadata, self.have.metadata) return result class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.have = ApiParameters() self.want = ModuleParameters(client=self.client, params=self.module.params) self.changes = UsableChanges() def exec_module(self): changed = False result = dict() state = self.want.state try: if state in ['present', 'enabled', 'disabled']: changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def absent(self): if self.exists(): return self.remove() return False def update(self): self.have = 
self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() return True def should_update(self): result = self._update_changed_options() if result: return True return False def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource") return True def get_reportable_changes(self): result = ReportableChanges(params=self.changes.to_return()) return result def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def exists(self): result = self.client.api.tm.ltm.virtuals.virtual.exists( name=self.want.name, partition=self.want.partition ) return result def create(self): required_resources = ['destination', 'port'] self._set_changed_options() # This must be changed back to a list to make a valid REST API # value. The module manipulates this as a normal dictionary if self.want.default_persistence_profile is not None: self.want.update({'default_persistence_profile': [self.want.default_persistence_profile]}) if self.want.destination is None: raise F5ModuleError( "'destination' must be specified when creating a virtual server" ) if all(getattr(self.want, v) is None for v in required_resources): raise F5ModuleError( "You must specify both of " + ', '.join(required_resources) ) if self.want.enabled_vlans is not None: if any(x for x in self.want.enabled_vlans if x.lower() in ['/common/all', 'all']): self.want.update( dict( enabled_vlans=[], vlans_disabled=True, vlans_enabled=False ) ) if self.want.source and self.want.destination: want = netaddr.IPNetwork(self.want.source) have = netaddr.IPNetwork(self.want.destination_tuple.ip) if want.version != have.version: raise F5ModuleError( "The source and destination addresses for the virtual server must be be the same type (IPv4 or IPv6)." 
) if self.module.check_mode: return True self.create_on_device() return True def update_on_device(self): params = self.changes.api_params() resource = self.client.api.tm.ltm.virtuals.virtual.load( name=self.want.name, partition=self.want.partition ) resource.modify(**params) def read_current_from_device(self): result = self.client.api.tm.ltm.virtuals.virtual.load( name=self.want.name, partition=self.want.partition, requests_params=dict( params=dict( expandSubcollections='true' ) ) ) params = result.attrs params.update(dict(kind=result.to_dict().get('kind', None))) result = ApiParameters(params=params) return result def create_on_device(self): params = self.want.api_params() self.client.api.tm.ltm.virtuals.virtual.create( name=self.want.name, partition=self.want.partition, **params ) def remove_from_device(self): resource = self.client.api.tm.ltm.virtuals.virtual.load( name=self.want.name, partition=self.want.partition ) if resource: resource.delete() class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( state=dict( default='present', choices=['present', 'absent', 'disabled', 'enabled'] ), name=dict( required=True, aliases=['vs'] ), destination=dict( aliases=['address', 'ip'] ), port=dict( type='int' ), profiles=dict( type='list', aliases=['all_profiles'], options=dict( name=dict(required=False), context=dict(default='all', choices=['all', 'server-side', 'client-side']) ) ), policies=dict( type='list', aliases=['all_policies'] ), irules=dict( type='list', aliases=['all_rules'] ), enabled_vlans=dict( type='list' ), disabled_vlans=dict( type='list' ), pool=dict(), description=dict(), snat=dict(), default_persistence_profile=dict(), fallback_persistence_profile=dict(), source=dict(), metadata=dict(type='raw'), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['enabled_vlans', 'disabled_vlans'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive ) if not HAS_F5SDK: module.fail_json(msg="The python f5-sdk module is required") if not HAS_NETADDR: module.fail_json(msg="The python netaddr module is required") try: client = F5Client(**module.params) mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) module.exit_json(**results) except F5ModuleError as ex: cleanup_tokens(client) module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
gpl-3.0
4,232,497,453,782,452,700
474,646,612,850,440,770
31.554013
122
0.566961
false
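The bigip_virtual_server record above assembles BIG-IP destination strings from an address plus an optional port and route domain. The standalone sketch below is a hypothetical helper, not part of the module; it mirrors the formatting rules shown in _format_destination() and _format_port_for_destination() so the expected output shapes are easy to see, and the sample addresses are invented.

def format_destination(address, port=None, route_domain=None, partition='Common'):
    # Prefix with the partition, as the module's _fqdn_name() does.
    addr = address if address.startswith('/') else '/{0}/{1}'.format(partition, address)
    if port is None:
        return addr if route_domain is None else '{0}%{1}'.format(addr, route_domain)
    # IPv6 destinations use '.' before the port (port 0 renders as 'any'); IPv4 uses ':'.
    sep = '.' if ':' in address else ':'
    port_part = '{0}{1}'.format(sep, 'any' if (sep == '.' and port == 0) else port)
    if route_domain is None:
        return '{0}{1}'.format(addr, port_part)
    return '{0}%{1}{2}'.format(addr, route_domain, port_part)

print(format_destination('1.1.1.1', 80))                   # /Common/1.1.1.1:80
print(format_destination('1.1.1.1', 80, route_domain=2))   # /Common/1.1.1.1%2:80
print(format_destination('2700:bc00:1f10:101::6', 0, 2))   # /Common/2700:bc00:1f10:101::6%2.any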
RichIsMyName/PicklingToolsRepo
PythonModule/ptools/xmldumper_defs.py
3
3487
# Options for dictionaries -> XML # If XML attributes are being folded up, then you may # want to prepend a special character to distinguish attributes # from nested tags: an underscore is the usual default. If # you don't want a prepend char, use XML_DUMP_NO_PREPEND option XML_PREPEND_CHAR = '_' # When dumping, by DEFAULT the keys that start with _ become # attributes (this is called "unfolding"). You may want to keep # those keys as tags. Consider: # # { 'top': { '_a':'1', '_b': 2 }} # # DEFAULT behavior, this becomes: # <top a="1" b="2"></top> This moves the _names to attributes # # But, you may want all _ keys to stay as tags: that's the purpose of this opt # <top> <_a>1</_a> <_b>2</b> </top> XML_DUMP_PREPEND_KEYS_AS_TAGS = 0x100 # Any value that is simple (i.e., contains no nested # content) will be placed in the attributes bin: # For examples: # { 'top': { 'x':'1', 'y': 2 }} -> <top x="1" y="2"></top> XML_DUMP_SIMPLE_TAGS_AS_ATTRIBUTES = 0x200 # By default, everything dumps as strings (without quotes), but those things # that are strings lose their "stringedness", which means # they can't be "evaled" on the way back in. This option makes # Vals that are strings dump with quotes. XML_DUMP_STRINGS_AS_STRINGS = 0x400 # Like XML_DUMP_STRINGS_AS_STRINGS, but this one ONLY # dumps strings with quotes if it thinks Eval will return # something else. For example in { 's': '123' } : '123' is # a STRING, not a number. When evalled with an XMLLoader # with XML_LOAD_EVAL_CONTENT flag, that will become a number. XML_DUMP_STRINGS_BEST_GUESS = 0x800 # Show nesting when you dump: like "prettyPrint": basically, it shows # nesting XML_DUMP_PRETTY = 0x1000 # Arrays of POD (plain old data: ints, real, complex, etc) can # dump as huge lists: By default they just dump with one tag # and then a list of numbers. If you set this option, they dump # as a true XML list (<data>1.0/<data><data>2.0</data> ...) # which is very expensive, but is easier to use with other # tools (spreadsheets that support lists, etc.). XML_DUMP_POD_LIST_AS_XML_LIST = 0x2000 # When dumping an empty tag, what do you want it to be? # I.e., what is <empty></empty> # Normally (DEFAULT) this is an empty dictionary 'empty': {} # If you want that to be empty content, as in an empty string, # set this option: 'empty': "" # NOTE: You don't need this option if you are using # XML_DUMP_STRINGS_AS_STRINGS or XML_DUMP_STRINGS_BEST_GUESS XML_DUMP_PREFER_EMPTY_STRINGS = 0x4000 # When dumping dictionaries in order, a dict BY DEFAULT prints # out the keys in sorted/alphabetic order and BY DEFAULT an OrderedDict # prints out in the OrderedDict order. The "unnatural" order # for a dict is to print out in "random" order (but probably slightly # faster). The "unnatural" order for an OrderedDict is sorted # (because normally we use an OrderedDict because we WANTS its # notion of order) XML_DUMP_UNNATURAL_ORDER = 0x8000 # Even though illegal XML, allow element names starting with Digits: # when it does see a starting digit, it turns it into an _digit # so that it is still legal XML XML_TAGS_ACCEPTS_DIGITS = 0x80 # Allows digits as starting XML tags, even though illegal XML. # This preserves the number as a tag. XML_DIGITS_AS_TAGS = 0x80000 # When dumping XML, the default is to NOT have the XML header # <?xml version="1.0">: Specifying this option will always make that # the header always precedes all content XML_STRICT_HDR = 0x10000
bsd-3-clause
-2,143,477,689,954,563,600
-6,499,744,240,413,842,000
38.625
78
0.711213
false
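The xmldumper_defs constants in the record above are bit flags, so a caller would normally combine them with bitwise OR and test them with bitwise AND. The dumper entry point itself is not part of that file, so the snippet below only illustrates flag handling; the flag values are copied from the definitions above and has_option() is an illustrative helper.

XML_DUMP_PRETTY = 0x1000
XML_DUMP_STRINGS_AS_STRINGS = 0x400
XML_DUMP_PREFER_EMPTY_STRINGS = 0x4000

options = XML_DUMP_PRETTY | XML_DUMP_STRINGS_AS_STRINGS

def has_option(options, flag):
    # True when the given flag bit is set in the combined option word.
    return (options & flag) == flag

print(has_option(options, XML_DUMP_PRETTY))                # True
print(has_option(options, XML_DUMP_PREFER_EMPTY_STRINGS))  # False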
cwacek/python-jsonschema-objects
python_jsonschema_objects/wrapper_types.py
1
11522
import collections import logging import six from python_jsonschema_objects import util from python_jsonschema_objects.validators import registry, ValidationError from python_jsonschema_objects.util import lazy_format as fmt logger = logging.getLogger(__name__) class ArrayWrapper(collections.abc.MutableSequence): """A wrapper for array-like structures. This implements all of the array like behavior that one would want, with a dirty-tracking mechanism to avoid constant validation costs. """ @property def strict(self): return getattr(self, "_strict_", False) def __len__(self): return len(self.data) def mark_or_revalidate(self): if self.strict: self.validate() else: self._dirty = True def __delitem__(self, index): self.data.pop(index) self.mark_or_revalidate() def insert(self, index, value): self.data.insert(index, value) self.mark_or_revalidate() def __setitem__(self, index, value): self.data[index] = value self.mark_or_revalidate() def __getitem__(self, idx): return self.typed_elems[idx] def __eq__(self, other): if isinstance(other, ArrayWrapper): return self.for_json() == other.for_json() else: return self.for_json() == other def __init__(self, ary): """Initialize a wrapper for the array Args: ary: (list-like, or ArrayWrapper) """ """ Marks whether or not the underlying array has been modified """ self._dirty = True """ Holds a typed copy of the array """ self._typed = None if isinstance(ary, (list, tuple, collections.abc.Sequence)): self.data = ary else: raise TypeError("Invalid value given to array validator: {0}".format(ary)) logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary)) @property def typed_elems(self): logger.debug(fmt("Accessing typed_elems of ArrayWrapper {} ", self)) if self._typed is None or self._dirty is True: self.validate() return self._typed def __repr__(self): return "<%s=%s>" % (self.__class__.__name__, str(self.data)) @classmethod def from_json(cls, jsonmsg): import json msg = json.loads(jsonmsg) obj = cls(msg) obj.validate() return obj def serialize(self): enc = util.ProtocolJSONEncoder() return enc.encode(self.typed_elems) def for_json(self): from python_jsonschema_objects import classbuilder out = [] for item in self.typed_elems: if isinstance( item, (classbuilder.ProtocolBase, classbuilder.LiteralValue, ArrayWrapper), ): out.append(item.for_json()) else: out.append(item) return out def validate(self): if self.strict or self._dirty: self.validate_items() self.validate_length() self.validate_uniqueness() return True def validate_uniqueness(self): if getattr(self, "uniqueItems", False) is True: testset = set(repr(item) for item in self.data) if len(testset) != len(self.data): raise ValidationError( "{0} has duplicate elements, but uniqueness required".format( self.data ) ) def validate_length(self): if getattr(self, "minItems", None) is not None: if len(self.data) < self.minItems: raise ValidationError( "{1} has too few elements. Wanted {0}.".format( self.minItems, self.data ) ) if getattr(self, "maxItems", None) is not None: if len(self.data) > self.maxItems: raise ValidationError( "{1} has too many elements. Wanted {0}.".format( self.maxItems, self.data ) ) def validate_items(self): """Validates the items in the backing array, including performing type validation. 
Sets the _typed property and clears the dirty flag as a side effect Returns: The typed array """ logger.debug(fmt("Validating {}", self)) from python_jsonschema_objects import classbuilder if self.__itemtype__ is None: return type_checks = self.__itemtype__ if not isinstance(type_checks, (tuple, list)): # we were given items = {'type': 'blah'} ; thus ensure the type for all data. type_checks = [type_checks] * len(self.data) elif len(type_checks) > len(self.data): raise ValidationError( "{1} does not have sufficient elements to validate against {0}".format( self.__itemtype__, self.data ) ) typed_elems = [] for elem, typ in zip(self.data, type_checks): if isinstance(typ, dict): for param, paramval in six.iteritems(typ): validator = registry(param) if validator is not None: validator(paramval, elem, typ) typed_elems.append(elem) elif util.safe_issubclass(typ, classbuilder.LiteralValue): val = typ(elem) val.validate() typed_elems.append(val) elif util.safe_issubclass(typ, classbuilder.ProtocolBase): if not isinstance(elem, typ): try: if isinstance( elem, (six.string_types, six.integer_types, float) ): val = typ(elem) else: val = typ(**util.coerce_for_expansion(elem)) except TypeError as e: raise ValidationError( "'{0}' is not a valid value for '{1}': {2}".format( elem, typ, e ) ) else: val = elem val.validate() typed_elems.append(val) elif util.safe_issubclass(typ, ArrayWrapper): val = typ(elem) val.validate() typed_elems.append(val) elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)): try: if isinstance(elem, (six.string_types, six.integer_types, float)): val = typ(elem) else: val = typ(**util.coerce_for_expansion(elem)) except TypeError as e: raise ValidationError( "'{0}' is not a valid value for '{1}': {2}".format(elem, typ, e) ) else: val.validate() typed_elems.append(val) self._dirty = False self._typed = typed_elems return typed_elems @staticmethod def create(name, item_constraint=None, **addl_constraints): """Create an array validator based on the passed in constraints. If item_constraint is a tuple, it is assumed that tuple validation is being performed. If it is a class or dictionary, list validation will be performed. Classes are assumed to be subclasses of ProtocolBase, while dictionaries are expected to be basic types ('string', 'number', ...). addl_constraints is expected to be key-value pairs of any of the other constraints permitted by JSON Schema v4. 
""" logger.debug( fmt( "Constructing ArrayValidator with {} and {}", item_constraint, addl_constraints, ) ) from python_jsonschema_objects import classbuilder klassbuilder = addl_constraints.pop( "classbuilder", None ) # type: python_jsonschema_objects.classbuilder.ClassBuilder props = {} if item_constraint is not None: if isinstance(item_constraint, (tuple, list)): for i, elem in enumerate(item_constraint): isdict = isinstance(elem, (dict,)) isklass = isinstance(elem, type) and util.safe_issubclass( elem, (classbuilder.ProtocolBase, classbuilder.LiteralValue) ) if not any([isdict, isklass]): raise TypeError( "Item constraint (position {0}) is not a schema".format(i) ) elif isinstance( item_constraint, (classbuilder.TypeProxy, classbuilder.TypeRef) ): pass elif util.safe_issubclass(item_constraint, ArrayWrapper): pass else: isdict = isinstance(item_constraint, (dict,)) isklass = isinstance(item_constraint, type) and util.safe_issubclass( item_constraint, (classbuilder.ProtocolBase, classbuilder.LiteralValue), ) if not any([isdict, isklass]): raise TypeError("Item constraint is not a schema") if isdict and "$ref" in item_constraint: if klassbuilder is None: raise TypeError( "Cannot resolve {0} without classbuilder".format( item_constraint["$ref"] ) ) item_constraint = klassbuilder.resolve_type( item_constraint["$ref"], name ) elif isdict and item_constraint.get("type") == "array": # We need to create a sub-array validator. item_constraint = ArrayWrapper.create( name + "#sub", item_constraint=item_constraint["items"], addl_constraints=item_constraint, ) elif isdict and "oneOf" in item_constraint: # We need to create a TypeProxy validator uri = "{0}_{1}".format(name, "<anonymous_list_type>") type_array = klassbuilder.construct_objects( item_constraint["oneOf"], uri ) item_constraint = classbuilder.TypeProxy(type_array) elif isdict and item_constraint.get("type") == "object": """ We need to create a ProtocolBase object for this anonymous definition""" uri = "{0}_{1}".format(name, "<anonymous_list_type>") item_constraint = klassbuilder.construct(uri, item_constraint) props["__itemtype__"] = item_constraint strict = addl_constraints.pop("strict", False) props["_strict_"] = strict props.update(addl_constraints) validator = type(str(name), (ArrayWrapper,), props) return validator
mit
8,188,301,818,989,156,000
1,991,041,090,643,914,800
34.343558
96
0.518486
false
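As a usage sketch for the ArrayWrapper.create() factory in the record above: it returns a new validator class with the extra keyword constraints attached as class attributes, so uniqueness and length checks fire on validate(). The schema fragment and values below are invented for illustration, and it is assumed the package's standard validators accept a plain {'type': 'integer'} item schema.

IntList = ArrayWrapper.create(
    'IntList',
    item_constraint={'type': 'integer'},
    minItems=1,
    uniqueItems=True,
)

values = IntList([1, 2, 3])
values.validate()        # passes: unique integers, at least one element
values.append(1)         # marks the wrapper dirty
try:
    values.validate()
except ValidationError:
    print('duplicate elements rejected, as uniqueItems requires')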
0x0all/scikit-learn
examples/plot_multioutput_face_completion.py
330
3019
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces X_test = test[:, :np.ceil(0.5 * n_pixels)] y_test = test[:, np.floor(0.5 * n_pixels):] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
bsd-3-clause
-8,309,961,904,005,619,000
6,984,569,590,488,459,000
29.806122
72
0.629016
false
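A quick sanity check on the pixel split used by the face-completion example above: Olivetti faces are 64x64, so the estimators are trained to predict the lower 2048 flattened pixels from the upper 2048.

import numpy as np

n_pixels = 64 * 64                      # image_shape = (64, 64)
upper = int(np.ceil(0.5 * n_pixels))    # 2048 input features (upper half)
lower = n_pixels - upper                # 2048 regression targets (lower half)
print(upper, lower)                     # 2048 2048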
cloud9209/cloud9209_flask
lib/bs4/tests/test_lxml.py
273
2965
"""Tests to ensure that the lxml tree builder generates good trees.""" import re import warnings try: import lxml.etree LXML_PRESENT = True LXML_VERSION = lxml.etree.LXML_VERSION except ImportError, e: LXML_PRESENT = False LXML_VERSION = (0,) if LXML_PRESENT: from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML from bs4 import ( BeautifulSoup, BeautifulStoneSoup, ) from bs4.element import Comment, Doctype, SoupStrainer from bs4.testing import skipIf from bs4.tests import test_htmlparser from bs4.testing import ( HTMLTreeBuilderSmokeTest, XMLTreeBuilderSmokeTest, SoupTest, skipIf, ) @skipIf( not LXML_PRESENT, "lxml seems not to be present, not testing its tree builder.") class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest): """See ``HTMLTreeBuilderSmokeTest``.""" @property def default_builder(self): return LXMLTreeBuilder() def test_out_of_range_entity(self): self.assertSoupEquals( "<p>foo&#10000000000000;bar</p>", "<p>foobar</p>") self.assertSoupEquals( "<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>") self.assertSoupEquals( "<p>foo&#1000000000;bar</p>", "<p>foobar</p>") # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this # test if an old version of lxml is installed. @skipIf( not LXML_PRESENT or LXML_VERSION < (2,3,5,0), "Skipping doctype test for old version of lxml to avoid segfault.") def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_beautifulstonesoup_is_xml_parser(self): # Make sure that the deprecated BSS class uses an xml builder # if one is installed. with warnings.catch_warnings(record=True) as w: soup = BeautifulStoneSoup("<b />") self.assertEqual(u"<b/>", unicode(soup.b)) self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message)) def test_real_xhtml_document(self): """lxml strips the XML definition from an XHTML doc, which is fine.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8").replace(b"\n", b''), markup.replace(b'\n', b'').replace( b'<?xml version="1.0" encoding="utf-8"?>', b'')) @skipIf( not LXML_PRESENT, "lxml seems not to be present, not testing its XML tree builder.") class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest): """See ``HTMLTreeBuilderSmokeTest``.""" @property def default_builder(self): return LXMLTreeBuilderForXML()
apache-2.0
8,672,223,969,246,564,000
-7,783,409,226,135,738,000
31.582418
86
0.650253
false
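A minimal illustration of the behaviour these lxml smoke tests pin down, assuming lxml is installed: parsing with the lxml builder drops out-of-range character entities, which is why the expected markup is plain "foobar".

from bs4 import BeautifulSoup

soup = BeautifulSoup("<p>foo&#10000000000000;bar</p>", "lxml")
print(soup.p.get_text())   # foobar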
franciscogmm/FinancialAnalysisUsingNLPandMachineLearning
SentimentAnalysis - Polarity - Domain Specific Lexicon.py
1
2667
import csv import pandas as pd import nltk from nltk import FreqDist,ngrams from nltk.corpus import stopwords import string from os import listdir from os.path import isfile, join def ngram_list(file,n): f = open(file,'rU') raw = f.read() raw = raw.replace('\n',' ') #raw = raw.decode('utf8') #raw = raw.decode("utf-8", 'ignore') ngramz = ngrams(raw.split(),n) return ngramz def IsNotNull(value): return value is not None and len(value) > 0 mypath = '/Users/francis/Documents/FORDHAM/2nd Term/Text Analytics/' #path where files are located onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))] dict_p = [] f = open('positive.txt', 'r') for line in f: t = line.strip().lower() if IsNotNull(t): dict_p.append(t) f.close dict_n = [] f = open('negative.txt', 'r') for line in f: t = line.strip().lower() if IsNotNull(t): dict_n.append(t) f.close totallist = [] rowlist = [] qa = 0 qb = 0 counti = 0 for i in onlyfiles: if i.endswith('.txt'): # get code j = i.replace('.txt','') # string filename file = mypath + str(i) print i f = open(file,'rU') raw = f.read() #print type(raw) raw = [w.translate(None, string.punctuation) for w in raw] raw = ''.join(raw) raw = raw.replace('\n','') raw = raw.replace(' ','') #print raw qa = 0 qb = 0 for word in dict_p: if word in raw: qa += 1 for word in dict_n: if word in raw: qb += 1 qc = qa - qb if qc > 0: sentiment = 'POSITIVE' elif qc == 0: sentiment = 'NEUTRAL' else: sentiment = 'NEGATIVE' rowlist.append(i) rowlist.append(qa) rowlist.append(qb) rowlist.append(qc) rowlist.append(sentiment) print counti counti += 1 totallist.append(rowlist) rowlist = [] else: pass labels = ('file', 'P', 'N', 'NET', 'SENTIMENT') df = pd.DataFrame.from_records(totallist, columns = labels) df.to_csv('oursentiment.csv', index = False) #print dict_p # allbigrams.append(ngram_list(file,2)) # print i + ' BIGRAM - OK' # alltrigrams.append(ngram_list(file,3)) # print i + ' TRIGRAM - OK' # allfourgrams.append(ngram_list(file,4)) # print i + ' FOURGRAM - OK' # allfivegrams.append(ngram_list(file,5)) # print i + ' TRIGRAM - OK' # allsixgrams.append(ngram_list(file,6)) # print i + ' SIXGRAM - OK' # allsevengrams.append(ngram_list(file,7)) # print i + ' SEVENGRAM - OK' # alleightgrams.append(ngram_list(file,8)) # print i + ' EIGHTGRAM - OK'
mit
-1,698,508,121,120,995,800
6,786,184,166,350,664,000
21.420168
98
0.578178
false
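The sentiment script above scores each document by counting positive and negative lexicon hits and classifying on the net count. The compact re-statement below shows the same scoring rule; note that it matches whole words, which is an assumption on my part, since the original strips spaces and matches substrings.

import re

def polarity(text, positives, negatives):
    words = re.findall(r"[a-z']+", text.lower())
    pos = sum(w in positives for w in words)
    neg = sum(w in negatives for w in words)
    net = pos - neg
    label = 'POSITIVE' if net > 0 else ('NEUTRAL' if net == 0 else 'NEGATIVE')
    return pos, neg, net, label

print(polarity("profit grew strongly despite one weak quarter",
               {'profit', 'grew', 'strongly'}, {'weak'}))
# (3, 1, 2, 'POSITIVE')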
oihane/server-tools
auth_dynamic_groups/model/res_users.py
14
2115
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2013 Therp BV (<http://therp.nl>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.models import Model from openerp.modules.registry import RegistryManager from openerp import SUPERUSER_ID class res_users(Model): _inherit = 'res.users' def _login(self, db, login, password): uid = super(res_users, self)._login(db, login, password) if uid: self.update_dynamic_groups(uid, db) return uid def update_dynamic_groups(self, uid, db): pool = RegistryManager.get(db) cr = pool._db.cursor() user = pool.get('res.users').browse(cr, SUPERUSER_ID, uid) groups_obj = pool.get('res.groups') user.write( { 'groups_id': [ (4, dynamic_group.id) if dynamic_group.eval_dynamic_group_condition(uid=uid) else (3, dynamic_group.id) for dynamic_group in groups_obj.browse( cr, SUPERUSER_ID, groups_obj.search(cr, SUPERUSER_ID, [('is_dynamic', '=', True)])) ], }) cr.commit() cr.close()
agpl-3.0
537,790,743,696,500,600
-2,920,063,858,423,437,000
37.454545
78
0.552246
false
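For context on the write() call in the record above: the (4, id) and (3, id) tuples are standard OpenERP x2many write commands, where 4 links a group to the user and 3 unlinks it. The tiny helper below is hypothetical and only shows the command list such a condition evaluation would produce.

def membership_commands(group_ids, is_member):
    # 4 = link the group, 3 = unlink it (OpenERP x2many write commands)
    return [(4, gid) if is_member(gid) else (3, gid) for gid in group_ids]

print(membership_commands([7, 8], lambda gid: gid == 7))
# [(4, 7), (3, 8)]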
DocBO/mubosym
mubosym/simple_tire_model_interface.py
2
8005
# -*- coding: utf-8 -*- """ simple_tire_model_interface =========================== Created on Wed May 27 18:02:53 2015 @author: oliver """ import sys from sympy import lambdify, symbols import numpy as np b = [1.5,0.,1100.,0.,300.,0.,0.,0.,-2.,0.,0.,0.,0.,0.] a = [1.4,0.,1100.,1100.,10.,0.,0.,-2.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.] def Pacejka_F_long(Fz, slip): """ longitudinal force :param (float) Fz: Force in vertical direction in N :param (float) slip: relative slip fraction (0..1) """ if Fz == 0: return 0. slip = slip*100.0 Fz = Fz/1000.0 C = b[0] D = Fz*(b[1]*Fz+b[2]) BCD = (Fz*(b[3]*Fz+b[4]))*np.exp(-b[5]*Fz) B = BCD/(C*D) H = b[9]*Fz+b[10] V = b[11]*Fz+b[12] E = ((b[6]*Fz*Fz)+b[7]*Fz+b[8])*(1-(b[13]*np.sign(slip+H))) Bx1 = B*(slip+H) Fx = D*np.sin(C*np.arctan(Bx1-E*(Bx1-np.arctan(Bx1))))+V return Fx def Pacejka_F_lat(Fz, alpha, camber): """ lateral force :param (float) Fz: Force in vertical direction in N :param (float) alpha: slip angle in rad :param (float) camber: camber angle in rad """ if Fz == 0: return 0. alpha = alpha * 180.0/np.pi camber = camber * 180.0/np.pi Fz = Fz/1000.0 C = a[0] D = Fz*(a[1]*Fz+a[2])*(1-a[15]*np.power(camber,2)) BCD = a[3]*np.sin(np.arctan(Fz/a[4])*2)*(1-a[5]*np.fabs(camber)) B = BCD/(C*D) H = a[8]*Fz+a[9]+a[10]*camber V = a[11]*Fz+a[12]+(a[13]*Fz+a[14])*camber*Fz E = (a[6]*Fz+a[7])*(1-(a[16]*camber+a[17])*np.sign(alpha+H)) Bx1 = B*(alpha+H) Fy = D*np.sin(C*np.arctan(Bx1-E*(Bx1-np.arctan(Bx1))))+V return Fy class simple_tire_model(): """ A one body force model consists of: * coordinate trafo generalized coords -> body coordinates (denoted list of) including pos, vel, orientation, and omega * force calculator given as a python function with input according to our interface * some preparation function: lambdifier to include symbolic functions into lambdas """ def __init__(self, paras = []): # setup parameters self.t = 0. self.D = 200000. self.gamma = 200.0 self.y0 = 0.0 self.C_side = 4500.0 self.C_align = 200.0 self.C_slip = 300.0 self.R_tire = 0.33 self.trafo = [] self.F_max = 4500.0 self.gamma_torque = 2.0 self.max_p = 100.0 self.tau = 0.1 self.signals = [] self.signals_values = [] def set_coordinate_trafo(self, tr): """ Input function for the coordinate trafo expressions (sympy). 
:param tr: the transformation expressions as given in the mbs setup for the body """ self.trafo = tr def set_subs_dicts(self, subs_dicts): for sd in subs_dicts: for ii in range(len(self.trafo)): self.trafo[ii] = self.trafo[ii].subs(sd) for ii in range(len(self.signals)): self.signals[ii] = self.signals[ii].subs(sd) def add_signal(self, expr): self.signals.append(expr) def lambdify_trafo(self, generalized_coords): """ This is the core function to lambdify the coordinate trafos in general the trafos must be explicitely set via set_coordinate_trafo called from MBSCore (see therein) :param generalized_coords: the generalized coords (symbols) of the final mbs setup (called in kaneify) """ if len(self.trafo) < 12: print("call set_coordinate_trafo first") sys.exit(0) # for ii in range(12): # print ii, self.trafo[ii] t = symbols('t') self.lam_t = lambdify(generalized_coords, t) self.lam_x = lambdify(generalized_coords, self.trafo[0]) self.lam_y = lambdify(generalized_coords, self.trafo[1]) self.lam_z = lambdify(generalized_coords, self.trafo[2]) self.lam_nx = lambdify(generalized_coords, self.trafo[3]) self.lam_ny = lambdify(generalized_coords, self.trafo[4]) self.lam_nz = lambdify(generalized_coords, self.trafo[5]) self.lam_x_pt = lambdify(generalized_coords, self.trafo[6]) self.lam_y_pt = lambdify(generalized_coords, self.trafo[7]) self.lam_z_pt = lambdify(generalized_coords, self.trafo[8]) self.lam_omega_x = lambdify(generalized_coords, self.trafo[9]) self.lam_omega_y = lambdify(generalized_coords, self.trafo[10]) self.lam_omega_z = lambdify(generalized_coords, self.trafo[11]) self.lam_signals = [ lambdify(generalized_coords, expr) for expr in self.signals] def trafo_lam(self, w): """ Just for reference all coordinate trafos as lambdas (not used at the moment). :param w: the generalized coords (float numbers) of the final mbs setup """ return [self.lam_t(*w), self.lam_x(*w), self.lam_y(*w), self.lam_z(*w), \ self.lam_nx(*w), self.lam_ny(*w), self.lam_nz(*w), \ self.lam_x_pt(*w), self.lam_y_pt(*w), self.lam_z_pt(*w), \ self.lam_omega_x(*w), self.lam_omega_y(*w), self.lam_omega_z(*w)] def force_lam(self, w): """ The model force/torque via lambdified expressions, input parameter here is always the full state vecor t,q,u. Output is the force/toque via the model calc-function the nested input for the calc routine is fully possible written out: * self.lam_t, self.lam_x, self.lam_y, self.lam_z, * self.lam_nx, self.lam_ny, self.lam_nz, * self.lam_x_pt, self.lam_y_pt, self.lam_z_pt, * self.lam_omega_x self.lam_omega_y, self.lam_omega_z but can be reduced to a subset :param w: the generalized coords (float numbers) of the final mbs setup, The order has to be equal the one in calc. """ self.signals_values = [x(*w) for x in self.lam_signals] return self._calc([ self.lam_t(*w), self.lam_y(*w), \ self.lam_x_pt(*w), self.lam_y_pt(*w), self.lam_z_pt(*w),\ self.lam_omega_z(*w) ] ) def _calc(self, inp): """ The python function which connects some external model calculation with the mbs model e.g. tire-model, rail model. It is only called internally by force_lam. * input list inp are some relevant model coordinates (out of 12 possible): [ x, y, z, nx, ny, nz, x_pt, y_pt, z_pt, omega_x, omega_y, omega_z ] = inp * output list is force in cartesian coord. world and torque cartesian coord. world :param inp: the subset of all possible coord. of one body (see list), here expected as float numbers. 
The order has to be equal the one in force_lam """ signals = self.signals_values [ t, y , x_pt, y_pt, z_pt, omega_z ] = inp #print "SSSig: ",signals eps = 5.0e-1 #preset values F_x = 0. F_y = 0. F_z = 0. T_x = 0. T_y = 0. T_z = 0. #vertical reaction force if y<0: F_y = -self.D*(y-self.y0) - self.gamma*y_pt else: F_y = 0. #side slip angle alpha = np.arctan2(z_pt,(x_pt+eps)) #in the tire carrier frame #slip slip = (omega_z * self.R_tire + x_pt)/np.abs(x_pt+eps) ####################################################### # Pacejka - Model: F_z = - Pacejka_F_lat(F_y, alpha, 0.) F_x = - Pacejka_F_long(F_y, slip) T_z = F_x * self.R_tire - self.gamma_torque * omega_z #print F_y #self.oz += 1./10.*delta_t * T_z return [F_x, F_y, F_z, T_x, T_y, T_z], [F_x, F_y, F_z, T_z, 1e+2*slip, 180/np.pi*alpha] def get_signal_length(self): return 6
mit
257,699,056,953,202,370
-7,100,947,989,873,954,000
37.301435
157
0.547408
false
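The lambdify_trafo method in the record above turns each symbolic coordinate transformation in self.trafo into a plain numeric callable with sympy.lambdify, keyed on the generalized coordinates. A minimal sketch of that pattern follows; the symbols q0, q1 and the expressions are illustrative stand-ins, not taken from the record.

from sympy import symbols, lambdify, sin, cos

q0, q1 = symbols('q0 q1')            # stand-ins for the generalized coordinates
x_expr = cos(q0) + q1                # hypothetical trafo expression (like self.trafo[0])
x_pt_expr = -sin(q0)                 # hypothetical velocity-level expression

lam_x = lambdify((q0, q1), x_expr)       # numeric callable, analogous to self.lam_x
lam_x_pt = lambdify((q0, q1), x_pt_expr)

w = (0.1, 2.0)                           # numeric generalized coordinates
print(lam_x(*w), lam_x_pt(*w))           # evaluated the same way trafo_lam / force_lam do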
mdaif/olympia
apps/landfill/tests/test_categories.py
15
1025
# -*- coding: utf-8 -*- from nose.tools import eq_, ok_ import amo import amo.tests from addons.models import Category from constants.applications import APPS from landfill.categories import generate_categories class CategoriesTests(amo.tests.TestCase): def test_categories_themes_generation(self): data = generate_categories() eq_(len(data), Category.objects.all().count()) eq_(len(data), 15) def test_categories_themes_translations(self): with self.activate(locale='es'): data = generate_categories() ok_(unicode(data[0].name).startswith(u'(español) ')) def test_categories_addons_generation(self): data = generate_categories(APPS['android']) eq_(len(data), Category.objects.all().count()) eq_(len(data), 10) def test_categories_addons_translations(self): with self.activate(locale='es'): data = generate_categories(APPS['android']) ok_(unicode(data[0].name).startswith(u'(español) '))
bsd-3-clause
-980,603,392,306,768,500
-108,073,287,076,555,710
32
64
0.655914
false
alex/raven
raven/handlers/logging.py
3
4470
""" raven.handlers.logging ~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import datetime import logging import sys import traceback from raven.base import Client from raven.utils.encoding import to_string from raven.utils.stacks import iter_stack_frames class SentryHandler(logging.Handler, object): def __init__(self, *args, **kwargs): client = kwargs.get('client_cls', Client) if len(args) == 1: arg = args[0] if isinstance(arg, basestring): self.client = client(dsn=arg) elif isinstance(arg, Client): self.client = arg else: raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % ( self.__class__.__name__, arg, )) elif 'client' in kwargs: self.client = kwargs['client'] elif len(args) == 2 and not kwargs: servers, key = args self.client = client(servers=servers, key=key) else: self.client = client(*args, **kwargs) logging.Handler.__init__(self) def emit(self, record): # from sentry.client.middleware import SentryLogMiddleware # # Fetch the request from a threadlocal variable, if available # request = getattr(SentryLogMiddleware.thread, 'request', None) self.format(record) # Avoid typical config issues by overriding loggers behavior if record.name.startswith('sentry.errors'): print >> sys.stderr, to_string(record.message) return try: return self._emit(record) except Exception: print >> sys.stderr, "Top level Sentry exception caught - failed creating log record" print >> sys.stderr, to_string(record.msg) print >> sys.stderr, to_string(traceback.format_exc()) try: self.client.capture('Exception') except Exception: pass def _emit(self, record, **kwargs): data = {} for k, v in record.__dict__.iteritems(): if '.' not in k and k not in ('culprit',): continue data[k] = v stack = getattr(record, 'stack', None) if stack is True: stack = iter_stack_frames() if stack: frames = [] started = False last_mod = '' for item in stack: if isinstance(item, (list, tuple)): frame, lineno = item else: frame, lineno = item, item.f_lineno if not started: f_globals = getattr(frame, 'f_globals', {}) module_name = f_globals.get('__name__', '') if last_mod.startswith('logging') and not module_name.startswith('logging'): started = True else: last_mod = module_name continue frames.append((frame, lineno)) stack = frames extra = getattr(record, 'data', {}) # Add in all of the data from the record that we aren't already capturing for k in record.__dict__.keys(): if k in ('stack', 'name', 'args', 'msg', 'levelno', 'exc_text', 'exc_info', 'data', 'created', 'levelname', 'msecs', 'relativeCreated'): continue if k.startswith('_'): continue extra[k] = record.__dict__[k] date = datetime.datetime.utcfromtimestamp(record.created) # If there's no exception being processed, exc_info may be a 3-tuple of None # http://docs.python.org/library/sys.html#sys.exc_info if record.exc_info and all(record.exc_info): handler = self.client.get_handler('raven.events.Exception') data.update(handler.capture(exc_info=record.exc_info)) data['checksum'] = handler.get_hash(data) data['level'] = record.levelno data['logger'] = record.name return self.client.capture('Message', message=record.msg, params=record.args, stack=stack, data=data, extra=extra, date=date, **kwargs)
bsd-3-clause
-2,073,148,055,127,827,000
7,534,504,757,999,348,000
34.19685
148
0.54094
false
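The SentryHandler in the record above accepts either a DSN string, a Client instance, or client keyword arguments, and otherwise behaves like any standard logging handler. A short wiring sketch (Python 2, to match the record); the DSN is a placeholder, not a real project key.

import logging

from raven.handlers.logging import SentryHandler

handler = SentryHandler('http://public:secret@sentry.example.com/1')  # placeholder DSN
handler.setLevel(logging.ERROR)

logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.error('Something broke', exc_info=True)  # captured and forwarded by _emit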
sassoftware/mint
mint/django_rest/rbuilder/querysets/views/v1/views.py
1
8001
#!/usr/bin/python # # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from django import http from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from mint.django_rest.deco import return_xml, requires, access, xObjRequires from mint.django_rest.rbuilder import service # from mint.django_rest.rbuilder.querysets import models from mint.django_rest.rbuilder.rbac.rbacauth import rbac, manual_rbac from mint.django_rest.rbuilder.errors import PermissionDenied from mint.django_rest.rbuilder.rbac.manager.rbacmanager import \ READSET, MODSETDEF def rbac_can_read_queryset(view, request, query_set_id, *args, **kwargs): obj = view.mgr.getQuerySet(query_set_id) if obj.is_public: # existance of querysets like "All Systems", etc, are not stealthed. # but may vary in size depending on the user accessing them's permissions # (ReadMember) on their contents. return True user = view.mgr.getSessionInfo().user[0] ok = view.mgr.userHasRbacPermission(user, obj, READSET) return ok def rbac_can_write_queryset(view, request, query_set_id, *args, **kwargs): obj = view.mgr.getQuerySet(query_set_id) user = view.mgr.getSessionInfo().user[0] return view.mgr.userHasRbacPermission(user, obj, MODSETDEF) class BaseQuerySetService(service.BaseService): pass class QuerySetsService(BaseQuerySetService): # rbac is handled semimanually for this function -- show only # querysets that we have permission to see # but don't use full rbac code, because that is implemented using querysets # and is too meta. @access.authenticated @return_xml def rest_GET(self, request): user = request._authUser querysets = self.mgr.getQuerySets() return self.mgr.filterRbacQuerysets(user, querysets, request) # not used above, but still needed by load_from_href and other # functions def get(self): return self.mgr.getQuerySets() @access.admin @requires('query_set', load=True, save=True) @return_xml def rest_POST(self, request, query_set): return self.mgr.addQuerySet(query_set, request._authUser) class QuerySetService(BaseQuerySetService): # rbac is handled semimanually for this function -- show only # querysets that we have permission to see # but don't use full rbac code, because that is implemented using querysets # and is too meta. 
@rbac(manual_rbac) @return_xml def rest_GET(self, request, query_set_id): user = request._authUser queryset = self.mgr.getQuerySet(query_set_id) if not queryset.is_public and not self.mgr.userHasRbacPermission( user, queryset, READSET, request ): raise PermissionDenied() return queryset # not used above, but still needed by load_from_href and other # functions def get(self, query_set_id): return self.mgr.getQuerySet(query_set_id) @access.admin @requires('query_set') @return_xml def rest_PUT(self, request, query_set_id, query_set): oldQuerySet = self.mgr.getQuerySet(query_set_id) if oldQuerySet.pk != query_set.pk: raise PermissionDenied(msg='Attempting to reassign ID') return self.mgr.updateQuerySet(query_set, request._authUser) @access.admin def rest_DELETE(self, request, query_set_id): querySet = self.mgr.getQuerySet(query_set_id) self.mgr.deleteQuerySet(querySet) response = http.HttpResponse(status=204) return response class QuerySetAllResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): return self.mgr.getQuerySetAllResult(query_set_id, for_user=request._authUser) class QuerySetUniverseResultService(BaseQuerySetService): '''the parent queryset of all objects of a given type''' @access.authenticated @return_xml def rest_GET(self, request, query_set_id): self.mgr.getQuerySetUniverseSet(query_set_id) url = reverse('QuerySetAllResult', args=[query_set_id]) return HttpResponseRedirect(url) class QuerySetChosenResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): return self.mgr.getQuerySetChosenResult(query_set_id, for_user=request._authUser) @rbac(rbac_can_write_queryset) # TODO: source fromc onstant somewhere @requires(['systems', 'users', 'images', 'targets', 'project_branch_stages', 'projects', 'grants', 'roles']) @return_xml def rest_PUT(self, request, query_set_id, *args, **kwargs): resources = kwargs.items()[0][1] return self.mgr.addQuerySetChosen(query_set_id, resources, request._authUser) @rbac(rbac_can_write_queryset) # TODO: source fromc onstant somewhere @requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role']) @return_xml def rest_POST(self, request, query_set_id, *args, **kwargs): resource = kwargs.items()[0][1] self.mgr.updateQuerySetChosen(query_set_id, resource, request._authUser) return resource @rbac(rbac_can_write_queryset) # TODO: source fromc onstant somewhere @requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role']) @return_xml def rest_DELETE(self, request, query_set_id, *args, **kwargs): resource = kwargs.items()[0][1] return self.mgr.deleteQuerySetChosen(query_set_id, resource, request._authUser) class QuerySetFilteredResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): return self.mgr.getQuerySetFilteredResult(query_set_id, for_user=request._authUser) class QuerySetChildResultService(BaseQuerySetService): @access.authenticated @return_xml def rest_GET(self, request, query_set_id): if rbac_can_read_queryset(self, request, query_set_id): return self.mgr.getQuerySetChildResult(query_set_id) else: return self.mgr.getQuerySetChildResult(query_set_id, for_user=request._authUser) # this is not expected to be our final API for removing child members # but serves as a temporary one in case someone needs it. 
Deleting # the queryset is not an option to clear it out because associated # grants would be purged. @rbac(rbac_can_write_queryset) @requires('query_set') @return_xml def rest_DELETE(self, request, query_set_id, query_set): return self.mgr.deleteQuerySetChild(query_set_id, query_set, for_user=request._authUser) class QuerySetJobsService(BaseQuerySetService): # no way to list running jobs at the moment # since all jobs run immediately @rbac(rbac_can_read_queryset) @xObjRequires('job') def rest_POST(self, request, query_set_id, job): '''launch a job on this queryset''' queryset = self.mgr.getQuerySet(query_set_id) self.mgr.scheduleQuerySetJobAction( queryset, job ) return http.HttpResponse(status=200) class QuerySetFilterDescriptorService(BaseQuerySetService): # @access.authenticated @return_xml def rest_GET(self, request, query_set_id=None): return self.mgr.getQuerySetFilterDescriptor(query_set_id)
apache-2.0
230,036,178,087,797,220
5,844,107,222,374,655,000
36.56338
122
0.699038
false
dneg/cortex
python/IECoreMaya/TransformationMatrixParameterUI.py
12
5608
########################################################################## # # Copyright (c) 2010, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import maya.cmds import IECore import IECoreMaya ## The UI for the TransformationMatrixParameter supports the following ## userData() ## ## - "visibleFields" IECore.StringVectorData, A list of fields to ## display in the UI. Possible values are (D marks a default): ## "translate" D ## "rotate", D ## "scale" D ## "shear" D ## "rotatePivot", ## "rotatePivotTranslation", ## "scalePivot" ## "scalePivotTranslation" class TransformationMatrixParameterUI( IECoreMaya.ParameterUI ) : _allFields = ( "translate", "rotate", "scale", "shear", "scalePivot", "scalePivotTranslation", "rotatePivot", "rotatePivotTranslation" ) def __init__( self, node, parameter, **kw ) : self._outerColumn = maya.cmds.columnLayout( adj=True ) IECoreMaya.ParameterUI.__init__( self, node, parameter, self._outerColumn, **kw ) maya.cmds.rowLayout( numberOfColumns=2, parent=self._outerColumn ) self._label = maya.cmds.text( label = self.label(), font = "tinyBoldLabelFont", align = "right", annotation = self.description() ) self._manip = maya.cmds.button( label="Manipulate" ) maya.cmds.setParent("..") maya.cmds.setParent("..") self._fields = {} self.__kw = kw.copy() self.replace( self.node(), self.parameter ) def replace( self, node, parameter ) : IECoreMaya.ParameterUI.replace( self, node, parameter ) currentParent = maya.cmds.setParent( query=True ) visibleFields = IECore.StringVectorData( ( "translate", "rotate", "scale", "shear" ) ) with IECore.IgnoredExceptions( KeyError ) : userDataFields = parameter.userData()["UI"]["visibleFields"] visibleFields = [] for u in userDataFields : if u not in TransformationMatrixParameterUI._allFields: IECore.msg( IECore.Msg.Level.Warning, "TransformationMatrixParameterUI", "Invalid field '%s' requested in UI userData for '%s'. Available fields are %s." 
% ( u, parameter.name, TransformationMatrixParameterUI._allFields ) ) continue visibleFields.append( u ) for f in self._fields.keys() : if f not in visibleFields : maya.cmds.deleteUI( self._fields[f][0] ) del self._fields[f] fnPH = IECoreMaya.FnParameterisedHolder( node ) baseName = fnPH.parameterPlugPath( parameter ) self._addPopupMenu( parentUI=self._label, attributeName=baseName ) for f in visibleFields : if f not in self._fields : layout = maya.cmds.rowLayout( numberOfColumns = 4, parent = self._outerColumn, columnWidth4 = [ IECoreMaya.ParameterUI.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex ] ) maya.cmds.text( label=f, font="smallPlainLabelFont", align="right" ) self._fields[f] = ( layout, maya.cmds.floatField(), maya.cmds.floatField(), maya.cmds.floatField() ) maya.cmds.connectControl( self._fields[f][1], "%s%s%i" % ( baseName, f, 0 ) ) maya.cmds.connectControl( self._fields[f][2], "%s%s%i" % ( baseName, f, 1 ) ) maya.cmds.connectControl( self._fields[f][3], "%s%s%i" % ( baseName, f, 2 ) ) maya.cmds.button( self._manip, edit = True, # The manip is currently only registered for float types visible = isinstance( parameter, IECore.TransformationMatrixfParameter ), command = self._createCallback( IECore.curry( IECoreMaya.ManipulatorUI.manipulateParameter, node, parameter ) ) ) maya.cmds.setParent( currentParent ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.TransformationMatrixfParameter, TransformationMatrixParameterUI ) IECoreMaya.ParameterUI.registerUI( IECore.TypeId.TransformationMatrixdParameter, TransformationMatrixParameterUI )
bsd-3-clause
-8,721,979,908,194,452,000
4,976,562,139,357,623,000
38.77305
208
0.690442
false
simonpatrick/bite-project
deps/gdata-python-client/tests/gdata_tests/client_smoke_test.py
39
1743
#!/usr/bin/env python # # Copyright (C) 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is used for version 2 of the Google Data APIs. __author__ = 'j.s@google.com (Jeff Scudder)' import unittest import gdata.test_config as conf import gdata.analytics.client import gdata.apps.emailsettings.client import gdata.blogger.client import gdata.spreadsheets.client import gdata.calendar_resource.client import gdata.contacts.client import gdata.docs.client import gdata.projecthosting.client import gdata.sites.client class ClientSmokeTest(unittest.TestCase): def test_check_auth_client_classes(self): conf.check_clients_with_auth(self, ( gdata.analytics.client.AnalyticsClient, gdata.apps.emailsettings.client.EmailSettingsClient, gdata.blogger.client.BloggerClient, gdata.spreadsheets.client.SpreadsheetsClient, gdata.calendar_resource.client.CalendarResourceClient, gdata.contacts.client.ContactsClient, gdata.docs.client.DocsClient, gdata.projecthosting.client.ProjectHostingClient, gdata.sites.client.SitesClient )) def suite(): return conf.build_suite([ClientSmokeTest]) if __name__ == '__main__': unittest.main()
apache-2.0
1,556,855,090,494,300,700
8,462,508,834,591,535,000
29.051724
74
0.751004
false
Anonymike/pasta-bot
plugins/google_broken.py
1
3457
import random from util import hook, http, text, database, web import re def api_get(kind, query): """Use the RESTful Google Search API""" url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \ 'v=1.0&safe=off' return http.get_json(url % kind, q=query) @hook.command('search') @hook.command('g') @hook.command def google(inp,db=None,chan=None): """google <query> -- Returns first google search result for <query>.""" trimlength = database.get(db,'channels','trimlength','chan',chan) if not trimlength: trimlength = 9999 parsed = api_get('web', inp) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'No results found.' result = parsed['responseData']['results'][0] title = http.unescape(result['titleNoFormatting']) content = http.unescape(result['content']) if not content: content = "No description available." else: content = http.html.fromstring(content.replace('\n', '')).text_content() return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content) # @hook.command('image') @hook.command('gis') @hook.command('gi') @hook.command('image') @hook.command def googleimage(inp): """gis <query> -- Returns first Google Image result for <query>.""" parsed = api_get('images', inp) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'no images found' return random.choice(parsed['responseData']['results'][:10])['unescapedUrl'] @hook.command def gcalc(inp): "gcalc <term> -- Calculate <term> with Google Calc." soup = http.get_soup('http://www.google.com/search', q=inp) result = soup.find('span', {'class': 'cwcot'}) formula = soup.find('span', {'class': 'cwclet'}) if not result: return "Could not calculate '{}'".format(inp) return u"{} {}".format(formula.contents[0].strip(),result.contents[0].strip()) @hook.regex(r'^\>(.*\.(gif|GIF|jpg|JPG|jpeg|JPEG|png|PNG|tiff|TIFF|bmp|BMP))\s?(\d+)?') @hook.command def implying(inp): """>laughing girls.gif <num> -- Returns first Google Image result for <query>.""" try: search = inp.group(1) except: search = inp try: num = int(inp.group(3)) except: num = 0 if 'http' in search: return parsed = api_get('images', search) if not 200 <= parsed['responseStatus'] < 300: raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], '')) if not parsed['responseData']['results']: return 'no images found' try: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][num]['unescapedUrl']) except: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][0]['unescapedUrl']) #return random.choice(parsed['responseData']['results'][:10])['unescapedUrl'] @hook.command('nym') @hook.command('littleanon') @hook.command('gfy') @hook.command def lmgtfy(inp, bot=None): "lmgtfy [phrase] - Posts a google link for the specified phrase" link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp) try: return web.isgd(link) except (web.ShortenError, http.HTTPError): return link
gpl-3.0
7,465,173,280,783,342,000
5,623,491,256,443,994,000
33.919192
119
0.639283
false
aequitas/home-assistant
homeassistant/components/homematicip_cloud/sensor.py
2
12041
"""Support for HomematicIP Cloud sensors.""" import logging from homematicip.aio.device import ( AsyncBrandSwitchMeasuring, AsyncFullFlushSwitchMeasuring, AsyncHeatingThermostat, AsyncHeatingThermostatCompact, AsyncLightSensor, AsyncMotionDetectorIndoor, AsyncMotionDetectorOutdoor, AsyncMotionDetectorPushButton, AsyncPlugableSwitchMeasuring, AsyncPresenceDetectorIndoor, AsyncTemperatureHumiditySensorDisplay, AsyncTemperatureHumiditySensorOutdoor, AsyncTemperatureHumiditySensorWithoutDisplay, AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro) from homematicip.aio.home import AsyncHome from homematicip.base.enums import ValveState from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_POWER, DEVICE_CLASS_TEMPERATURE, POWER_WATT, TEMP_CELSIUS) from homeassistant.core import HomeAssistant from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice _LOGGER = logging.getLogger(__name__) ATTR_TEMPERATURE_OFFSET = 'temperature_offset' ATTR_WIND_DIRECTION = 'wind_direction' ATTR_WIND_DIRECTION_VARIATION = 'wind_direction_variation_in_degree' async def async_setup_platform( hass, config, async_add_entities, discovery_info=None): """Set up the HomematicIP Cloud sensors devices.""" pass async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities) -> None: """Set up the HomematicIP Cloud sensors from a config entry.""" home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home devices = [HomematicipAccesspointStatus(home)] for device in home.devices: if isinstance(device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)): devices.append(HomematicipHeatingThermostat(home, device)) devices.append(HomematicipTemperatureSensor(home, device)) if isinstance(device, (AsyncTemperatureHumiditySensorDisplay, AsyncTemperatureHumiditySensorWithoutDisplay, AsyncTemperatureHumiditySensorOutdoor, AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)): devices.append(HomematicipTemperatureSensor(home, device)) devices.append(HomematicipHumiditySensor(home, device)) if isinstance(device, (AsyncLightSensor, AsyncMotionDetectorIndoor, AsyncMotionDetectorOutdoor, AsyncMotionDetectorPushButton, AsyncPresenceDetectorIndoor, AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)): devices.append(HomematicipIlluminanceSensor(home, device)) if isinstance(device, (AsyncPlugableSwitchMeasuring, AsyncBrandSwitchMeasuring, AsyncFullFlushSwitchMeasuring)): devices.append(HomematicipPowerSensor(home, device)) if isinstance(device, (AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)): devices.append(HomematicipWindspeedSensor(home, device)) if isinstance(device, (AsyncWeatherSensorPlus, AsyncWeatherSensorPro)): devices.append(HomematicipTodayRainSensor(home, device)) if devices: async_add_entities(devices) class HomematicipAccesspointStatus(HomematicipGenericDevice): """Representation of an HomeMaticIP Cloud access point.""" def __init__(self, home: AsyncHome) -> None: """Initialize access point device.""" super().__init__(home, home) @property def device_info(self): """Return device specific attributes.""" # Adds a sensor to the existing HAP device return { 'identifiers': { # Serial numbers of Homematic IP device (HMIPC_DOMAIN, self._device.id) } } @property def icon(self) -> str: """Return the icon of the access point device.""" return 'mdi:access-point-network' @property def state(self) -> float: """Return the state 
of the access point.""" return self._home.dutyCycle @property def available(self) -> bool: """Device available.""" return self._home.connected @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return '%' class HomematicipHeatingThermostat(HomematicipGenericDevice): """Represenation of a HomematicIP heating thermostat device.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize heating thermostat device.""" super().__init__(home, device, 'Heating') @property def icon(self) -> str: """Return the icon.""" if super().icon: return super().icon if self._device.valveState != ValveState.ADAPTION_DONE: return 'mdi:alert' return 'mdi:radiator' @property def state(self) -> int: """Return the state of the radiator valve.""" if self._device.valveState != ValveState.ADAPTION_DONE: return self._device.valveState return round(self._device.valvePosition*100) @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return '%' class HomematicipHumiditySensor(HomematicipGenericDevice): """Represenation of a HomematicIP Cloud humidity device.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize the thermometer device.""" super().__init__(home, device, 'Humidity') @property def device_class(self) -> str: """Return the device class of the sensor.""" return DEVICE_CLASS_HUMIDITY @property def state(self) -> int: """Return the state.""" return self._device.humidity @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return '%' class HomematicipTemperatureSensor(HomematicipGenericDevice): """Representation of a HomematicIP Cloud thermometer device.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize the thermometer device.""" super().__init__(home, device, 'Temperature') @property def device_class(self) -> str: """Return the device class of the sensor.""" return DEVICE_CLASS_TEMPERATURE @property def state(self) -> float: """Return the state.""" if hasattr(self._device, 'valveActualTemperature'): return self._device.valveActualTemperature return self._device.actualTemperature @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return TEMP_CELSIUS @property def device_state_attributes(self): """Return the state attributes of the windspeed sensor.""" attr = super().device_state_attributes if hasattr(self._device, 'temperatureOffset') and \ self._device.temperatureOffset: attr[ATTR_TEMPERATURE_OFFSET] = self._device.temperatureOffset return attr class HomematicipIlluminanceSensor(HomematicipGenericDevice): """Represenation of a HomematicIP Illuminance device.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize the device.""" super().__init__(home, device, 'Illuminance') @property def device_class(self) -> str: """Return the device class of the sensor.""" return DEVICE_CLASS_ILLUMINANCE @property def state(self) -> float: """Return the state.""" if hasattr(self._device, 'averageIllumination'): return self._device.averageIllumination return self._device.illumination @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return 'lx' class HomematicipPowerSensor(HomematicipGenericDevice): """Represenation of a HomematicIP power measuring device.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize the device.""" super().__init__(home, device, 'Power') @property def device_class(self) -> str: """Return the device 
class of the sensor.""" return DEVICE_CLASS_POWER @property def state(self) -> float: """Represenation of the HomematicIP power comsumption value.""" return self._device.currentPowerConsumption @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return POWER_WATT class HomematicipWindspeedSensor(HomematicipGenericDevice): """Represenation of a HomematicIP wind speed sensor.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize the device.""" super().__init__(home, device, 'Windspeed') @property def state(self) -> float: """Represenation of the HomematicIP wind speed value.""" return self._device.windSpeed @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return 'km/h' @property def device_state_attributes(self): """Return the state attributes of the wind speed sensor.""" attr = super().device_state_attributes if hasattr(self._device, 'windDirection') and \ self._device.windDirection: attr[ATTR_WIND_DIRECTION] = \ _get_wind_direction(self._device.windDirection) if hasattr(self._device, 'windDirectionVariation') and \ self._device.windDirectionVariation: attr[ATTR_WIND_DIRECTION_VARIATION] = \ self._device.windDirectionVariation return attr class HomematicipTodayRainSensor(HomematicipGenericDevice): """Represenation of a HomematicIP rain counter of a day sensor.""" def __init__(self, home: AsyncHome, device) -> None: """Initialize the device.""" super().__init__(home, device, 'Today Rain') @property def state(self) -> float: """Represenation of the HomematicIP todays rain value.""" return round(self._device.todayRainCounter, 2) @property def unit_of_measurement(self) -> str: """Return the unit this state is expressed in.""" return 'mm' def _get_wind_direction(wind_direction_degree: float) -> str: """Convert wind direction degree to named direction.""" if 11.25 <= wind_direction_degree < 33.75: return 'NNE' if 33.75 <= wind_direction_degree < 56.25: return 'NE' if 56.25 <= wind_direction_degree < 78.75: return 'ENE' if 78.75 <= wind_direction_degree < 101.25: return 'E' if 101.25 <= wind_direction_degree < 123.75: return 'ESE' if 123.75 <= wind_direction_degree < 146.25: return 'SE' if 146.25 <= wind_direction_degree < 168.75: return 'SSE' if 168.75 <= wind_direction_degree < 191.25: return 'S' if 191.25 <= wind_direction_degree < 213.75: return 'SSW' if 213.75 <= wind_direction_degree < 236.25: return 'SW' if 236.25 <= wind_direction_degree < 258.75: return 'WSW' if 258.75 <= wind_direction_degree < 281.25: return 'W' if 281.25 <= wind_direction_degree < 303.75: return 'WNW' if 303.75 <= wind_direction_degree < 326.25: return 'NW' if 326.25 <= wind_direction_degree < 348.75: return 'NNW' return 'N'
apache-2.0
4,209,050,737,572,543,000
-955,323,316,984,537,200
34.83631
76
0.63234
false
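_get_wind_direction in the record above maps a bearing in degrees to one of sixteen compass names through a chain of range checks. The same mapping can be written as a single table lookup; the following is just an equivalent sketch, not part of the Home Assistant component.

_DIRECTIONS = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
               'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']

def wind_direction(degree: float) -> str:
    # Each sector is 22.5 degrees wide and centred on its compass point.
    return _DIRECTIONS[int((degree % 360) / 22.5 + 0.5) % 16]

assert wind_direction(20) == 'NNE'    # matches the 11.25 <= degree < 33.75 branch
assert wind_direction(350) == 'N'     # wraps around past 348.75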
vhaupert/mitmproxy
test/pathod/tservers.py
4
3619
import os import tempfile import re import shutil import requests import io import urllib from mitmproxy.net import tcp from mitmproxy.utils import data from pathod import language from pathod import pathoc from pathod import pathod from pathod import test from pathod.pathod import CA_CERT_NAME cdata = data.Data(__name__) def treader(bytes): """ Construct a tcp.Read object from bytes. """ fp = io.BytesIO(bytes) return tcp.Reader(fp) class DaemonTests: nohang = False ssl = False timeout = None hexdump = False ssloptions = None nocraft = False explain = True @classmethod def setup_class(cls): opts = cls.ssloptions or {} cls.confdir = tempfile.mkdtemp() opts["confdir"] = cls.confdir so = pathod.SSLOptions(**opts) cls.d = test.Daemon( staticdir=cdata.path("data"), anchors=[ (re.compile("/anchor/.*"), "202:da") ], ssl=cls.ssl, ssloptions=so, sizelimit=1 * 1024 * 1024, nohang=cls.nohang, timeout=cls.timeout, hexdump=cls.hexdump, nocraft=cls.nocraft, logreq=True, logresp=True, explain=cls.explain ) @classmethod def teardown_class(cls): cls.d.shutdown() shutil.rmtree(cls.confdir) def teardown(self): self.d.wait_for_silence() self.d.clear_log() def _getpath(self, path, params=None): scheme = "https" if self.ssl else "http" resp = requests.get( "%s://localhost:%s/%s" % ( scheme, self.d.port, path ), verify=os.path.join(self.d.thread.server.ssloptions.confdir, CA_CERT_NAME), params=params ) return resp def getpath(self, path, params=None): logfp = io.StringIO() c = pathoc.Pathoc( ("localhost", self.d.port), ssl=self.ssl, fp=logfp, ) with c.connect(): if params: path = path + "?" + urllib.parse.urlencode(params) resp = c.request("get:%s" % path) return resp def get(self, spec): logfp = io.StringIO() c = pathoc.Pathoc( ("localhost", self.d.port), ssl=self.ssl, fp=logfp, ) with c.connect(): resp = c.request( "get:/p/%s" % urllib.parse.quote(spec) ) return resp def pathoc( self, specs, timeout=None, connect_to=None, ssl=None, ws_read_limit=None, use_http2=False, ): """ Returns a (messages, text log) tuple. """ if ssl is None: ssl = self.ssl logfp = io.StringIO() c = pathoc.Pathoc( ("localhost", self.d.port), ssl=ssl, ws_read_limit=ws_read_limit, timeout=timeout, fp=logfp, use_http2=use_http2, ) with c.connect(connect_to): ret = [] for i in specs: resp = c.request(i) if resp: ret.append(resp) for frm in c.wait(): ret.append(frm) c.stop() return ret, logfp.getvalue() def render(r, settings=language.Settings()): r = r.resolve(settings) s = io.BytesIO() assert language.serve(r, s, settings) return s.getvalue()
mit
-8,462,553,166,322,333,000
-5,837,591,650,803,643,000
23.126667
87
0.505112
false
enthought/traitsgui
enthought/pyface/message_dialog.py
3
1716
#------------------------------------------------------------------------------ # Copyright (c) 2005, Enthought, Inc. # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in enthought/LICENSE.txt and may be redistributed only # under the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # Thanks for using Enthought open source! # # Author: Enthought, Inc. # Description: <Enthought pyface package component> #------------------------------------------------------------------------------ """ The implementation of a dialog that displays a message. """ # Convenience functions. def information(parent, message, title='Information'): """ Convenience function to show an information message dialog. """ dialog = MessageDialog( parent=parent, message=message, title=title, severity='information' ) dialog.open() return def warning(parent, message, title='Warning'): """ Convenience function to show a warning message dialog. """ dialog = MessageDialog( parent=parent, message=message, title=title, severity='warning' ) dialog.open() return def error(parent, message, title='Error'): """ Convenience function to show an error message dialog. """ dialog = MessageDialog( parent=parent, message=message, title=title, severity='error' ) dialog.open() return # Import the toolkit specific version. from toolkit import toolkit_object MessageDialog = toolkit_object('message_dialog:MessageDialog') #### EOF ######################################################################
bsd-3-clause
-8,079,114,470,386,186,000
-5,434,850,325,353,570,000
31.377358
79
0.621795
false
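The information, warning and error helpers in the record above each build a MessageDialog with the matching severity and open it. A minimal usage sketch; it assumes the package is importable under enthought.pyface as laid out in the record, that a GUI toolkit backend is available, and that passing parent=None (no owning window) is acceptable.

from enthought.pyface.message_dialog import error, information

information(None, 'Import finished.', title='Done')
error(None, 'Could not open the data file.')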
Asquera/bigcouch
couchjs/scons/scons-local-2.0.1/SCons/Tool/sunf90.py
61
2180
"""SCons.Tool.sunf90 Tool-specific initialization for sunf90, the Sun Studio F90 compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/sunf90.py 5134 2010/08/16 23:02:40 bdeegan" import SCons.Util from FortranCommon import add_all_to_env compilers = ['sunf90', 'f90'] def generate(env): """Add Builders and construction variables for sun f90 compiler to an Environment.""" add_all_to_env(env) fcomp = env.Detect(compilers) or 'f90' env['FORTRAN'] = fcomp env['F90'] = fcomp env['SHFORTRAN'] = '$FORTRAN' env['SHF90'] = '$F90' env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC') env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC') def exists(env): return env.Detect(compilers) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
174,127,233,967,690,180
-1,784,038,856,795,342,600
33.0625
95
0.731651
false
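The generate function in the record above just detects the Sun Studio compiler and fills in the F90/FORTRAN construction variables, so in an SConstruct the tool is requested by name. A small sketch; it assumes sunf90 (or a plain f90) is on the PATH and that SCons' standard Fortran builders handle the .f90 suffix.

# SConstruct
env = Environment(tools=['default', 'sunf90'])
print(env['F90'], env['SHF90FLAGS'])   # e.g. "sunf90" and "$F90FLAGS -KPIC"
env.Program('hello', 'hello.f90')      # built with the detected Sun Fortran compiler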
rally12/deep-learning
language-translation/problem_unittests.py
98
13080
import numpy as np import tensorflow as tf import itertools import collections import helper def _print_success_message(): print('Tests Passed') def test_text_to_ids(text_to_ids): test_source_text = 'new jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .' test_target_text = 'new jersey est parfois calme pendant l\' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .' test_source_text = test_source_text.lower() test_target_text = test_target_text.lower() source_vocab_to_int, source_int_to_vocab = helper.create_lookup_tables(test_source_text) target_vocab_to_int, target_int_to_vocab = helper.create_lookup_tables(test_target_text) test_source_id_seq, test_target_id_seq = text_to_ids(test_source_text, test_target_text, source_vocab_to_int, target_vocab_to_int) assert len(test_source_id_seq) == len(test_source_text.split('\n')),\ 'source_id_text has wrong length, it should be {}.'.format(len(test_source_text.split('\n'))) assert len(test_target_id_seq) == len(test_target_text.split('\n')), \ 'target_id_text has wrong length, it should be {}.'.format(len(test_target_text.split('\n'))) target_not_iter = [type(x) for x in test_source_id_seq if not isinstance(x, collections.Iterable)] assert not target_not_iter,\ 'Element in source_id_text is not iteratable. Found type {}'.format(target_not_iter[0]) target_not_iter = [type(x) for x in test_target_id_seq if not isinstance(x, collections.Iterable)] assert not target_not_iter, \ 'Element in target_id_text is not iteratable. Found type {}'.format(target_not_iter[0]) source_changed_length = [(words, word_ids) for words, word_ids in zip(test_source_text.split('\n'), test_source_id_seq) if len(words.split()) != len(word_ids)] assert not source_changed_length,\ 'Source text changed in size from {} word(s) to {} id(s): {}'.format( len(source_changed_length[0][0].split()), len(source_changed_length[0][1]), source_changed_length[0][1]) target_missing_end = [word_ids for word_ids in test_target_id_seq if word_ids[-1] != target_vocab_to_int['<EOS>']] assert not target_missing_end,\ 'Missing <EOS> id at the end of {}'.format(target_missing_end[0]) target_bad_size = [(words.split(), word_ids) for words, word_ids in zip(test_target_text.split('\n'), test_target_id_seq) if len(word_ids) != len(words.split()) + 1] assert not target_bad_size,\ 'Target text incorrect size. 
{} should be length {}'.format( target_bad_size[0][1], len(target_bad_size[0][0]) + 1) source_bad_id = [(word, word_id) for word, word_id in zip( [word for sentence in test_source_text.split('\n') for word in sentence.split()], itertools.chain.from_iterable(test_source_id_seq)) if source_vocab_to_int[word] != word_id] assert not source_bad_id,\ 'Source word incorrectly converted from {} to id {}.'.format(source_bad_id[0][0], source_bad_id[0][1]) target_bad_id = [(word, word_id) for word, word_id in zip( [word for sentence in test_target_text.split('\n') for word in sentence.split()], [word_id for word_ids in test_target_id_seq for word_id in word_ids[:-1]]) if target_vocab_to_int[word] != word_id] assert not target_bad_id,\ 'Target word incorrectly converted from {} to id {}.'.format(target_bad_id[0][0], target_bad_id[0][1]) _print_success_message() def test_model_inputs(model_inputs): with tf.Graph().as_default(): input_data, targets, lr, keep_prob = model_inputs() # Check type assert input_data.op.type == 'Placeholder',\ 'Input is not a Placeholder.' assert targets.op.type == 'Placeholder',\ 'Targets is not a Placeholder.' assert lr.op.type == 'Placeholder',\ 'Learning Rate is not a Placeholder.' assert keep_prob.op.type == 'Placeholder', \ 'Keep Probability is not a Placeholder.' # Check name assert input_data.name == 'input:0',\ 'Input has bad name. Found name {}'.format(input_data.name) assert keep_prob.name == 'keep_prob:0', \ 'Keep Probability has bad name. Found name {}'.format(keep_prob.name) assert tf.assert_rank(input_data, 2, message='Input data has wrong rank') assert tf.assert_rank(targets, 2, message='Targets has wrong rank') assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank') assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank') _print_success_message() def test_encoding_layer(encoding_layer): rnn_size = 512 batch_size = 64 num_layers = 3 with tf.Graph().as_default(): rnn_inputs = tf.placeholder(tf.float32, [batch_size, 22, 1000]) keep_prob = tf.placeholder(tf.float32) states = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob) assert len(states) == num_layers,\ 'Found {} state(s). 
It should be {} states.'.format(len(states), num_layers) bad_types = [type(state) for state in states if not isinstance(state, tf.contrib.rnn.LSTMStateTuple)] assert not bad_types,\ 'Found wrong type: {}'.format(bad_types[0]) bad_shapes = [state_tensor.get_shape() for state in states for state_tensor in state if state_tensor.get_shape().as_list() not in [[None, rnn_size], [batch_size, rnn_size]]] assert not bad_shapes,\ 'Found wrong shape: {}'.format(bad_shapes[0]) _print_success_message() def test_decoding_layer(decoding_layer): batch_size = 64 vocab_size = 1000 embedding_size = 200 sequence_length = 22 rnn_size = 512 num_layers = 3 target_vocab_to_int = {'<EOS>': 1, '<GO>': 3} with tf.Graph().as_default(): dec_embed_input = tf.placeholder(tf.float32, [batch_size, 22, embedding_size]) dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size]) keep_prob = tf.placeholder(tf.float32) state = tf.contrib.rnn.LSTMStateTuple( tf.placeholder(tf.float32, [None, rnn_size]), tf.placeholder(tf.float32, [None, rnn_size])) encoder_state = (state, state, state) train_output, inf_output = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob) assert isinstance(train_output, tf.Tensor),\ 'Train Logits is wrong type: {}'.format(type(train_output)) assert isinstance(inf_output, tf.Tensor), \ 'Inference Logits is wrong type: {}'.format(type(inf_output)) assert train_output.get_shape().as_list() == [batch_size, None, vocab_size],\ 'Train Logits is the wrong shape: {}'.format(train_output.get_shape()) assert inf_output.get_shape().as_list() == [None, None, vocab_size], \ 'Inference Logits is the wrong shape: {}'.format(inf_output.get_shape()) _print_success_message() def test_seq2seq_model(seq2seq_model): batch_size = 64 target_vocab_size = 300 sequence_length = 22 rnn_size = 512 num_layers = 3 target_vocab_to_int = {'<EOS>': 1, '<GO>': 3} with tf.Graph().as_default(): input_data = tf.placeholder(tf.int32, [64, 22]) target_data = tf.placeholder(tf.int32, [64, 22]) keep_prob = tf.placeholder(tf.float32) train_output, inf_output = seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, 200, target_vocab_size, 64, 80, rnn_size, num_layers, target_vocab_to_int) assert isinstance(train_output, tf.Tensor),\ 'Train Logits is wrong type: {}'.format(type(train_output)) assert isinstance(inf_output, tf.Tensor), \ 'Inference Logits is wrong type: {}'.format(type(inf_output)) assert train_output.get_shape().as_list() == [batch_size, None, target_vocab_size],\ 'Train Logits is the wrong shape: {}'.format(train_output.get_shape()) assert inf_output.get_shape().as_list() == [None, None, target_vocab_size], \ 'Inference Logits is the wrong shape: {}'.format(inf_output.get_shape()) _print_success_message() def test_sentence_to_seq(sentence_to_seq): sentence = 'this is a test sentence' vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4} output = sentence_to_seq(sentence, vocab_to_int) assert len(output) == 5,\ 'Wrong length. Found a length of {}'.format(len(output)) assert output[3] == 2,\ 'Missing <UNK> id.' assert np.array_equal(output, [3, 6, 5, 2, 4]),\ 'Incorrect ouput. 
Found {}'.format(output) _print_success_message() def test_process_decoding_input(process_decoding_input): batch_size = 2 seq_length = 3 target_vocab_to_int = {'<GO>': 3} with tf.Graph().as_default(): target_data = tf.placeholder(tf.int32, [batch_size, seq_length]) dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size) assert dec_input.get_shape() == (batch_size, seq_length),\ 'Wrong shape returned. Found {}'.format(dec_input.get_shape()) test_target_data = [[10, 20, 30], [40, 18, 23]] with tf.Session() as sess: test_dec_input = sess.run(dec_input, {target_data: test_target_data}) assert test_dec_input[0][0] == target_vocab_to_int['<GO>'] and\ test_dec_input[1][0] == target_vocab_to_int['<GO>'],\ 'Missing GO Id.' _print_success_message() def test_decoding_layer_train(decoding_layer_train): batch_size = 64 vocab_size = 1000 embedding_size = 200 sequence_length = 22 rnn_size = 512 num_layers = 3 with tf.Graph().as_default(): with tf.variable_scope("decoding") as decoding_scope: dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers) output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope) dec_embed_input = tf.placeholder(tf.float32, [batch_size, 22, embedding_size]) keep_prob = tf.placeholder(tf.float32) state = tf.contrib.rnn.LSTMStateTuple( tf.placeholder(tf.float32, [None, rnn_size]), tf.placeholder(tf.float32, [None, rnn_size])) encoder_state = (state, state, state) train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) assert train_logits.get_shape().as_list() == [batch_size, None, vocab_size], \ 'Wrong shape returned. Found {}'.format(train_logits.get_shape()) _print_success_message() def test_decoding_layer_infer(decoding_layer_infer): vocab_size = 1000 sequence_length = 22 embedding_size = 200 rnn_size = 512 num_layers = 3 with tf.Graph().as_default(): with tf.variable_scope("decoding") as decoding_scope: dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers) output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope) dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size]) keep_prob = tf.placeholder(tf.float32) state = tf.contrib.rnn.LSTMStateTuple( tf.placeholder(tf.float32, [None, rnn_size]), tf.placeholder(tf.float32, [None, rnn_size])) encoder_state = (state, state, state) infer_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, 10, 20, sequence_length, vocab_size, decoding_scope, output_fn, keep_prob) assert infer_logits.get_shape().as_list() == [None, None, vocab_size], \ 'Wrong shape returned. Found {}'.format(infer_logits.get_shape()) _print_success_message()
mit
-8,159,529,481,467,394,000
4,529,879,896,974,182,000
45.180212
357
0.614737
false
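test_text_to_ids in the record above pins down the contract expected of the student's text_to_ids function: one id list per sentence, every word mapped through its lookup table, and an <EOS> id appended to each target sentence. A tiny reference sketch of that contract follows; the vocab dictionaries are hypothetical and this is not the project's helper module.

def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    # One list of ids per '\n'-separated sentence; targets get an <EOS> id appended.
    source_id_text = [[source_vocab_to_int[w] for w in line.split()]
                      for line in source_text.split('\n')]
    target_id_text = [[target_vocab_to_int[w] for w in line.split()]
                      + [target_vocab_to_int['<EOS>']]
                      for line in target_text.split('\n')]
    return source_id_text, target_id_text

src_vocab = {'new': 4, 'jersey': 5}
tgt_vocab = {'<EOS>': 1, 'new': 7, 'jersey': 8}
print(text_to_ids('new jersey', 'new jersey', src_vocab, tgt_vocab))
# -> ([[4, 5]], [[7, 8, 1]])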
earshel/PokeyPyManager
POGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse_pb2.py
16
5285
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse.proto', package='POGOProtos.Networking.Responses', syntax='proto3', serialized_pb=_b('\nGPOGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\x97\x02\n!CollectDailyDefenderBonusResponse\x12Y\n\x06result\x18\x01 \x01(\x0e\x32I.POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.Result\x12\x15\n\rcurrency_type\x18\x02 \x03(\t\x12\x18\n\x10\x63urrency_awarded\x18\x03 \x03(\x05\x12\x17\n\x0f\x64\x65\x66\x65nders_count\x18\x04 \x01(\x05\"M\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x0b\n\x07\x46\x41ILURE\x10\x02\x12\x0c\n\x08TOO_SOON\x10\x03\x12\x10\n\x0cNO_DEFENDERS\x10\x04\x62\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT = _descriptor.EnumDescriptor( name='Result', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.Result', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNSET', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SUCCESS', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='FAILURE', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='TOO_SOON', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='NO_DEFENDERS', index=4, number=4, options=None, type=None), ], containing_type=None, options=None, serialized_start=311, serialized_end=388, ) _sym_db.RegisterEnumDescriptor(_COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT) _COLLECTDAILYDEFENDERBONUSRESPONSE = _descriptor.Descriptor( name='CollectDailyDefenderBonusResponse', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='result', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.result', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='currency_type', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.currency_type', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='currency_awarded', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.currency_awarded', index=2, number=3, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), 
_descriptor.FieldDescriptor( name='defenders_count', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.defenders_count', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=109, serialized_end=388, ) _COLLECTDAILYDEFENDERBONUSRESPONSE.fields_by_name['result'].enum_type = _COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT _COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT.containing_type = _COLLECTDAILYDEFENDERBONUSRESPONSE DESCRIPTOR.message_types_by_name['CollectDailyDefenderBonusResponse'] = _COLLECTDAILYDEFENDERBONUSRESPONSE CollectDailyDefenderBonusResponse = _reflection.GeneratedProtocolMessageType('CollectDailyDefenderBonusResponse', (_message.Message,), dict( DESCRIPTOR = _COLLECTDAILYDEFENDERBONUSRESPONSE, __module__ = 'POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse) )) _sym_db.RegisterMessage(CollectDailyDefenderBonusResponse) # @@protoc_insertion_point(module_scope)
mit
8,781,224,014,816,962,000
9,004,022,251,899,286,000
40.614173
627
0.756481
false
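The generated module in the record above defines CollectDailyDefenderBonusResponse with a nested Result enum and four fields (result, currency_type, currency_awarded, defenders_count). A short round-trip sketch using the standard protobuf Python API; the field values are made up and the import assumes the generated package is on the path.

from POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse_pb2 import (
    CollectDailyDefenderBonusResponse)

resp = CollectDailyDefenderBonusResponse()
resp.result = CollectDailyDefenderBonusResponse.SUCCESS   # nested Result enum value
resp.currency_type.append('POKECOIN')                     # made-up repeated value
resp.currency_awarded.append(50)
resp.defenders_count = 5

payload = resp.SerializeToString()                         # wire-format bytes
decoded = CollectDailyDefenderBonusResponse.FromString(payload)
print(decoded.result == CollectDailyDefenderBonusResponse.SUCCESS)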
catapult-project/catapult-csm
third_party/gsutil/third_party/httplib2/python2/httplib2/__init__.py
29
69586
from __future__ import generators """ httplib2 A caching http interface that supports ETags and gzip to conserve bandwidth. Requires Python 2.3 or later Changelog: 2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. """ __author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", "James Antill", "Xavier Verges Farrero", "Jonathan Feinberg", "Blair Zajac", "Sam Ruby", "Louis Nyffenegger"] __license__ = "MIT" __version__ = "0.7.7" import re import sys import email import email.Utils import email.Message import email.FeedParser import StringIO import gzip import zlib import httplib import urlparse import urllib import base64 import os import copy import calendar import time import random import errno try: from hashlib import sha1 as _sha, md5 as _md5 except ImportError: # prior to Python 2.5, these were separate modules import sha import md5 _sha = sha.new _md5 = md5.new import hmac from gettext import gettext as _ import socket try: from httplib2 import socks except ImportError: try: import socks except (ImportError, AttributeError): socks = None # Build the appropriate socket wrapper for ssl try: import ssl # python 2.6 ssl_SSLError = ssl.SSLError def _ssl_wrap_socket(sock, key_file, cert_file, disable_validation, ca_certs): if disable_validation: cert_reqs = ssl.CERT_NONE else: cert_reqs = ssl.CERT_REQUIRED # We should be specifying SSL version 3 or TLS v1, but the ssl module # doesn't expose the necessary knobs. So we need to go with the default # of SSLv23. return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file, cert_reqs=cert_reqs, ca_certs=ca_certs) except (AttributeError, ImportError): ssl_SSLError = None def _ssl_wrap_socket(sock, key_file, cert_file, disable_validation, ca_certs): if not disable_validation: raise CertificateValidationUnsupported( "SSL certificate validation is not supported without " "the ssl module installed. To avoid this error, install " "the ssl module, or explicity disable validation.") ssl_sock = socket.ssl(sock, key_file, cert_file) return httplib.FakeSocket(sock, ssl_sock) if sys.version_info >= (2,3): from iri2uri import iri2uri else: def iri2uri(uri): return uri def has_timeout(timeout): # python 2.6 if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'): return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT) return (timeout is not None) __all__ = [ 'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', 'debuglevel', 'ProxiesUnavailableError'] # The httplib debug level, set to a non-zero value to get debug output debuglevel = 0 # A request will be tried 'RETRIES' times if it fails at the socket/connection level. RETRIES = 2 # Python 2.3 support if sys.version_info < (2,4): def sorted(seq): seq.sort() return seq # Python 2.3 support def HTTPResponse__getheaders(self): """Return list of (header, value) tuples.""" if self.msg is None: raise httplib.ResponseNotReady() return self.msg.items() if not hasattr(httplib.HTTPResponse, 'getheaders'): httplib.HTTPResponse.getheaders = HTTPResponse__getheaders # All exceptions raised here derive from HttpLib2Error class HttpLib2Error(Exception): pass # Some exceptions can be caught and optionally # be turned back into responses. 
class HttpLib2ErrorWithResponse(HttpLib2Error): def __init__(self, desc, response, content): self.response = response self.content = content HttpLib2Error.__init__(self, desc) class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass class RedirectLimit(HttpLib2ErrorWithResponse): pass class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class MalformedHeader(HttpLib2Error): pass class RelativeURIError(HttpLib2Error): pass class ServerNotFoundError(HttpLib2Error): pass class ProxiesUnavailableError(HttpLib2Error): pass class CertificateValidationUnsupported(HttpLib2Error): pass class SSLHandshakeError(HttpLib2Error): pass class NotSupportedOnThisPlatform(HttpLib2Error): pass class CertificateHostnameMismatch(SSLHandshakeError): def __init__(self, desc, host, cert): HttpLib2Error.__init__(self, desc) self.host = host self.cert = cert # Open Items: # ----------- # Proxy support # Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) # Pluggable cache storage (supports storing the cache in # flat files by default. We need a plug-in architecture # that can support Berkeley DB and Squid) # == Known Issues == # Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. # Does not handle Cache-Control: max-stale # Does not use Age: headers when calculating cache freshness. # The number of redirections to follow before giving up. # Note that only GET redirects are automatically followed. # Will also honor 301 requests by saving that info and never # requesting that URI again. DEFAULT_MAX_REDIRECTS = 5 try: # Users can optionally provide a module that tells us where the CA_CERTS # are located. import ca_certs_locater CA_CERTS = ca_certs_locater.get() except ImportError: # Default CA certificates file bundled with httplib2. CA_CERTS = os.path.join( os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt") # Which headers are hop-by-hop headers by default HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] def _get_end2end_headers(response): hopbyhop = list(HOP_BY_HOP) hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) return [header for header in response.keys() if header not in hopbyhop] URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri) """ groups = URI.match(uri).groups() return (groups[1], groups[3], groups[4], groups[6], groups[8]) def urlnorm(uri): (scheme, authority, path, query, fragment) = parse_uri(uri) if not scheme or not authority: raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) authority = authority.lower() scheme = scheme.lower() if not path: path = "/" # Could do syntax based normalization of the URI before # computing the digest. See Section 6.2.2 of Std 66. 
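# For illustration, urlnorm('HTTP://Example.COM/a?b=1#frag') returns
# ('http', 'example.com', '/a?b=1', 'http://example.com/a?b=1'): the scheme and
# authority are lower-cased and the fragment is dropped from defrag_uri.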
request_uri = query and "?".join([path, query]) or path scheme = scheme.lower() defrag_uri = scheme + "://" + authority + request_uri return scheme, authority, request_uri, defrag_uri # Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) re_url_scheme = re.compile(r'^\w+://') re_slash = re.compile(r'[?/:|]+') def safename(filename): """Return a filename suitable for the cache. Strips dangerous and common characters to create a filename we can use to store the cache in. """ try: if re_url_scheme.match(filename): if isinstance(filename,str): filename = filename.decode('utf-8') filename = filename.encode('idna') else: filename = filename.encode('idna') except UnicodeError: pass if isinstance(filename,unicode): filename=filename.encode('utf-8') filemd5 = _md5(filename).hexdigest() filename = re_url_scheme.sub("", filename) filename = re_slash.sub(",", filename) # limit length of filename if len(filename)>200: filename=filename[:200] return ",".join((filename, filemd5)) NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') def _normalize_headers(headers): return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()]) def _parse_cache_control(headers): retval = {} if headers.has_key('cache-control'): parts = headers['cache-control'].split(',') parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")] parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")] retval = dict(parts_with_args + parts_wo_args) return retval # Whether to use a strict mode to parse WWW-Authenticate headers # Might lead to bad results in case of ill-formed header value, # so disabled by default, falling back to relaxed parsing. # Set to true to turn on, useful for testing servers. USE_WWW_AUTH_STRICT_PARSING = 0 # In regex below: # [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP # "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space # Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: # \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
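# Worked example (illustrative) of what these patterns feed into: a header such as
#     WWW-Authenticate: Digest realm="test", nonce="abc123", qop="auth", opaque="xyz"
# is parsed by _parse_www_authenticate() into
#     {'digest': {'realm': 'test', 'nonce': 'abc123', 'qop': 'auth', 'opaque': 'xyz'}}
# i.e. one dict of unquoted auth-params per lower-cased auth scheme.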
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") UNQUOTE_PAIRS = re.compile(r'\\(.)') def _parse_www_authenticate(headers, headername='www-authenticate'): """Returns a dictionary of dictionaries, one dict per auth_scheme.""" retval = {} if headers.has_key(headername): try: authenticate = headers[headername].strip() www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED while authenticate: # Break off the scheme at the beginning of the line if headername == 'authentication-info': (auth_scheme, the_rest) = ('digest', authenticate) else: (auth_scheme, the_rest) = authenticate.split(" ", 1) # Now loop over all the key value pairs that come after the scheme, # being careful not to roll into the next scheme match = www_auth.search(the_rest) auth_params = {} while match: if match and len(match.groups()) == 3: (key, value, the_rest) = match.groups() auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) match = www_auth.search(the_rest) retval[auth_scheme.lower()] = auth_params authenticate = the_rest.strip() except ValueError: raise MalformedHeader("WWW-Authenticate") return retval def _entry_disposition(response_headers, request_headers): """Determine freshness from the Date, Expires and Cache-Control headers. We don't handle the following: 1. Cache-Control: max-stale 2. Age: headers are not used in the calculations. Not that this algorithm is simpler than you might think because we are operating as a private (non-shared) cache. This lets us ignore 's-maxage'. We can also ignore 'proxy-invalidate' since we aren't a proxy. We will never return a stale document as fresh as a design decision, and thus the non-implementation of 'max-stale'. This also lets us safely ignore 'must-revalidate' since we operate as if every server has sent 'must-revalidate'. Since we are private we get to ignore both 'public' and 'private' parameters. We also ignore 'no-transform' since we don't do any transformations. The 'no-store' parameter is handled at a higher level. 
So the only Cache-Control parameters we look at are: no-cache only-if-cached max-age min-fresh """ retval = "STALE" cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1: retval = "TRANSPARENT" if 'cache-control' not in request_headers: request_headers['cache-control'] = 'no-cache' elif cc.has_key('no-cache'): retval = "TRANSPARENT" elif cc_response.has_key('no-cache'): retval = "STALE" elif cc.has_key('only-if-cached'): retval = "FRESH" elif response_headers.has_key('date'): date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date'])) now = time.time() current_age = max(0, now - date) if cc_response.has_key('max-age'): try: freshness_lifetime = int(cc_response['max-age']) except ValueError: freshness_lifetime = 0 elif response_headers.has_key('expires'): expires = email.Utils.parsedate_tz(response_headers['expires']) if None == expires: freshness_lifetime = 0 else: freshness_lifetime = max(0, calendar.timegm(expires) - date) else: freshness_lifetime = 0 if cc.has_key('max-age'): try: freshness_lifetime = int(cc['max-age']) except ValueError: freshness_lifetime = 0 if cc.has_key('min-fresh'): try: min_fresh = int(cc['min-fresh']) except ValueError: min_fresh = 0 current_age += min_fresh if freshness_lifetime > current_age: retval = "FRESH" return retval def _decompressContent(response, new_content): content = new_content try: encoding = response.get('content-encoding', None) if encoding in ['gzip', 'deflate']: if encoding == 'gzip': content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() if encoding == 'deflate': content = zlib.decompress(content) response['content-length'] = str(len(content)) # Record the historical presence of the encoding in a way the won't interfere. response['-content-encoding'] = response['content-encoding'] del response['content-encoding'] except IOError: content = "" raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) return content def _updateCache(request_headers, response_headers, content, cache, cachekey): if cachekey: cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if cc.has_key('no-store') or cc_response.has_key('no-store'): cache.delete(cachekey) else: info = email.Message.Message() for key, value in response_headers.iteritems(): if key not in ['status','content-encoding','transfer-encoding']: info[key] = value # Add annotations to the cache to indicate what headers # are variant for this request. 
vary = response_headers.get('vary', None) if vary: vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header try: info[key] = request_headers[header] except KeyError: pass status = response_headers.status if status == 304: status = 200 status_header = 'status: %d\r\n' % status header_str = info.as_string() header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) text = "".join([status_header, header_str, content]) cache.set(cachekey, text) def _cnonce(): dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest() return dig[:16] def _wsse_username_token(cnonce, iso_now, password): return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip() # For credentials we need two things, first # a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) # Then we also need a list of URIs that have already demanded authentication # That list is tricky since sub-URIs can take the same auth, or the # auth scheme may change as you descend the tree. # So we also need each Auth instance to be able to tell us # how close to the 'top' it is. class Authentication(object): def __init__(self, credentials, host, request_uri, headers, response, content, http): (scheme, authority, path, query, fragment) = parse_uri(request_uri) self.path = path self.host = host self.credentials = credentials self.http = http def depth(self, request_uri): (scheme, authority, path, query, fragment) = parse_uri(request_uri) return request_uri[len(self.path):].count("/") def inscope(self, host, request_uri): # XXX Should we normalize the request_uri? (scheme, authority, path, query, fragment) = parse_uri(request_uri) return (host == self.host) and path.startswith(self.path) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header. Over-ride this in sub-classes.""" pass def response(self, response, content): """Gives us a chance to update with new nonces or such returned from the last authorized response. Over-rise this in sub-classes if necessary. Return TRUE is the request is to be retried, for example Digest may return stale=true. """ return False class BasicAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip() class DigestAuthentication(Authentication): """Only do qop='auth' and MD5, since that is all Apache currently implements""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['digest'] qop = self.challenge.get('qop', 'auth') self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None if self.challenge['qop'] is None: raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." 
% qop)) self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper() if self.challenge['algorithm'] != 'MD5': raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) self.challenge['nc'] = 1 def request(self, method, request_uri, headers, content, cnonce = None): """Modify the request headers""" H = lambda x: _md5(x).hexdigest() KD = lambda s, d: H("%s:%s" % (s, d)) A2 = "".join([method, ":", request_uri]) self.challenge['cnonce'] = cnonce or _cnonce() request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % ( self.challenge['nonce'], '%08x' % self.challenge['nc'], self.challenge['cnonce'], self.challenge['qop'], H(A2))) headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['nonce'], request_uri, self.challenge['algorithm'], request_digest, self.challenge['qop'], self.challenge['nc'], self.challenge['cnonce']) if self.challenge.get('opaque'): headers['authorization'] += ', opaque="%s"' % self.challenge['opaque'] self.challenge['nc'] += 1 def response(self, response, content): if not response.has_key('authentication-info'): challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) if 'true' == challenge.get('stale'): self.challenge['nonce'] = challenge['nonce'] self.challenge['nc'] = 1 return True else: updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) if updated_challenge.has_key('nextnonce'): self.challenge['nonce'] = updated_challenge['nextnonce'] self.challenge['nc'] = 1 return False class HmacDigestAuthentication(Authentication): """Adapted from Robert Sayre's code and DigestAuthentication above.""" __author__ = "Thomas Broyer (t.broyer@ltgt.net)" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['hmacdigest'] # TODO: self.challenge['domain'] self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') if self.challenge['reason'] not in ['unauthorized', 'integrity']: self.challenge['reason'] = 'unauthorized' self.challenge['salt'] = self.challenge.get('salt', '') if not self.challenge.get('snonce'): raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." 
% self.challenge['pw-algorithm'])) if self.challenge['algorithm'] == 'HMAC-MD5': self.hashmod = _md5 else: self.hashmod = _sha if self.challenge['pw-algorithm'] == 'MD5': self.pwhashmod = _md5 else: self.pwhashmod = _sha self.key = "".join([self.credentials[0], ":", self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), ":", self.challenge['realm']]) self.key = self.pwhashmod.new(self.key).hexdigest().lower() def request(self, method, request_uri, headers, content): """Modify the request headers""" keys = _get_end2end_headers(headers) keylist = "".join(["%s " % k for k in keys]) headers_val = "".join([headers[k] for k in keys]) created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) cnonce = _cnonce() request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['snonce'], cnonce, request_uri, created, request_digest, keylist) def response(self, response, content): challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) if challenge.get('reason') in ['integrity', 'stale']: return True return False class WsseAuthentication(Authentication): """This is thinly tested and should not be relied upon. At this time there isn't any third party server to test against. Blogger and TypePad implemented this algorithm at one point but Blogger has since switched to Basic over HTTPS and TypePad has implemented it wrong, by never issuing a 401 challenge but instead requiring your client to telepathically know that their endpoint is expecting WSSE profile="UsernameToken".""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'WSSE profile="UsernameToken"' iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) cnonce = _cnonce() password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( self.credentials[0], password_digest, cnonce, iso_now) class GoogleLoginAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): from urllib import urlencode Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') service = challenge['googlelogin'].get('service', 'xapi') # Bloggger actually returns the service in the challenge # For the rest we guess based on the URI if service == 'xapi' and request_uri.find("calendar") > 0: service = "cl" # No point in guessing Base or Spreadsheet #elif request_uri.find("spreadsheets") > 0: # service = "wise" auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) lines = 
content.split('\n') d = dict([tuple(line.split("=", 1)) for line in lines if line]) if resp.status == 403: self.Auth = "" else: self.Auth = d['Auth'] def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'GoogleLogin Auth=' + self.Auth AUTH_SCHEME_CLASSES = { "basic": BasicAuthentication, "wsse": WsseAuthentication, "digest": DigestAuthentication, "hmacdigest": HmacDigestAuthentication, "googlelogin": GoogleLoginAuthentication } AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] class FileCache(object): """Uses a local directory as a store for cached files. Not really safe to use if multiple threads or processes are going to be running on the same cache. """ def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior self.cache = cache self.safe = safe if not os.path.exists(cache): os.makedirs(self.cache) def get(self, key): retval = None cacheFullPath = os.path.join(self.cache, self.safe(key)) try: f = file(cacheFullPath, "rb") retval = f.read() f.close() except IOError: pass return retval def set(self, key, value): cacheFullPath = os.path.join(self.cache, self.safe(key)) f = file(cacheFullPath, "wb") f.write(value) f.close() def delete(self, key): cacheFullPath = os.path.join(self.cache, self.safe(key)) if os.path.exists(cacheFullPath): os.remove(cacheFullPath) class Credentials(object): def __init__(self): self.credentials = [] def add(self, name, password, domain=""): self.credentials.append((domain.lower(), name, password)) def clear(self): self.credentials = [] def iter(self, domain): for (cdomain, name, password) in self.credentials: if cdomain == "" or domain == cdomain: yield (name, password) class KeyCerts(Credentials): """Identical to Credentials except that name/password are mapped to key/cert.""" pass class AllHosts(object): pass class ProxyInfo(object): """Collect information required to use a proxy.""" bypass_hosts = () def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX constants. For example: p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) """ self.proxy_type = proxy_type self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_rdns = proxy_rdns self.proxy_user = proxy_user self.proxy_pass = proxy_pass def astuple(self): return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass) def isgood(self): return (self.proxy_host != None) and (self.proxy_port != None) def applies_to(self, hostname): return not self.bypass_host(hostname) def bypass_host(self, hostname): """Has this host been excluded from the proxy config""" if self.bypass_hosts is AllHosts: return True bypass = False for domain in self.bypass_hosts: if hostname.endswith(domain): bypass = True return bypass def proxy_info_from_environment(method='http'): """ Read proxy info from the environment variables. 
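Looks at <method>_proxy (e.g. http_proxy / https_proxy), falling back to the
upper-case variants, and honours no_proxy / NO_PROXY for bypass hosts.
Returns a ProxyInfo instance, or None if no proxy is configured.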
""" if method not in ['http', 'https']: return env_var = method + '_proxy' url = os.environ.get(env_var, os.environ.get(env_var.upper())) if not url: return pi = proxy_info_from_url(url, method) no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', '')) bypass_hosts = [] if no_proxy: bypass_hosts = no_proxy.split(',') # special case, no_proxy=* means all hosts bypassed if no_proxy == '*': bypass_hosts = AllHosts pi.bypass_hosts = bypass_hosts return pi def proxy_info_from_url(url, method='http'): """ Construct a ProxyInfo from a URL (such as http_proxy env var) """ url = urlparse.urlparse(url) username = None password = None port = None if '@' in url[1]: ident, host_port = url[1].split('@', 1) if ':' in ident: username, password = ident.split(':', 1) else: password = ident else: host_port = url[1] if ':' in host_port: host, port = host_port.split(':', 1) else: host = host_port if port: port = int(port) else: port = dict(https=443, http=80)[method] proxy_type = 3 # socks.PROXY_TYPE_HTTP return ProxyInfo( proxy_type = proxy_type, proxy_host = host, proxy_port = port, proxy_user = username or None, proxy_pass = password or None, ) class HTTPConnectionWithTimeout(httplib.HTTPConnection): """ HTTPConnection subclass that supports timeouts All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): httplib.HTTPConnection.__init__(self, host, port, strict) self.timeout = timeout self.proxy_info = proxy_info def connect(self): """Connect to the host and port specified in __init__.""" # Mostly verbatim from httplib.py. if self.proxy_info and socks is None: raise ProxiesUnavailableError( 'Proxy support missing but proxy use was requested!') msg = "getaddrinfo returns an empty list" if self.proxy_info and self.proxy_info.isgood(): use_proxy = True proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple() else: use_proxy = False if use_proxy and proxy_rdns: host = proxy_host port = proxy_port else: host = self.host port = self.port for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: if use_proxy: self.sock = socks.socksocket(af, socktype, proto) self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass) else: self.sock = socket.socket(af, socktype, proto) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # Different from httplib: support timeouts. if has_timeout(self.timeout): self.sock.settimeout(self.timeout) # End of difference from httplib. if self.debuglevel > 0: print "connect: (%s, %s) ************" % (self.host, self.port) if use_proxy: print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) self.sock.connect((self.host, self.port) + sa[2:]) except socket.error, msg: if self.debuglevel > 0: print "connect fail: (%s, %s)" % (self.host, self.port) if use_proxy: print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): """ This class allows communication via SSL. All timeouts are in seconds. 
If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict) self.timeout = timeout self.proxy_info = proxy_info if ca_certs is None: ca_certs = CA_CERTS self.ca_certs = ca_certs self.disable_ssl_certificate_validation = \ disable_ssl_certificate_validation # The following two methods were adapted from https_wrapper.py, released # with the Google Appengine SDK at # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py # under the following license: # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def _GetValidHostsForCert(self, cert): """Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs. """ if 'subjectAltName' in cert: return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns'] else: return [x[0][1] for x in cert['subject'] if x[0][0].lower() == 'commonname'] def _ValidateCertificateHostname(self, cert, hostname): """Validates that a given hostname is valid for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. hostname: The hostname to test. Returns: bool: Whether or not the hostname is valid for this certificate. """ hosts = self._GetValidHostsForCert(cert) for host in hosts: host_re = host.replace('.', '\.').replace('*', '[^.]*') if re.search('^%s$' % (host_re,), hostname, re.I): return True return False def connect(self): "Connect to a host on a given (SSL) port." 
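# Overall flow: resolve the target (or proxy) address with getaddrinfo, open a
# TCP socket (through SOCKS if a proxy is configured), wrap it via
# _ssl_wrap_socket(), and, unless validation is disabled, check the peer
# certificate's subjectAltName/commonName entries against the requested host.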
msg = "getaddrinfo returns an empty list" if self.proxy_info and self.proxy_info.isgood(): use_proxy = True proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple() else: use_proxy = False if use_proxy and proxy_rdns: host = proxy_host port = proxy_port else: host = self.host port = self.port address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) for family, socktype, proto, canonname, sockaddr in address_info: try: if use_proxy: sock = socks.socksocket(family, socktype, proto) sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass) else: sock = socket.socket(family, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if has_timeout(self.timeout): sock.settimeout(self.timeout) sock.connect((self.host, self.port)) self.sock =_ssl_wrap_socket( sock, self.key_file, self.cert_file, self.disable_ssl_certificate_validation, self.ca_certs) if self.debuglevel > 0: print "connect: (%s, %s)" % (self.host, self.port) if use_proxy: print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) if not self.disable_ssl_certificate_validation: cert = self.sock.getpeercert() hostname = self.host.split(':', 0)[0] if not self._ValidateCertificateHostname(cert, hostname): raise CertificateHostnameMismatch( 'Server presented certificate that does not match ' 'host %s: %s' % (hostname, cert), hostname, cert) except ssl_SSLError, e: if sock: sock.close() if self.sock: self.sock.close() self.sock = None # Unfortunately the ssl module doesn't seem to provide any way # to get at more detailed error information, in particular # whether the error is due to certificate validation or # something else (such as SSL protocol mismatch). if e.errno == ssl.SSL_ERROR_SSL: raise SSLHandshakeError(e) else: raise except (socket.timeout, socket.gaierror): raise except socket.error, msg: if self.debuglevel > 0: print "connect fail: (%s, %s)" % (self.host, self.port) if use_proxy: print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg SCHEME_TO_CONNECTION = { 'http': HTTPConnectionWithTimeout, 'https': HTTPSConnectionWithTimeout } # Use a different connection object for Google App Engine try: try: from google.appengine.api import apiproxy_stub_map if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None: raise ImportError # Bail out; we're not actually running on App Engine. from google.appengine.api.urlfetch import fetch from google.appengine.api.urlfetch import InvalidURLError except ImportError: from google3.apphosting.api import apiproxy_stub_map if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None: raise ImportError # Bail out; we're not actually running on App Engine. from google3.apphosting.api.urlfetch import fetch from google3.apphosting.api.urlfetch import InvalidURLError def _new_fixed_fetch(validate_certificate): def fixed_fetch(url, payload=None, method="GET", headers={}, allow_truncated=False, follow_redirects=True, deadline=5): return fetch(url, payload=payload, method=method, headers=header, allow_truncated=allow_truncated, follow_redirects=follow_redirects, deadline=deadline, validate_certificate=validate_certificate) return fixed_fetch class AppEngineHttpConnection(httplib.HTTPConnection): """Use httplib on App Engine, but compensate for its weirdness. 
The parameters key_file, cert_file, proxy_info, ca_certs, and disable_ssl_certificate_validation are all dropped on the ground. """ def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): httplib.HTTPConnection.__init__(self, host, port=port, strict=strict, timeout=timeout) class AppEngineHttpsConnection(httplib.HTTPSConnection): """Same as AppEngineHttpConnection, but for HTTPS URIs.""" def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict, timeout=timeout) self._fetch = _new_fixed_fetch( not disable_ssl_certificate_validation) # Update the connection classes to use the Google App Engine specific ones. SCHEME_TO_CONNECTION = { 'http': AppEngineHttpConnection, 'https': AppEngineHttpsConnection } except ImportError: pass class Http(object): """An HTTP client that handles: - all methods - caching - ETags - compression, - HTTPS - Basic - Digest - WSSE and more. """ def __init__(self, cache=None, timeout=None, proxy_info=proxy_info_from_environment, ca_certs=None, disable_ssl_certificate_validation=False): """If 'cache' is a string then it is used as a directory name for a disk cache. Otherwise it must be an object that supports the same interface as FileCache. All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout `proxy_info` may be: - a callable that takes the http scheme ('http' or 'https') and returns a ProxyInfo instance per request. By default, uses proxy_info_from_environment. - a ProxyInfo instance (static proxy config). - None (proxy disabled). ca_certs is the path of a file containing root CA certificates for SSL server certificate validation. By default, a CA cert file bundled with httplib2 is used. If disable_ssl_certificate_validation is true, SSL cert validation will not be performed. """ self.proxy_info = proxy_info self.ca_certs = ca_certs self.disable_ssl_certificate_validation = \ disable_ssl_certificate_validation # Map domain name to an httplib connection self.connections = {} # The location of the cache, for now a directory # where cached responses are held. if cache and isinstance(cache, basestring): self.cache = FileCache(cache) else: self.cache = cache # Name/password self.credentials = Credentials() # Key/cert self.certificates = KeyCerts() # authorization objects self.authorizations = [] # If set to False then no redirects are followed, even safe ones. self.follow_redirects = True # Which HTTP methods do we apply optimistic concurrency to, i.e. # which methods get an "if-match:" etag header added to them. self.optimistic_concurrency_methods = ["PUT", "PATCH"] # If 'follow_redirects' is True, and this is set to True then # all redirects are followed, including unsafe ones. self.follow_all_redirects = False self.ignore_etag = False self.force_exception_to_status_code = False self.timeout = timeout # Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False def __getstate__(self): state_dict = copy.copy(self.__dict__) # In case request is augmented by some foreign object such as # credentials which handle auth if 'request' in state_dict: del state_dict['request'] if 'connections' in state_dict: del state_dict['connections'] return state_dict def __setstate__(self, state): self.__dict__.update(state) self.connections = {} def _auth_from_challenge(self, host, request_uri, headers, response, content): """A generator that creates Authorization objects that can be applied to requests. """ challenges = _parse_www_authenticate(response, 'www-authenticate') for cred in self.credentials.iter(host): for scheme in AUTH_SCHEME_ORDER: if challenges.has_key(scheme): yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) def add_credentials(self, name, password, domain=""): """Add a name and password that will be used any time a request requires authentication.""" self.credentials.add(name, password, domain) def add_certificate(self, key, cert, domain): """Add a key and cert that will be used any time a request requires authentication.""" self.certificates.add(key, cert, domain) def clear_credentials(self): """Remove all the names and passwords that are used for authentication""" self.credentials.clear() self.authorizations = [] def _conn_request(self, conn, request_uri, method, body, headers): for i in range(RETRIES): try: if hasattr(conn, 'sock') and conn.sock is None: conn.connect() conn.request(method, request_uri, body, headers) except socket.timeout: raise except socket.gaierror: conn.close() raise ServerNotFoundError("Unable to find the server at %s" % conn.host) except ssl_SSLError: conn.close() raise except socket.error, e: err = 0 if hasattr(e, 'args'): err = getattr(e, 'args')[0] else: err = e.errno if err == errno.ECONNREFUSED: # Connection refused raise except httplib.HTTPException: # Just because the server closed the connection doesn't apparently mean # that the server didn't send a response. 
if hasattr(conn, 'sock') and conn.sock is None: if i < RETRIES-1: conn.close() conn.connect() continue else: conn.close() raise if i < RETRIES-1: conn.close() conn.connect() continue try: response = conn.getresponse() except (socket.error, httplib.HTTPException): if i < RETRIES-1: conn.close() conn.connect() continue else: conn.close() raise else: content = "" if method == "HEAD": conn.close() else: content = response.read() response = Response(response) if method != "HEAD": content = _decompressContent(response, content) break return (response, content) def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): """Do the actual request using the connection object and also follow one level of redirects if necessary""" auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] auth = auths and sorted(auths)[0][1] or None if auth: auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers) if auth: if auth.response(response, body): auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers ) response._stale_digest = 1 if response.status == 401: for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): authorization.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) if response.status != 401: self.authorizations.append(authorization) authorization.response(response, body) break if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: # Pick out the location header and basically start from the beginning # remembering first to strip the ETag header and decrement our 'depth' if redirections: if not response.has_key('location') and response.status != 300: raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) # Fix-up relative redirects (which violate an RFC 2616 MUST) if response.has_key('location'): location = response['location'] (scheme, authority, path, query, fragment) = parse_uri(location) if authority == None: response['location'] = urlparse.urljoin(absolute_uri, location) if response.status == 301 and method in ["GET", "HEAD"]: response['-x-permanent-redirect-url'] = response['location'] if not response.has_key('content-location'): response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) if headers.has_key('if-none-match'): del headers['if-none-match'] if headers.has_key('if-modified-since'): del headers['if-modified-since'] if 'authorization' in headers and not self.forward_authorization_headers: del headers['authorization'] if response.has_key('location'): location = response['location'] old_response = copy.deepcopy(response) if not old_response.has_key('content-location'): old_response['content-location'] = absolute_uri redirect_method = method if response.status in [302, 303]: redirect_method = "GET" body = None (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) response.previous = old_response else: raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content) elif response.status in [200, 203] and method in ["GET", 
"HEAD"]: # Don't cache 206's since we aren't going to handle byte range requests if not response.has_key('content-location'): response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) return (response, content) def _normalize_headers(self, headers): return _normalize_headers(headers) # Need to catch and rebrand some exceptions # Then need to optionally turn all exceptions into status codes # including all socket.* and httplib.* exceptions. def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): """ Performs a single HTTP request. The 'uri' is the URI of the HTTP resource and can begin with either 'http' or 'https'. The value of 'uri' must be an absolute URI. The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. There is no restriction on the methods allowed. The 'body' is the entity body to be sent with the request. It is a string object. Any extra headers that are to be sent with the request should be provided in the 'headers' dictionary. The maximum number of redirect to follow before raising an exception is 'redirections. The default is 5. The return value is a tuple of (response, content), the first being and instance of the 'Response' class, the second being a string that contains the response entity body. """ try: if headers is None: headers = {} else: headers = self._normalize_headers(headers) if not headers.has_key('user-agent'): headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__ uri = iri2uri(uri) (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) domain_port = authority.split(":")[0:2] if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http': scheme = 'https' authority = domain_port[0] proxy_info = self._get_proxy_info(scheme, authority) conn_key = scheme+":"+authority if conn_key in self.connections: conn = self.connections[conn_key] else: if not connection_type: connection_type = SCHEME_TO_CONNECTION[scheme] certs = list(self.certificates.iter(authority)) if scheme == 'https': if certs: conn = self.connections[conn_key] = connection_type( authority, key_file=certs[0][0], cert_file=certs[0][1], timeout=self.timeout, proxy_info=proxy_info, ca_certs=self.ca_certs, disable_ssl_certificate_validation= self.disable_ssl_certificate_validation) else: conn = self.connections[conn_key] = connection_type( authority, timeout=self.timeout, proxy_info=proxy_info, ca_certs=self.ca_certs, disable_ssl_certificate_validation= self.disable_ssl_certificate_validation) else: conn = self.connections[conn_key] = connection_type( authority, timeout=self.timeout, proxy_info=proxy_info) conn.set_debuglevel(debuglevel) if 'range' not in headers and 'accept-encoding' not in headers: headers['accept-encoding'] = 'gzip, deflate' info = email.Message.Message() cached_value = None if self.cache: cachekey = defrag_uri cached_value = self.cache.get(cachekey) if cached_value: # info = email.message_from_string(cached_value) # # Need to replace the line above with the kludge below # to fix the non-existent bug not fixed in this # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html try: info, content = cached_value.split('\r\n\r\n', 1) feedparser = email.FeedParser.FeedParser() feedparser.feed(info) info = feedparser.close() feedparser._parse = None except (IndexError, ValueError): self.cache.delete(cachekey) cachekey = None cached_value = None else: cachekey = None if method in 
self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: # http://www.w3.org/1999/04/Editing/ headers['if-match'] = info['etag'] if method not in ["GET", "HEAD"] and self.cache and cachekey: # RFC 2616 Section 13.10 self.cache.delete(cachekey) # Check the vary header in the cache to see if this request # matches what varies in the cache. if method in ['GET', 'HEAD'] and 'vary' in info: vary = info['vary'] vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header value = info[key] if headers.get(header, None) != value: cached_value = None break if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: if info.has_key('-x-permanent-redirect-url'): # Should cached permanent redirects be counted in our redirection count? For now, yes. if redirections <= 0: raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "") (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) response.previous = Response(info) response.previous.fromcache = True else: # Determine our course of action: # Is the cached entry fresh or stale? # Has the client requested a non-cached response? # # There seems to be three possible answers: # 1. [FRESH] Return the cache entry w/o doing a GET # 2. [STALE] Do the GET (but add in cache validators if available) # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request entry_disposition = _entry_disposition(info, headers) if entry_disposition == "FRESH": if not cached_value: info['status'] = '504' content = "" response = Response(info) if cached_value: response.fromcache = True return (response, content) if entry_disposition == "STALE": if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: headers['if-none-match'] = info['etag'] if info.has_key('last-modified') and not 'last-modified' in headers: headers['if-modified-since'] = info['last-modified'] elif entry_disposition == "TRANSPARENT": pass (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) if response.status == 304 and method == "GET": # Rewrite the cache entry with the new end-to-end headers # Take all headers that are in response # and overwrite their values in info. # unless they are hop-by-hop, or are listed in the connection header. 
for key in _get_end2end_headers(response): info[key] = response[key] merged_response = Response(info) if hasattr(response, "_stale_digest"): merged_response._stale_digest = response._stale_digest _updateCache(headers, merged_response, content, self.cache, cachekey) response = merged_response response.status = 200 response.fromcache = True elif response.status == 200: content = new_content else: self.cache.delete(cachekey) content = new_content else: cc = _parse_cache_control(headers) if cc.has_key('only-if-cached'): info['status'] = '504' response = Response(info) content = "" else: (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) except Exception, e: if self.force_exception_to_status_code: if isinstance(e, HttpLib2ErrorWithResponse): response = e.response content = e.content response.status = 500 response.reason = str(e) elif isinstance(e, socket.timeout): content = "Request Timeout" response = Response({ "content-type": "text/plain", "status": "408", "content-length": len(content) }) response.reason = "Request Timeout" else: content = str(e) response = Response({ "content-type": "text/plain", "status": "400", "content-length": len(content) }) response.reason = "Bad Request" else: raise return (response, content) def _get_proxy_info(self, scheme, authority): """Return a ProxyInfo instance (or None) based on the scheme and authority. """ hostname, port = urllib.splitport(authority) proxy_info = self.proxy_info if callable(proxy_info): proxy_info = proxy_info(scheme) if (hasattr(proxy_info, 'applies_to') and not proxy_info.applies_to(hostname)): proxy_info = None return proxy_info class Response(dict): """An object more like email.Message than httplib.HTTPResponse.""" """Is this response from our local cache""" fromcache = False """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ version = 11 "Status code returned by server. " status = 200 """Reason phrase returned by server.""" reason = "Ok" previous = None def __init__(self, info): # info is either an email.Message or # an httplib.HTTPResponse object. if isinstance(info, httplib.HTTPResponse): for key, value in info.getheaders(): self[key.lower()] = value self.status = info.status self['status'] = str(self.status) self.reason = info.reason self.version = info.version elif isinstance(info, email.Message.Message): for key, value in info.items(): self[key.lower()] = value self.status = int(self['status']) else: for key, value in info.iteritems(): self[key.lower()] = value self.status = int(self.get('status', self.status)) self.reason = self.get('reason', self.reason) def __getattr__(self, name): if name == 'dict': return self else: raise AttributeError, name
bsd-3-clause
1,604,761,334,595,194,000
7,842,876,315,277,879,000
40.995172
235
0.578263
false
surgebiswas/poker
PokerBots_2017/Johnny/scipy/weave/ext_tools.py
92
17820
from __future__ import absolute_import, print_function import os import sys import re from . import catalog from . import build_tools from . import converters from . import base_spec class ext_function_from_specs(object): def __init__(self,name,code_block,arg_specs): self.name = name self.arg_specs = base_spec.arg_spec_list(arg_specs) self.code_block = code_block self.compiler = '' self.customize = base_info.custom_info() def header_code(self): pass def function_declaration_code(self): code = 'static PyObject* %s(PyObject*self, PyObject* args,' \ ' PyObject* kywds)\n{\n' return code % self.name def template_declaration_code(self): code = 'template<class T>\n' \ 'static PyObject* %s(PyObject*self, PyObject* args,' \ ' PyObject* kywds)\n{\n' return code % self.name #def cpp_function_declaration_code(self): # pass #def cpp_function_call_code(self): #s pass def parse_tuple_code(self): """ Create code block for PyArg_ParseTuple. Variable declarations for all PyObjects are done also. This code got a lot uglier when I added local_dict... """ declare_return = 'py::object return_val;\n' \ 'int exception_occurred = 0;\n' \ 'PyObject *py_local_dict = NULL;\n' arg_string_list = self.arg_specs.variable_as_strings() + ['"local_dict"'] arg_strings = ','.join(arg_string_list) if arg_strings: arg_strings += ',' declare_kwlist = 'static const char *kwlist[] = {%s NULL};\n' % \ arg_strings py_objects = ', '.join(self.arg_specs.py_pointers()) init_flags = ', '.join(self.arg_specs.init_flags()) init_flags_init = '= '.join(self.arg_specs.init_flags()) py_vars = ' = '.join(self.arg_specs.py_variables()) if py_objects: declare_py_objects = 'PyObject ' + py_objects + ';\n' declare_py_objects += 'int ' + init_flags + ';\n' init_values = py_vars + ' = NULL;\n' init_values += init_flags_init + ' = 0;\n\n' else: declare_py_objects = '' init_values = '' #Each variable is in charge of its own cleanup now. 
#cnt = len(arg_list) #declare_cleanup = "blitz::TinyVector<PyObject*,%d> clean_up(0);\n" % cnt ref_string = ', '.join(self.arg_specs.py_references()) if ref_string: ref_string += ', &py_local_dict' else: ref_string = '&py_local_dict' format = "O" * len(self.arg_specs) + "|O" + ':' + self.name parse_tuple = 'if(!PyArg_ParseTupleAndKeywords(args,' \ 'kywds,"%s",const_cast<char**>(kwlist),%s))\n' % \ (format,ref_string) parse_tuple += ' return NULL;\n' return declare_return + declare_kwlist + declare_py_objects \ + init_values + parse_tuple def arg_declaration_code(self): arg_strings = [] for arg in self.arg_specs: arg_strings.append(arg.declaration_code()) arg_strings.append(arg.init_flag() + " = 1;\n") code = "".join(arg_strings) return code def arg_cleanup_code(self): arg_strings = [] have_cleanup = filter(lambda x:x.cleanup_code(),self.arg_specs) for arg in have_cleanup: code = "if(%s)\n" % arg.init_flag() code += "{\n" code += indent(arg.cleanup_code(),4) code += "}\n" arg_strings.append(code) code = "".join(arg_strings) return code def arg_local_dict_code(self): arg_strings = [] for arg in self.arg_specs: arg_strings.append(arg.local_dict_code()) code = "".join(arg_strings) return code def function_code(self): decl_code = indent(self.arg_declaration_code(),4) cleanup_code = indent(self.arg_cleanup_code(),4) function_code = indent(self.code_block,4) local_dict_code = indent(self.arg_local_dict_code(),4) dict_code = "if(py_local_dict) \n" \ "{ \n" \ " py::dict local_dict = py::dict(py_local_dict); \n" + \ local_dict_code + \ "} \n" try_code = "try \n" \ "{ \n" + \ decl_code + \ " /*<function call here>*/ \n" + \ function_code + \ indent(dict_code,4) + \ "\n} \n" catch_code = "catch(...) \n" \ "{ \n" + \ " return_val = py::object(); \n" \ " exception_occurred = 1; \n" \ "} \n" return_code = " /*cleanup code*/ \n" + \ cleanup_code + \ ' if(!(PyObject*)return_val && !exception_occurred)\n' \ ' {\n \n' \ ' return_val = Py_None; \n' \ ' }\n \n' \ ' return return_val.disown(); \n' \ '} \n' all_code = self.function_declaration_code() + \ indent(self.parse_tuple_code(),4) + \ indent(try_code,4) + \ indent(catch_code,4) + \ return_code return all_code def python_function_definition_code(self): args = (self.name, self.name) function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS|' \ 'METH_KEYWORDS},\n' % args return function_decls def set_compiler(self,compiler): self.compiler = compiler for arg in self.arg_specs: arg.set_compiler(compiler) class ext_function(ext_function_from_specs): def __init__(self,name,code_block, args, local_dict=None, global_dict=None, auto_downcast=1, type_converters=None): call_frame = sys._getframe().f_back if local_dict is None: local_dict = call_frame.f_locals if global_dict is None: global_dict = call_frame.f_globals if type_converters is None: type_converters = converters.default arg_specs = assign_variable_types(args,local_dict, global_dict, auto_downcast, type_converters) ext_function_from_specs.__init__(self,name,code_block,arg_specs) from . 
import base_info class ext_module(object): def __init__(self,name,compiler=''): standard_info = converters.standard_info self.name = name self.functions = [] self.compiler = compiler self.customize = base_info.custom_info() self._build_information = base_info.info_list(standard_info) def add_function(self,func): self.functions.append(func) def module_code(self): code = '\n'.join([ """\ #ifdef __CPLUSPLUS__ extern "C" { #endif """, self.warning_code(), self.header_code(), self.support_code(), self.function_code(), self.python_function_definition_code(), self.module_init_code(), """\ #ifdef __CPLUSCPLUS__ } #endif """ ]) return code def arg_specs(self): all_arg_specs = base_spec.arg_spec_list() for func in self.functions: all_arg_specs += func.arg_specs return all_arg_specs def build_information(self): info = self._build_information + [self.customize] + \ self.arg_specs().build_information() for func in self.functions: info.append(func.customize) #redundant, but easiest place to make sure compiler is set for i in info: i.set_compiler(self.compiler) return info def get_headers(self): all_headers = self.build_information().headers() # blitz/array.h always needs to go before most other headers, so we # hack that here, but we need to ensure that Python.h is the very # first header included. As indicated in # http://docs.python.org/api/includes.html # "Warning: Since Python may define some pre-processor definitions which # affect the standard headers on some systems, you must include Python.h # before any standard headers are included. " # Since blitz/array.h pulls in system headers, we must massage this # list a bit so that the order is Python.h, blitz/array.h, ... if '"blitz/array.h"' in all_headers: all_headers.remove('"blitz/array.h"') # Insert blitz AFTER Python.h, which must remain the first header all_headers.insert(1,'"blitz/array.h"') return all_headers def warning_code(self): all_warnings = self.build_information().warnings() w = map(lambda x: "#pragma warning(%s)\n" % x,all_warnings) return '#ifndef __GNUC__\n' + ''.join(w) + '\n#endif' def header_code(self): h = self.get_headers() h = map(lambda x: '#include ' + x + '\n',h) return ''.join(h) + '\n' def support_code(self): code = self.build_information().support_code() return ''.join(code) + '\n' def function_code(self): all_function_code = "" for func in self.functions: all_function_code += func.function_code() return ''.join(all_function_code) + '\n' def python_function_definition_code(self): all_definition_code = "" for func in self.functions: all_definition_code += func.python_function_definition_code() all_definition_code = indent(''.join(all_definition_code),4) code = 'static PyMethodDef compiled_methods[] = \n' \ '{\n' \ '%s' \ ' {NULL, NULL} /* Sentinel */\n' \ '};\n' return code % (all_definition_code) def module_init_code(self): init_code_list = self.build_information().module_init_code() init_code = indent(''.join(init_code_list),4) code = 'PyMODINIT_FUNC init%s(void)\n' \ '{\n' \ '%s' \ ' (void) Py_InitModule("%s", compiled_methods);\n' \ '}\n' % (self.name,init_code,self.name) return code def generate_file(self,file_name="",location='.'): code = self.module_code() if not file_name: file_name = self.name + '.cpp' name = generate_file_name(file_name,location) #return name return generate_module(code,name) def set_compiler(self,compiler): # This is not used anymore -- I think we should ditch it. 
#for i in self.arg_specs() # i.set_compiler(compiler) for i in self.build_information(): i.set_compiler(compiler) for i in self.functions: i.set_compiler(compiler) self.compiler = compiler def build_kw_and_file(self,location,kw): arg_specs = self.arg_specs() info = self.build_information() _source_files = info.sources() # remove duplicates source_files = {} for i in _source_files: source_files[i] = None source_files = source_files.keys() # add internally specified macros, includes, etc. to the key words # values of the same names so that distutils will use them. kw['define_macros'] = kw.get('define_macros',[]) + \ info.define_macros() kw['include_dirs'] = kw.get('include_dirs',[]) + info.include_dirs() kw['libraries'] = kw.get('libraries',[]) + info.libraries() kw['library_dirs'] = kw.get('library_dirs',[]) + info.library_dirs() kw['extra_compile_args'] = kw.get('extra_compile_args',[]) + \ info.extra_compile_args() kw['extra_link_args'] = kw.get('extra_link_args',[]) + \ info.extra_link_args() kw['sources'] = kw.get('sources',[]) + source_files file = self.generate_file(location=location) return kw,file def setup_extension(self,location='.',**kw): kw,file = self.build_kw_and_file(location,kw) return build_tools.create_extension(file, **kw) def compile(self,location='.',compiler=None, verbose=0, **kw): if compiler is not None: self.compiler = compiler # !! removed -- we don't have any compiler dependent code # currently in spec or info classes # hmm. Is there a cleaner way to do this? Seems like # choosing the compiler spagettis around a little. #compiler = build_tools.choose_compiler(self.compiler) #self.set_compiler(compiler) kw,file = self.build_kw_and_file(location,kw) # This is needed so that files build correctly even when different # versions of Python are running around. # Imported at beginning of file now to help with test paths. # import catalog #temp = catalog.default_temp_dir() # for speed, build in the machines temp directory temp = catalog.intermediate_dir() success = build_tools.build_extension(file, temp_dir=temp, compiler_name=compiler, verbose=verbose, **kw) if not success: raise SystemError('Compilation failed') def generate_file_name(module_name,module_location): module_file = os.path.join(module_location,module_name) return os.path.abspath(module_file) def generate_module(module_string, module_file): """ generate the source code file. Only overwrite the existing file if the actual source has changed. """ file_changed = 1 if os.path.exists(module_file): f = open(module_file,'r') old_string = f.read() f.close() if old_string == module_string: file_changed = 0 if file_changed: f = open(module_file,'w') f.write(module_string) f.close() return module_file def assign_variable_types(variables,local_dict={}, global_dict={}, auto_downcast=1, type_converters=converters.default): incoming_vars = {} incoming_vars.update(global_dict) incoming_vars.update(local_dict) variable_specs = [] errors = {} for var in variables: try: example_type = incoming_vars[var] # look through possible type specs to find which one # should be used to for example_type spec = None for factory in type_converters: if factory.type_match(example_type): spec = factory.type_spec(var,example_type) break if not spec: # should really define our own type. 
raise IndexError else: variable_specs.append(spec) except KeyError: errors[var] = ("The type and dimensionality specifications" + "for variable '" + var + "' are missing.") except IndexError: errors[var] = ("Unable to convert variable '" + var + "' to a C++ type.") if errors: raise TypeError(format_error_msg(errors)) if auto_downcast: variable_specs = downcast(variable_specs) return variable_specs def downcast(var_specs): """ Cast python scalars down to most common type of arrays used. Right now, focus on complex and float types. Ignore int types. Require all arrays to have same type before forcing downcasts. Note: var_specs are currently altered in place (horrors...!) """ numeric_types = [] #grab all the numeric types associated with a variables. for var in var_specs: if hasattr(var,'numeric_type'): numeric_types.append(var.numeric_type) # if arrays are present, but none of them are double precision, # make all numeric types float or complex(float) if (('f' in numeric_types or 'F' in numeric_types) and not ( 'd' in numeric_types or 'D' in numeric_types)): for var in var_specs: if hasattr(var,'numeric_type'): if issubclass(var.numeric_type, complex): var.numeric_type = 'F' elif issubclass(var.numeric_type, float): var.numeric_type = 'f' return var_specs def indent(st,spaces): indention = ' '*spaces indented = indention + st.replace('\n','\n'+indention) # trim off any trailing spaces indented = re.sub(r' +$',r'',indented) return indented def format_error_msg(errors): #minimum effort right now... import pprint import cStringIO msg = cStringIO.StringIO() pprint.pprint(errors,msg) return msg.getvalue()
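The string handling above is easiest to see with concrete values. A minimal standalone sketch (the argument count and function name are assumed, not taken from the file) of the PyArg_ParseTupleAndKeywords format string built in parse_tuple_code() and of the indent() helper defined just above:

import re

def indent(st, spaces):
    # same logic as the helper above: prefix every line with `spaces` blanks
    indention = ' ' * spaces
    indented = indention + st.replace('\n', '\n' + indention)
    # trim trailing spaces, as in the original
    return re.sub(r' +$', r'', indented)

arg_count = 3                  # assumed number of required arguments
func_name = 'example_func'     # assumed extension function name
fmt = "O" * arg_count + "|O" + ':' + func_name
print(fmt)                               # -> OOO|O:example_func
print(indent("line one\nline two", 4))   # both lines indented by 4 spaces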
mit
-526,055,665,031,121,540
7,658,358,135,504,927,000
35.970954
83
0.529574
false
nitin-cherian/Webapps
SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/django/contrib/messages/api.py
48
3147
from django.contrib.messages import constants from django.contrib.messages.storage import default_storage __all__ = ( 'add_message', 'get_messages', 'get_level', 'set_level', 'debug', 'info', 'success', 'warning', 'error', 'MessageFailure', ) class MessageFailure(Exception): pass def add_message(request, level, message, extra_tags='', fail_silently=False): """ Attempts to add a message to the request using the 'messages' app. """ try: messages = request._messages except AttributeError: if not hasattr(request, 'META'): raise TypeError( "add_message() argument must be an HttpRequest object, not " "'%s'." % request.__class__.__name__ ) if not fail_silently: raise MessageFailure( 'You cannot add messages without installing ' 'django.contrib.messages.middleware.MessageMiddleware' ) else: return messages.add(level, message, extra_tags) def get_messages(request): """ Returns the message storage on the request if it exists, otherwise returns an empty list. """ return getattr(request, '_messages', []) def get_level(request): """ Returns the minimum level of messages to be recorded. The default level is the ``MESSAGE_LEVEL`` setting. If this is not found, the ``INFO`` level is used. """ storage = getattr(request, '_messages', default_storage(request)) return storage.level def set_level(request, level): """ Sets the minimum level of messages to be recorded, returning ``True`` if the level was recorded successfully. If set to ``None``, the default level will be used (see the ``get_level`` method). """ if not hasattr(request, '_messages'): return False request._messages.level = level return True def debug(request, message, extra_tags='', fail_silently=False): """ Adds a message with the ``DEBUG`` level. """ add_message(request, constants.DEBUG, message, extra_tags=extra_tags, fail_silently=fail_silently) def info(request, message, extra_tags='', fail_silently=False): """ Adds a message with the ``INFO`` level. """ add_message(request, constants.INFO, message, extra_tags=extra_tags, fail_silently=fail_silently) def success(request, message, extra_tags='', fail_silently=False): """ Adds a message with the ``SUCCESS`` level. """ add_message(request, constants.SUCCESS, message, extra_tags=extra_tags, fail_silently=fail_silently) def warning(request, message, extra_tags='', fail_silently=False): """ Adds a message with the ``WARNING`` level. """ add_message(request, constants.WARNING, message, extra_tags=extra_tags, fail_silently=fail_silently) def error(request, message, extra_tags='', fail_silently=False): """ Adds a message with the ``ERROR`` level. """ add_message(request, constants.ERROR, message, extra_tags=extra_tags, fail_silently=fail_silently)
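A hedged usage sketch of the shortcuts defined above (the view name, message text, and plain HttpResponse are illustrative, not part of the module): in a project with MessageMiddleware installed, a view normally reaches these functions through the django.contrib.messages package:

from django.contrib import messages
from django.http import HttpResponse

def update_profile(request):
    # calls add_message() above with the SUCCESS level; fail_silently avoids
    # MessageFailure when the messages middleware is not installed
    messages.success(request, 'Profile updated.', fail_silently=True)
    # get_messages() returns the request's storage, or [] without middleware
    body = ', '.join(str(m) for m in messages.get_messages(request))
    return HttpResponse(body)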
mit
-8,987,138,726,584,838,000
-2,643,047,545,871,463,400
28.411215
78
0.630124
false
averagehat/scikit-bio
skbio/io/format/fasta.py
3
38087
""" FASTA/QUAL format (:mod:`skbio.io.format.fasta`) ================================================ .. currentmodule:: skbio.io.format.fasta The FASTA file format (``fasta``) stores biological (i.e., nucleotide or protein) sequences in a simple plain text format that is both human-readable and easy to parse. The file format was first introduced and used in the FASTA software package [1]_. Additional descriptions of the file format can be found in [2]_ and [3]_. An example of a FASTA-formatted file containing two DNA sequences:: >seq1 db-accession-149855 CGATGTCGATCGATCGATCGATCAG >seq2 db-accession-34989 CATCGATCGATCGATGCATGCATGCATG The QUAL file format is an additional format related to FASTA. A FASTA file is sometimes accompanied by a QUAL file, particuarly when the FASTA file contains sequences generated on a high-throughput sequencing instrument. QUAL files store a Phred quality score (nonnegative integer) for each base in a sequence stored in FASTA format (see [4]_ for more details). scikit-bio supports reading and writing FASTA (and optionally QUAL) file formats. Format Support -------------- **Has Sniffer: Yes** +------+------+---------------------------------------------------------------+ |Reader|Writer| Object Class | +======+======+===============================================================+ |Yes |Yes |generator of :mod:`skbio.sequence.Sequence` objects | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.alignment.SequenceCollection` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.alignment.Alignment` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.sequence.Sequence` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.sequence.DNA` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.sequence.RNA` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.sequence.Protein` | +------+------+---------------------------------------------------------------+ .. note:: All readers and writers support an optional QUAL file via the ``qual`` parameter. If one is provided, quality scores will be read/written in addition to FASTA sequence data. Format Specification -------------------- The following sections define the FASTA and QUAL file formats in detail. FASTA Format ^^^^^^^^^^^^ A FASTA file contains one or more biological sequences. The sequences are stored sequentially, with a *record* for each sequence (also referred to as a *FASTA record*). Each *record* consists of a single-line *header* (sometimes referred to as a *defline*, *label*, *description*, or *comment*) followed by the sequence data, optionally split over multiple lines. .. note:: Blank or whitespace-only lines are only allowed at the beginning of the file, between FASTA records, or at the end of the file. A blank or whitespace-only line after the header line, within the sequence (for FASTA files), or within quality scores (for QUAL files) will raise an error. scikit-bio will ignore leading and trailing whitespace characters on each line while reading. .. note:: scikit-bio does not currently support legacy FASTA format (i.e., headers/comments denoted with a semicolon). 
The format supported by scikit-bio (described below in detail) most closely resembles the description given in NCBI's BLAST documentation [3]_. See [2]_ for more details on legacy FASTA format. If you would like legacy FASTA format support added to scikit-bio, please consider submitting a feature request on the `scikit-bio issue tracker <https://github.com/biocore/scikit-bio/issues>`_ (pull requests are also welcome!). Sequence Header ~~~~~~~~~~~~~~~ Each sequence header consists of a single line beginning with a greater-than (``>``) symbol. Immediately following this is a sequence identifier (ID) and description separated by one or more whitespace characters. The sequence ID and description are stored in the sequence `metadata` attribute, under the `'id'` and `'description'` keys, repectively. Both are optional. Each will be represented as the empty string (``''``) in `metadata` if it is not present in the header. A sequence ID consists of a single *word*: all characters after the greater- than symbol and before the first whitespace character (if any) are taken as the sequence ID. Unique sequence IDs are not strictly enforced by the FASTA format itself. A single standardized ID format is similarly not enforced by the FASTA format, though it is often common to use a unique library accession number for a sequence ID (e.g., NCBI's FASTA defline format [5]_). .. note:: scikit-bio will enforce sequence ID uniqueness depending on the type of object that the FASTA file is read into. For example, reading a FASTA file as a generator of ``Sequence`` objects will not enforce unique IDs since it simply yields each sequence it finds in the FASTA file. However, if the FASTA file is read into a ``SequenceCollection`` object, ID uniqueness will be enforced because that is a requirement of a ``SequenceCollection``. If a description is present, it is taken as the remaining characters that follow the sequence ID and initial whitespace(s). The description is considered additional information about the sequence (e.g., comments about the source of the sequence or the molecule that it encodes). For example, consider the following header:: >seq1 db-accession-149855 ``seq1`` is the sequence ID and ``db-accession-149855`` is the sequence description. .. note:: scikit-bio's readers will remove all leading and trailing whitespace from the description. If a header line begins with whitespace following the ``>``, the ID is assumed to be missing and the remainder of the line is taken as the description. Sequence Data ~~~~~~~~~~~~~ Biological sequence data follows the header, and can be split over multiple lines. The sequence data (i.e., nucleotides or amino acids) are stored using the standard IUPAC lexicon (single-letter codes). .. note:: scikit-bio supports both upper and lower case characters. This functionality depends on the type of object the data is being read into. For ``Sequence`` objects, sciki-bio doesn't care about the case. However, for other object types, such as :class:`skbio.sequence.DNA`, :class:`skbio.sequence.RNA`, and :class:`skbio.sequence.Protein`, the `lowercase` parameter must be used to control case functionality. Refer to the documentation for the constructors for details. .. note:: Both ``-`` and ``.`` are supported as gap characters. See :mod:`skbio.sequence` for more details on how scikit-bio interprets sequence data in its in-memory objects. Validation is performed for all scikit-bio objects which support it. This consists of all objects which enforce usage of IUPAC characters. 
If any invalid IUPAC characters are found in the sequence while reading from the FASTA file, an exception is raised. QUAL Format ^^^^^^^^^^^ A QUAL file contains quality scores for one or more biological sequences stored in a corresponding FASTA file. QUAL format is very similar to FASTA format: it stores records sequentially, with each record beginning with a header line containing a sequence ID and description. The same rules apply to QUAL headers as FASTA headers (see the above sections for details). scikit-bio processes FASTA and QUAL headers in exactly the same way. Quality scores are automatically stored in the object's `positional_metadata` attribute, under the `'quality'` column. Instead of storing biological sequence data in each record, a QUAL file stores a Phred quality score for each base in the corresponding sequence. Quality scores are represented as nonnegative integers separated by whitespace (typically a single space or newline), and can span multiple lines. .. note:: When reading FASTA and QUAL files, scikit-bio requires records to be in the same order in both files (i.e., each FASTA and QUAL record must have the same ID and description after being parsed). In addition to having the same order, the number of FASTA records must match the number of QUAL records (i.e., missing or additonal records are not allowed). scikit-bio also requires that the number of quality scores match the number of bases in the corresponding sequence. When writing FASTA and QUAL files, scikit-bio will maintain the same ordering of records in both files (i.e., using the same ID and description in both records) to support future reading. Format Parameters ----------------- The following parameters are available to change how FASTA/QUAL files are read or written in scikit-bio. QUAL File Parameter (Readers and Writers) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``qual`` parameter is available to all FASTA format readers and writers. It can be any file-like type supported by scikit-bio's I/O registry (e.g., file handle, file path, etc.). If ``qual`` is provided when reading, quality scores will be included in each in-memory ``Sequence`` object, in addition to sequence data stored in the FASTA file. When writing, quality scores will be written in QUAL format in addition to the sequence data being written in FASTA format. Reader-specific Parameters ^^^^^^^^^^^^^^^^^^^^^^^^^^ The available reader parameters differ depending on which reader is used. Generator, SequenceCollection, and Alignment Reader Parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``constructor`` parameter can be used with the ``Sequence`` generator, ``SequenceCollection``, and ``Alignment`` FASTA readers. ``constructor`` specifies the in-memory type of each sequence that is parsed, and defaults to ``Sequence``. ``constructor`` should be a subclass of ``Sequence``. For example, if you know that the FASTA file you're reading contains protein sequences, you would pass ``constructor=Protein`` to the reader call. .. note:: The FASTA sniffer will not attempt to guess the ``constructor`` parameter, so it will always default to ``Sequence`` if another type is not provided to the reader. Sequence Reader Parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``seq_num`` parameter can be used with the ``Sequence``, ``DNA``, ``RNA``, and ``Protein`` FASTA readers. ``seq_num`` specifies which sequence to read from the FASTA file (and optional QUAL file), and defaults to 1 (i.e., such that the first sequence is read). 
For example, to read the 50th sequence from a FASTA file, you would pass ``seq_num=50`` to the reader call. Writer-specific Parameters ^^^^^^^^^^^^^^^^^^^^^^^^^^ The following parameters are available to all FASTA format writers: - ``id_whitespace_replacement``: string to replace **each** whitespace character in a sequence ID. This parameter is useful for cases where an in-memory sequence ID contains whitespace, which would result in an on-disk representation that would not be read back into memory as the same ID (since IDs in FASTA format cannot contain whitespace). Defaults to ``_``. If ``None``, no whitespace replacement is performed and IDs are written as they are stored in memory (this has the potential to create an invalid FASTA-formatted file; see note below). This parameter also applies to a QUAL file if one is provided. - ``description_newline_replacement``: string to replace **each** newline character in a sequence description. Since a FASTA header must be a single line, newlines are not allowed in sequence descriptions and must be replaced in order to write a valid FASTA file. Defaults to a single space. If ``None``, no newline replacement is performed and descriptions are written as they are stored in memory (this has the potential to create an invalid FASTA-formatted file; see note below). This parameter also applies to a QUAL file if one is provided. - ``max_width``: integer specifying the maximum line width (i.e., number of characters) for sequence data and/or quality scores. If a sequence or its quality scores are longer than ``max_width``, it will be split across multiple lines, each with a maximum width of ``max_width``. Note that there are some caveats when splitting quality scores. A single quality score will *never* be split across multiple lines, otherwise it would become two different quality scores when read again. Thus, splitting only occurs *between* quality scores. This makes it possible to have a single long quality score written on its own line that exceeds ``max_width``. For example, the quality score ``12345`` would not be split across multiple lines even if ``max_width=3``. Thus, a 5-character line would be written. Default behavior is to not split sequence data or quality scores across multiple lines. - ``lowercase``: String or boolean array. If a string, it is treated as a key into the positional metadata of the object. If a boolean array, it indicates characters to write in lowercase. Characters in the sequence corresponding to `True` values will be written in lowercase. The boolean array must be the same length as the sequence. .. note:: The FASTA format writers will have noticeably better runtime performance if ``id_whitespace_replacement`` and/or ``description_newline_replacement`` are set to ``None`` so that whitespace replacement is not performed during writing. However, this can potentially create invalid FASTA files, especially if there are newline characters in the IDs or descriptions. For IDs with whitespace, this can also affect how the IDs are read into memory in a subsequent read operation. For example, if an in-memory sequence ID is ``'seq 1'`` and ``id_whitespace_replacement=None``, reading the FASTA file back into memory would result in an ID of ``'seq'``, and ``'1'`` would be part of the sequence description. 
Examples -------- Reading and Writing FASTA Files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Suppose we have the following FASTA file with five equal-length sequences (example modified from [6]_):: >seq1 Turkey AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT >seq2 Salmo gair AAGCCTTGGCAGTGCAGGGTGAGCCGTGG CCGGGCACGGTAT >seq3 H. Sapiens ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA >seq4 Chimp AAACCCTTGCCG TTACGCTTAAAC CGAGGCCGGGAC ACTCAT >seq5 Gorilla AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA .. note:: Original copyright notice for the above example file: *(c) Copyright 1986-2008 by The University of Washington. Written by Joseph Felsenstein. Permission is granted to copy this document provided that no fee is charged for it and that this copyright notice is not removed.* Note that the sequences are not required to be of equal length in order for the file to be a valid FASTA file (this depends on the object that you're reading the file into). Also note that some of the sequences occur on a single line, while others are split across multiple lines. Let's define this file in-memory as a ``StringIO``, though this could be a real file path, file handle, or anything that's supported by scikit-bio's I/O registry in practice: >>> fl = [u">seq1 Turkey\\n", ... u"AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n", ... u">seq2 Salmo gair\\n", ... u"AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n", ... u"CCGGGCACGGTAT\\n", ... u">seq3 H. Sapiens\\n", ... u"ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n", ... u">seq4 Chimp\\n", ... u"AAACCCTTGCCG\\n", ... u"TTACGCTTAAAC\\n", ... u"CGAGGCCGGGAC\\n", ... u"ACTCAT\\n", ... u">seq5 Gorilla\\n", ... u"AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n"] Let's read the FASTA file into a ``SequenceCollection``: >>> from skbio import SequenceCollection >>> sc = SequenceCollection.read(fl) >>> sc.sequence_lengths() [42, 42, 42, 42, 42] >>> sc.ids() [u'seq1', u'seq2', u'seq3', u'seq4', u'seq5'] We see that all 5 sequences have 42 characters, and that each of the sequence IDs were successfully read into memory. Since these sequences are of equal length (presumably because they've been aligned), let's load the FASTA file into an ``Alignment`` object, which is a more appropriate data structure: >>> from skbio import Alignment >>> aln = Alignment.read(fl) >>> aln.sequence_length() 42 Note that we were able to read the FASTA file into two different data structures (``SequenceCollection`` and ``Alignment``) using the exact same ``read`` method call (and underlying reading/parsing logic). Also note that we didn't specify a file format in the ``read`` call. The FASTA sniffer detected the correct file format for us! Let's inspect the type of sequences stored in the ``Alignment``: >>> aln[0] Sequence ------------------------------------------------ Metadata: u'description': u'Turkey' u'id': u'seq1' Stats: length: 42 ------------------------------------------------ 0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT By default, sequences are loaded as ``Sequence`` objects. 
We can change the type of sequence via the ``constructor`` parameter: >>> from skbio import DNA >>> aln = Alignment.read(fl, constructor=DNA) >>> aln[0] # doctest: +NORMALIZE_WHITESPACE DNA ------------------------------------------------ Metadata: u'description': u'Turkey' u'id': u'seq1' Stats: length: 42 has gaps: False has degenerates: True has non-degenerates: True GC-content: 54.76% ------------------------------------------------ 0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT We now have an ``Alignment`` of ``DNA`` objects instead of ``Sequence`` objects. To write the alignment in FASTA format: >>> from io import StringIO >>> with StringIO() as fh: ... print(aln.write(fh).getvalue()) >seq1 Turkey AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT >seq2 Salmo gair AAGCCTTGGCAGTGCAGGGTGAGCCGTGGCCGGGCACGGTAT >seq3 H. Sapiens ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA >seq4 Chimp AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT >seq5 Gorilla AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA <BLANKLINE> Both ``SequenceCollection`` and ``Alignment`` load all of the sequences from the FASTA file into memory at once. If the FASTA file is large (which is often the case), this may be infeasible if you don't have enough memory. To work around this issue, you can stream the sequences using scikit-bio's generator-based FASTA reader and writer. The generator-based reader yields ``Sequence`` objects (or subclasses if ``constructor`` is supplied) one at a time, instead of loading all sequences into memory. For example, let's use the generator-based reader to process a single sequence at a time in a ``for`` loop: >>> import skbio.io >>> for seq in skbio.io.read(fl, format='fasta'): ... seq ... print('') Sequence ------------------------------------------------ Metadata: u'description': u'Turkey' u'id': u'seq1' Stats: length: 42 ------------------------------------------------ 0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT <BLANKLINE> Sequence ------------------------------------------------ Metadata: u'description': u'Salmo gair' u'id': u'seq2' Stats: length: 42 ------------------------------------------------ 0 AAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT <BLANKLINE> Sequence ------------------------------------------------ Metadata: u'description': u'H. Sapiens' u'id': u'seq3' Stats: length: 42 ------------------------------------------------ 0 ACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA <BLANKLINE> Sequence ------------------------------------------------ Metadata: u'description': u'Chimp' u'id': u'seq4' Stats: length: 42 ------------------------------------------------ 0 AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT <BLANKLINE> Sequence ------------------------------------------------ Metadata: u'description': u'Gorilla' u'id': u'seq5' Stats: length: 42 ------------------------------------------------ 0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA <BLANKLINE> A single sequence can also be read into a ``Sequence`` (or subclass): >>> from skbio import Sequence >>> seq = Sequence.read(fl) >>> seq Sequence ------------------------------------------------ Metadata: u'description': u'Turkey' u'id': u'seq1' Stats: length: 42 ------------------------------------------------ 0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT By default, the first sequence in the FASTA file is read. This can be controlled with ``seq_num``. 
For example, to read the fifth sequence: >>> seq = Sequence.read(fl, seq_num=5) >>> seq Sequence ------------------------------------------------ Metadata: u'description': u'Gorilla' u'id': u'seq5' Stats: length: 42 ------------------------------------------------ 0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA We can use the same API to read the fifth sequence into a ``DNA``: >>> dna_seq = DNA.read(fl, seq_num=5) >>> dna_seq DNA ------------------------------------------------ Metadata: u'description': u'Gorilla' u'id': u'seq5' Stats: length: 42 has gaps: False has degenerates: False has non-degenerates: True GC-content: 50.00% ------------------------------------------------ 0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA Individual sequence objects can also be written in FASTA format: >>> with StringIO() as fh: ... print(dna_seq.write(fh).getvalue()) >seq5 Gorilla AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA <BLANKLINE> Reading and Writing FASTA/QUAL Files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In addition to reading and writing standalone FASTA files, scikit-bio also supports reading and writing FASTA and QUAL files together. Suppose we have the following FASTA file:: >seq1 db-accession-149855 CGATGTC >seq2 db-accession-34989 CATCG Also suppose we have the following QUAL file:: >seq1 db-accession-149855 40 39 39 4 50 1 100 >seq2 db-accession-34989 3 3 10 42 80 >>> fasta_fl = [ ... u">seq1 db-accession-149855\\n", ... u"CGATGTC\\n", ... u">seq2 db-accession-34989\\n", ... u"CATCG\\n"] >>> qual_fl = [ ... u">seq1 db-accession-149855\\n", ... u"40 39 39 4\\n", ... u"50 1 100\\n", ... u">seq2 db-accession-34989\\n", ... u"3 3 10 42 80\\n"] To read in a single ``Sequence`` at a time, we can use the generator-based reader as we did above, providing both FASTA and QUAL files: >>> for seq in skbio.io.read(fasta_fl, qual=qual_fl, format='fasta'): ... seq ... print('') Sequence ------------------------------------------ Metadata: u'description': u'db-accession-149855' u'id': u'seq1' Positional metadata: u'quality': <dtype: uint8> Stats: length: 7 ------------------------------------------ 0 CGATGTC <BLANKLINE> Sequence ----------------------------------------- Metadata: u'description': u'db-accession-34989' u'id': u'seq2' Positional metadata: u'quality': <dtype: uint8> Stats: length: 5 ----------------------------------------- 0 CATCG <BLANKLINE> Note that the sequence objects have quality scores stored as positional metadata since we provided a QUAL file. The other FASTA readers operate in a similar manner. Now let's load the sequences and their quality scores into a ``SequenceCollection``: >>> sc = SequenceCollection.read(fasta_fl, qual=qual_fl) >>> sc <SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00> To write the sequence data and quality scores in the ``SequenceCollection`` to FASTA and QUAL files, respectively, we run: >>> new_fasta_fh = StringIO() >>> new_qual_fh = StringIO() >>> _ = sc.write(new_fasta_fh, qual=new_qual_fh) >>> print(new_fasta_fh.getvalue()) >seq1 db-accession-149855 CGATGTC >seq2 db-accession-34989 CATCG <BLANKLINE> >>> print(new_qual_fh.getvalue()) >seq1 db-accession-149855 40 39 39 4 50 1 100 >seq2 db-accession-34989 3 3 10 42 80 <BLANKLINE> >>> new_fasta_fh.close() >>> new_qual_fh.close() References ---------- .. [1] Lipman, DJ; Pearson, WR (1985). "Rapid and sensitive protein similarity searches". Science 227 (4693): 1435-41. .. [2] http://en.wikipedia.org/wiki/FASTA_format .. [3] http://blast.ncbi.nlm.nih.gov/blastcgihelp.shtml .. 
[4] https://www.broadinstitute.org/crd/wiki/index.php/Qual .. [5] Madden T. The BLAST Sequence Analysis Tool. 2002 Oct 9 [Updated 2003 Aug 13]. In: McEntyre J, Ostell J, editors. The NCBI Handbook [Internet]. Bethesda (MD): National Center for Biotechnology Information (US); 2002-. Chapter 16. Available from: http://www.ncbi.nlm.nih.gov/books/NBK21097/ .. [6] http://evolution.genetics.washington.edu/phylip/doc/sequence.html """ # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import (absolute_import, division, print_function, unicode_literals) from future.builtins import range, zip from six.moves import zip_longest import textwrap import numpy as np from skbio.io import create_format, FASTAFormatError, QUALFormatError from skbio.io.registry import FileSentinel from skbio.io.format._base import (_get_nth_sequence, _parse_fasta_like_header, _format_fasta_like_records, _line_generator, _too_many_blanks) from skbio.util._misc import chunk_str from skbio.alignment import SequenceCollection, Alignment from skbio.sequence import Sequence, DNA, RNA, Protein fasta = create_format('fasta') @fasta.sniffer() def _fasta_sniffer(fh): # Strategy: # Ignore up to 5 blank/whitespace-only lines at the beginning of the # file. Read up to 10 records. If at least one record is read (i.e. # the file isn't empty) and no errors are thrown during reading, assume # the file is in FASTA format. If a record appears to be QUAL, do *not* # identify the file as FASTA since we don't want to sniff QUAL files as # FASTA (technically they can be read as FASTA since the sequences may # not be validated but it probably isn't what the user wanted). Also, if # we add QUAL as its own file format in the future, we wouldn't want the # FASTA and QUAL sniffers to both positively identify a QUAL file. 
if _too_many_blanks(fh, 5): return False, {} num_records = 10 empty = True try: parser = _parse_fasta_raw(fh, _sniffer_data_parser, FASTAFormatError) for _ in zip(range(num_records), parser): empty = False except FASTAFormatError: return False, {} if empty: return False, {} else: return True, {} def _sniffer_data_parser(chunks): data = _parse_sequence_data(chunks) try: _parse_quality_scores(chunks) except QUALFormatError: return data else: # used for flow control within sniffer, user should never see this # message raise FASTAFormatError('Data appear to be quality scores.') @fasta.reader(None) def _fasta_to_generator(fh, qual=FileSentinel, constructor=Sequence, **kwargs): if qual is None: for seq, id_, desc in _parse_fasta_raw(fh, _parse_sequence_data, FASTAFormatError): yield constructor(seq, metadata={'id': id_, 'description': desc}, **kwargs) else: fasta_gen = _parse_fasta_raw(fh, _parse_sequence_data, FASTAFormatError) qual_gen = _parse_fasta_raw(qual, _parse_quality_scores, QUALFormatError) for fasta_rec, qual_rec in zip_longest(fasta_gen, qual_gen, fillvalue=None): if fasta_rec is None: raise FASTAFormatError( "QUAL file has more records than FASTA file.") if qual_rec is None: raise FASTAFormatError( "FASTA file has more records than QUAL file.") fasta_seq, fasta_id, fasta_desc = fasta_rec qual_scores, qual_id, qual_desc = qual_rec if fasta_id != qual_id: raise FASTAFormatError( "IDs do not match between FASTA and QUAL records: %r != %r" % (str(fasta_id), str(qual_id))) if fasta_desc != qual_desc: raise FASTAFormatError( "Descriptions do not match between FASTA and QUAL " "records: %r != %r" % (str(fasta_desc), str(qual_desc))) # sequence and quality scores lengths are checked in constructor yield constructor( fasta_seq, metadata={'id': fasta_id, 'description': fasta_desc}, positional_metadata={'quality': qual_scores}, **kwargs) @fasta.reader(Sequence) def _fasta_to_biological_sequence(fh, qual=FileSentinel, seq_num=1): return _get_nth_sequence( _fasta_to_generator(fh, qual=qual, constructor=Sequence), seq_num) @fasta.reader(DNA) def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs): return _get_nth_sequence( _fasta_to_generator(fh, qual=qual, constructor=DNA, **kwargs), seq_num) @fasta.reader(RNA) def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs): return _get_nth_sequence( _fasta_to_generator(fh, qual=qual, constructor=RNA, **kwargs), seq_num) @fasta.reader(Protein) def _fasta_to_protein_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs): return _get_nth_sequence( _fasta_to_generator(fh, qual=qual, constructor=Protein, **kwargs), seq_num) @fasta.reader(SequenceCollection) def _fasta_to_sequence_collection(fh, qual=FileSentinel, constructor=Sequence, **kwargs): return SequenceCollection( list(_fasta_to_generator(fh, qual=qual, constructor=constructor, **kwargs))) @fasta.reader(Alignment) def _fasta_to_alignment(fh, qual=FileSentinel, constructor=Sequence, **kwargs): return Alignment( list(_fasta_to_generator(fh, qual=qual, constructor=constructor, **kwargs))) @fasta.writer(None) def _generator_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None, lowercase=None): if max_width is not None: if max_width < 1: raise ValueError( "Maximum line width must be greater than zero (max_width=%d)." % max_width) if qual is not None: # define text wrapper for splitting quality scores here for # efficiency. textwrap docs recommend reusing a TextWrapper # instance when it is used many times. 
configure text wrapper to # never break "words" (i.e., integer quality scores) across lines qual_wrapper = textwrap.TextWrapper( width=max_width, break_long_words=False, break_on_hyphens=False) formatted_records = _format_fasta_like_records( obj, id_whitespace_replacement, description_newline_replacement, qual is not None, lowercase) for header, seq_str, qual_scores in formatted_records: if max_width is not None: seq_str = chunk_str(seq_str, max_width, '\n') fh.write('>%s\n%s\n' % (header, seq_str)) if qual is not None: qual_str = ' '.join(np.asarray(qual_scores, dtype=np.str)) if max_width is not None: qual_str = qual_wrapper.fill(qual_str) qual.write('>%s\n%s\n' % (header, qual_str)) @fasta.writer(Sequence) def _biological_sequence_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None): _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement, description_newline_replacement, max_width) @fasta.writer(DNA) def _dna_sequence_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None, lowercase=None): _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement, description_newline_replacement, max_width, lowercase) @fasta.writer(RNA) def _rna_sequence_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None, lowercase=None): _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement, description_newline_replacement, max_width, lowercase) @fasta.writer(Protein) def _protein_sequence_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None, lowercase=None): _sequences_to_fasta([obj], fh, qual, id_whitespace_replacement, description_newline_replacement, max_width, lowercase) @fasta.writer(SequenceCollection) def _sequence_collection_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None, lowercase=None): _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement, description_newline_replacement, max_width, lowercase) @fasta.writer(Alignment) def _alignment_to_fasta(obj, fh, qual=FileSentinel, id_whitespace_replacement='_', description_newline_replacement=' ', max_width=None, lowercase=None): _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement, description_newline_replacement, max_width, lowercase) def _parse_fasta_raw(fh, data_parser, error_type): """Raw parser for FASTA or QUAL files. Returns raw values (seq/qual, id, description). It is the responsibility of the caller to construct the correct in-memory object to hold the data. 
""" # Skip any blank or whitespace-only lines at beginning of file seq_header = next(_line_generator(fh, skip_blanks=True)) # header check inlined here and below for performance if seq_header.startswith('>'): id_, desc = _parse_fasta_like_header(seq_header) else: raise error_type( "Found non-header line when attempting to read the 1st record:" "\n%s" % seq_header) data_chunks = [] prev = seq_header for line in _line_generator(fh, skip_blanks=False): if line.startswith('>'): # new header, so yield current record and reset state yield data_parser(data_chunks), id_, desc data_chunks = [] id_, desc = _parse_fasta_like_header(line) else: if line: # ensure no blank lines within a single record if not prev: raise error_type( "Found blank or whitespace-only line within record.") data_chunks.append(line) prev = line # yield last record in file yield data_parser(data_chunks), id_, desc def _parse_sequence_data(chunks): if not chunks: raise FASTAFormatError("Found header without sequence data.") return ''.join(chunks) def _parse_quality_scores(chunks): if not chunks: raise QUALFormatError("Found header without quality scores.") qual_str = ' '.join(chunks) try: quality = np.asarray(qual_str.split(), dtype=int) except ValueError: raise QUALFormatError( "Could not convert quality scores to integers:\n%s" % str(qual_str)) if (quality < 0).any(): raise QUALFormatError( "Encountered negative quality score(s). Quality scores must be " "greater than or equal to zero.") if (quality > 255).any(): raise QUALFormatError( "Encountered quality score(s) greater than 255. scikit-bio only " "supports quality scores in the range 0-255 (inclusive) when " "reading QUAL files.") return quality.astype(np.uint8, casting='unsafe', copy=False) def _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement, description_newline_replacement, max_width, lowercase=None): def seq_gen(): for seq in obj: yield seq _generator_to_fasta( seq_gen(), fh, qual=qual, id_whitespace_replacement=id_whitespace_replacement, description_newline_replacement=description_newline_replacement, max_width=max_width, lowercase=lowercase)
bsd-3-clause
27,516,236,942,979,196
6,635,511,776,236,986,000
38.468394
79
0.637357
false
Himon-SYNCRAFT/taskplus
tests/core/actions/test_get_task_status_details.py
1
3408
from unittest import mock from taskplus.core.actions import (GetTaskStatusDetailsAction, GetTaskStatusDetailsRequest) from taskplus.core.domain import TaskStatus from taskplus.core.shared.response import ResponseFailure def test_get_status_details_action(): status = mock.Mock() status = TaskStatus(name='new', id=1) statuses_repo = mock.Mock() statuses_repo.one.return_value = status request = GetTaskStatusDetailsRequest(status.id) action = GetTaskStatusDetailsAction(statuses_repo) response = action.execute(request) assert bool(response) is True statuses_repo.one.assert_called_once_with(status.id) assert response.value == status def test_get_status_details_action_with_hooks(): status = mock.Mock() status = TaskStatus(name='new', id=1) statuses_repo = mock.Mock() statuses_repo.one.return_value = status request = GetTaskStatusDetailsRequest(status.id) action = GetTaskStatusDetailsAction(statuses_repo) before = mock.MagicMock() after = mock.MagicMock() action.add_before_execution_hook(before) action.add_after_execution_hook(after) response = action.execute(request) assert before.called assert after.called assert bool(response) is True statuses_repo.one.assert_called_once_with(status.id) assert response.value == status def test_get_status_details_action_handles_bad_request(): status = mock.Mock() status = TaskStatus(name='new', id=1) statuses_repo = mock.Mock() statuses_repo.one.return_value = status request = GetTaskStatusDetailsRequest(status_id=None) action = GetTaskStatusDetailsAction(statuses_repo) response = action.execute(request) assert bool(response) is False assert not statuses_repo.one.called assert response.value == { 'type': ResponseFailure.PARAMETER_ERROR, 'message': 'status_id: is required' } def test_get_status_details_action_handles_generic_error(): error_message = 'Error!!!' statuses_repo = mock.Mock() statuses_repo.one.side_effect = Exception(error_message) request = GetTaskStatusDetailsRequest(status_id=1) action = GetTaskStatusDetailsAction(statuses_repo) response = action.execute(request) assert bool(response) is False statuses_repo.one.assert_called_once_with(1) assert response.value == { 'type': ResponseFailure.SYSTEM_ERROR, 'message': 'Exception: {}'.format(error_message) } def test_get_status_details_request(): status_id = 1 request = GetTaskStatusDetailsRequest(status_id) assert request.is_valid() assert request.status_id == status_id def test_get_status_details_request_without_id(): status_id = None request = GetTaskStatusDetailsRequest(status_id) assert not request.is_valid() assert request.status_id == status_id assert len(request.errors) == 1 error = request.errors[0] assert error.parameter == 'status_id' assert error.message == 'is required' def test_get_status_details_bad_request(): status_id = 'asd' request = GetTaskStatusDetailsRequest(status_id) assert not request.is_valid() assert request.status_id == status_id assert len(request.errors) == 1 error = request.errors[0] assert error.parameter == 'status_id' assert error.message == 'expected int, got str(asd)'
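The tests above assume an Action object that runs registered hooks around execute(). A toy stand-in (not the taskplus implementation; the hook call signature and the dict response are assumptions) illustrating that contract:

class ToyAction:
    def __init__(self):
        self._before, self._after = [], []

    def add_before_execution_hook(self, hook):
        self._before.append(hook)

    def add_after_execution_hook(self, hook):
        self._after.append(hook)

    def execute(self, request):
        # hook arguments are an assumption; the real library may differ
        for hook in self._before:
            hook(self, request)
        response = {'value': request}   # stand-in for the real Response object
        for hook in self._after:
            hook(self, request, response)
        return response

action = ToyAction()
action.add_before_execution_hook(lambda a, r: print('before', r))
action.add_after_execution_hook(lambda a, r, resp: print('after', resp))
action.execute('request-object')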
bsd-3-clause
2,787,983,723,441,946,600
-5,790,871,100,610,682,000
29.159292
63
0.701585
false
TOCyna/tabelinha
flask/lib/python2.7/site-packages/werkzeug/test.py
32
34230
# -*- coding: utf-8 -*- """ werkzeug.test ~~~~~~~~~~~~~ This module implements a client to WSGI applications for testing. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import sys import mimetypes from time import time from random import random from itertools import chain from tempfile import TemporaryFile from io import BytesIO try: from urllib2 import Request as U2Request except ImportError: from urllib.request import Request as U2Request try: from http.cookiejar import CookieJar except ImportError: # Py2 from cookielib import CookieJar from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \ string_types, text_type, reraise, wsgi_encoding_dance, \ make_literal_wrapper from werkzeug._internal import _empty_stream, _get_environ from werkzeug.wrappers import BaseRequest from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \ url_unparse, url_parse from werkzeug.wsgi import get_host, get_current_url, ClosingIterator from werkzeug.utils import dump_cookie from werkzeug.datastructures import FileMultiDict, MultiDict, \ CombinedMultiDict, Headers, FileStorage def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500, boundary=None, charset='utf-8'): """Encode a dict of values (either strings or file descriptors or :class:`FileStorage` objects.) into a multipart encoded string stored in a file descriptor. """ if boundary is None: boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random()) _closure = [BytesIO(), 0, False] if use_tempfile: def write_binary(string): stream, total_length, on_disk = _closure if on_disk: stream.write(string) else: length = len(string) if length + _closure[1] <= threshold: stream.write(string) else: new_stream = TemporaryFile('wb+') new_stream.write(stream.getvalue()) new_stream.write(string) _closure[0] = new_stream _closure[2] = True _closure[1] = total_length + length else: write_binary = _closure[0].write def write(string): write_binary(string.encode(charset)) if not isinstance(values, MultiDict): values = MultiDict(values) for key, values in iterlists(values): for value in values: write('--%s\r\nContent-Disposition: form-data; name="%s"' % (boundary, key)) reader = getattr(value, 'read', None) if reader is not None: filename = getattr(value, 'filename', getattr(value, 'name', None)) content_type = getattr(value, 'content_type', None) if content_type is None: content_type = filename and \ mimetypes.guess_type(filename)[0] or \ 'application/octet-stream' if filename is not None: write('; filename="%s"\r\n' % filename) else: write('\r\n') write('Content-Type: %s\r\n\r\n' % content_type) while 1: chunk = reader(16384) if not chunk: break write_binary(chunk) else: if not isinstance(value, string_types): value = str(value) else: value = to_bytes(value, charset) write('\r\n\r\n') write_binary(value) write('\r\n') write('--%s--\r\n' % boundary) length = int(_closure[0].tell()) _closure[0].seek(0) return _closure[0], length, boundary def encode_multipart(values, boundary=None, charset='utf-8'): """Like `stream_encode_multipart` but returns a tuple in the form (``boundary``, ``data``) where data is a bytestring. 
""" stream, length, boundary = stream_encode_multipart( values, use_tempfile=False, boundary=boundary, charset=charset) return boundary, stream.read() def File(fd, filename=None, mimetype=None): """Backwards compat.""" from warnings import warn warn(DeprecationWarning('werkzeug.test.File is deprecated, use the ' 'EnvironBuilder or FileStorage instead')) return FileStorage(fd, filename=filename, content_type=mimetype) class _TestCookieHeaders(object): """A headers adapter for cookielib """ def __init__(self, headers): self.headers = headers def getheaders(self, name): headers = [] name = name.lower() for k, v in self.headers: if k.lower() == name: headers.append(v) return headers def get_all(self, name, default=None): rv = [] for k, v in self.headers: if k.lower() == name.lower(): rv.append(v) return rv or default or [] class _TestCookieResponse(object): """Something that looks like a httplib.HTTPResponse, but is actually just an adapter for our test responses to make them available for cookielib. """ def __init__(self, headers): self.headers = _TestCookieHeaders(headers) def info(self): return self.headers class _TestCookieJar(CookieJar): """A cookielib.CookieJar modified to inject and read cookie headers from and to wsgi environments, and wsgi application responses. """ def inject_wsgi(self, environ): """Inject the cookies as client headers into the server's wsgi environment. """ cvals = [] for cookie in self: cvals.append('%s=%s' % (cookie.name, cookie.value)) if cvals: environ['HTTP_COOKIE'] = '; '.join(cvals) def extract_wsgi(self, environ, headers): """Extract the server's set-cookie headers as cookies into the cookie jar. """ self.extract_cookies( _TestCookieResponse(headers), U2Request(get_current_url(environ)), ) def _iter_data(data): """Iterates over a dict or multidict yielding all keys and values. This is used to iterate over the data passed to the :class:`EnvironBuilder`. """ if isinstance(data, MultiDict): for key, values in iterlists(data): for value in values: yield key, value else: for key, values in iteritems(data): if isinstance(values, list): for value in values: yield key, value else: yield key, values class EnvironBuilder(object): """This class can be used to conveniently create a WSGI environment for testing purposes. It can be used to quickly create WSGI environments or request objects from arbitrary data. The signature of this class is also used in some other places as of Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`, :meth:`Client.open`). Because of this most of the functionality is available through the constructor alone. Files and regular form data can be manipulated independently of each other with the :attr:`form` and :attr:`files` attributes, but are passed with the same argument to the constructor: `data`. `data` can be any of these values: - a `str`: If it's a string it is converted into a :attr:`input_stream`, the :attr:`content_length` is set and you have to provide a :attr:`content_type`. - a `dict`: If it's a dict the keys have to be strings and the values any of the following objects: - a :class:`file`-like object. These are converted into :class:`FileStorage` objects automatically. - a tuple. The :meth:`~FileMultiDict.add_file` method is called with the tuple items as positional arguments. .. versionadded:: 0.6 `path` and `base_url` can now be unicode strings that are encoded using the :func:`iri_to_uri` function. :param path: the path of the request. In the WSGI environment this will end up as `PATH_INFO`. 
If the `query_string` is not defined and there is a question mark in the `path` everything after it is used as query string. :param base_url: the base URL is a URL that is used to extract the WSGI URL scheme, host (server name + server port) and the script root (`SCRIPT_NAME`). :param query_string: an optional string or dict with URL parameters. :param method: the HTTP method to use, defaults to `GET`. :param input_stream: an optional input stream. Do not specify this and `data`. As soon as an input stream is set you can't modify :attr:`args` and :attr:`files` unless you set the :attr:`input_stream` to `None` again. :param content_type: The content type for the request. As of 0.5 you don't have to provide this when specifying files and form data via `data`. :param content_length: The content length for the request. You don't have to specify this when providing data via `data`. :param errors_stream: an optional error stream that is used for `wsgi.errors`. Defaults to :data:`stderr`. :param multithread: controls `wsgi.multithread`. Defaults to `False`. :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`. :param run_once: controls `wsgi.run_once`. Defaults to `False`. :param headers: an optional list or :class:`Headers` object of headers. :param data: a string or dict of form data. See explanation above. :param environ_base: an optional dict of environment defaults. :param environ_overrides: an optional dict of environment overrides. :param charset: the charset used to encode unicode data. """ #: the server protocol to use. defaults to HTTP/1.1 server_protocol = 'HTTP/1.1' #: the wsgi version to use. defaults to (1, 0) wsgi_version = (1, 0) #: the default request class for :meth:`get_request` request_class = BaseRequest def __init__(self, path='/', base_url=None, query_string=None, method='GET', input_stream=None, content_type=None, content_length=None, errors_stream=None, multithread=False, multiprocess=False, run_once=False, headers=None, data=None, environ_base=None, environ_overrides=None, charset='utf-8'): path_s = make_literal_wrapper(path) if query_string is None and path_s('?') in path: path, query_string = path.split(path_s('?'), 1) self.charset = charset self.path = iri_to_uri(path) if base_url is not None: base_url = url_fix(iri_to_uri(base_url, charset), charset) self.base_url = base_url if isinstance(query_string, (bytes, text_type)): self.query_string = query_string else: if query_string is None: query_string = MultiDict() elif not isinstance(query_string, MultiDict): query_string = MultiDict(query_string) self.args = query_string self.method = method if headers is None: headers = Headers() elif not isinstance(headers, Headers): headers = Headers(headers) self.headers = headers if content_type is not None: self.content_type = content_type if errors_stream is None: errors_stream = sys.stderr self.errors_stream = errors_stream self.multithread = multithread self.multiprocess = multiprocess self.run_once = run_once self.environ_base = environ_base self.environ_overrides = environ_overrides self.input_stream = input_stream self.content_length = content_length self.closed = False if data: if input_stream is not None: raise TypeError('can\'t provide input stream and data') if isinstance(data, text_type): data = data.encode(self.charset) if isinstance(data, bytes): self.input_stream = BytesIO(data) if self.content_length is None: self.content_length = len(data) else: for key, value in _iter_data(data): if isinstance(value, (tuple, dict)) or \ hasattr(value, 'read'): 
self._add_file_from_data(key, value) else: self.form.setlistdefault(key).append(value) def _add_file_from_data(self, key, value): """Called in the EnvironBuilder to add files from the data dict.""" if isinstance(value, tuple): self.files.add_file(key, *value) elif isinstance(value, dict): from warnings import warn warn(DeprecationWarning('it\'s no longer possible to pass dicts ' 'as `data`. Use tuples or FileStorage ' 'objects instead'), stacklevel=2) value = dict(value) mimetype = value.pop('mimetype', None) if mimetype is not None: value['content_type'] = mimetype self.files.add_file(key, **value) else: self.files.add_file(key, value) def _get_base_url(self): return url_unparse((self.url_scheme, self.host, self.script_root, '', '')).rstrip('/') + '/' def _set_base_url(self, value): if value is None: scheme = 'http' netloc = 'localhost' script_root = '' else: scheme, netloc, script_root, qs, anchor = url_parse(value) if qs or anchor: raise ValueError('base url must not contain a query string ' 'or fragment') self.script_root = script_root.rstrip('/') self.host = netloc self.url_scheme = scheme base_url = property(_get_base_url, _set_base_url, doc=''' The base URL is a URL that is used to extract the WSGI URL scheme, host (server name + server port) and the script root (`SCRIPT_NAME`).''') del _get_base_url, _set_base_url def _get_content_type(self): ct = self.headers.get('Content-Type') if ct is None and not self._input_stream: if self._files: return 'multipart/form-data' elif self._form: return 'application/x-www-form-urlencoded' return None return ct def _set_content_type(self, value): if value is None: self.headers.pop('Content-Type', None) else: self.headers['Content-Type'] = value content_type = property(_get_content_type, _set_content_type, doc=''' The content type for the request. Reflected from and to the :attr:`headers`. Do not set if you set :attr:`files` or :attr:`form` for auto detection.''') del _get_content_type, _set_content_type def _get_content_length(self): return self.headers.get('Content-Length', type=int) def _set_content_length(self, value): if value is None: self.headers.pop('Content-Length', None) else: self.headers['Content-Length'] = str(value) content_length = property(_get_content_length, _set_content_length, doc=''' The content length as integer. Reflected from and to the :attr:`headers`. Do not set if you set :attr:`files` or :attr:`form` for auto detection.''') del _get_content_length, _set_content_length def form_property(name, storage, doc): key = '_' + name def getter(self): if self._input_stream is not None: raise AttributeError('an input stream is defined') rv = getattr(self, key) if rv is None: rv = storage() setattr(self, key, rv) return rv def setter(self, value): self._input_stream = None setattr(self, key, value) return property(getter, setter, doc) form = form_property('form', MultiDict, doc=''' A :class:`MultiDict` of form values.''') files = form_property('files', FileMultiDict, doc=''' A :class:`FileMultiDict` of uploaded files. You can use the :meth:`~FileMultiDict.add_file` method to add new files to the dict.''') del form_property def _get_input_stream(self): return self._input_stream def _set_input_stream(self, value): self._input_stream = value self._form = self._files = None input_stream = property(_get_input_stream, _set_input_stream, doc=''' An optional input stream. 
If you set this it will clear :attr:`form` and :attr:`files`.''') del _get_input_stream, _set_input_stream def _get_query_string(self): if self._query_string is None: if self._args is not None: return url_encode(self._args, charset=self.charset) return '' return self._query_string def _set_query_string(self, value): self._query_string = value self._args = None query_string = property(_get_query_string, _set_query_string, doc=''' The query string. If you set this to a string :attr:`args` will no longer be available.''') del _get_query_string, _set_query_string def _get_args(self): if self._query_string is not None: raise AttributeError('a query string is defined') if self._args is None: self._args = MultiDict() return self._args def _set_args(self, value): self._query_string = None self._args = value args = property(_get_args, _set_args, doc=''' The URL arguments as :class:`MultiDict`.''') del _get_args, _set_args @property def server_name(self): """The server name (read-only, use :attr:`host` to set)""" return self.host.split(':', 1)[0] @property def server_port(self): """The server port as integer (read-only, use :attr:`host` to set)""" pieces = self.host.split(':', 1) if len(pieces) == 2 and pieces[1].isdigit(): return int(pieces[1]) elif self.url_scheme == 'https': return 443 return 80 def __del__(self): try: self.close() except Exception: pass def close(self): """Closes all files. If you put real :class:`file` objects into the :attr:`files` dict you can call this method to automatically close them all in one go. """ if self.closed: return try: files = itervalues(self.files) except AttributeError: files = () for f in files: try: f.close() except Exception: pass self.closed = True def get_environ(self): """Return the built environ.""" input_stream = self.input_stream content_length = self.content_length content_type = self.content_type if input_stream is not None: start_pos = input_stream.tell() input_stream.seek(0, 2) end_pos = input_stream.tell() input_stream.seek(start_pos) content_length = end_pos - start_pos elif content_type == 'multipart/form-data': values = CombinedMultiDict([self.form, self.files]) input_stream, content_length, boundary = \ stream_encode_multipart(values, charset=self.charset) content_type += '; boundary="%s"' % boundary elif content_type == 'application/x-www-form-urlencoded': #py2v3 review values = url_encode(self.form, charset=self.charset) values = values.encode('ascii') content_length = len(values) input_stream = BytesIO(values) else: input_stream = _empty_stream result = {} if self.environ_base: result.update(self.environ_base) def _path_encode(x): return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset) qs = wsgi_encoding_dance(self.query_string) result.update({ 'REQUEST_METHOD': self.method, 'SCRIPT_NAME': _path_encode(self.script_root), 'PATH_INFO': _path_encode(self.path), 'QUERY_STRING': qs, 'SERVER_NAME': self.server_name, 'SERVER_PORT': str(self.server_port), 'HTTP_HOST': self.host, 'SERVER_PROTOCOL': self.server_protocol, 'CONTENT_TYPE': content_type or '', 'CONTENT_LENGTH': str(content_length or '0'), 'wsgi.version': self.wsgi_version, 'wsgi.url_scheme': self.url_scheme, 'wsgi.input': input_stream, 'wsgi.errors': self.errors_stream, 'wsgi.multithread': self.multithread, 'wsgi.multiprocess': self.multiprocess, 'wsgi.run_once': self.run_once }) for key, value in self.headers.to_wsgi_list(): result['HTTP_%s' % key.upper().replace('-', '_')] = value if self.environ_overrides: result.update(self.environ_overrides) return result def 
get_request(self, cls=None): """Returns a request with the data. If the request class is not specified :attr:`request_class` is used. :param cls: The request wrapper to use. """ if cls is None: cls = self.request_class return cls(self.get_environ()) class ClientRedirectError(Exception): """ If a redirect loop is detected when using follow_redirects=True with the :cls:`Client`, then this exception is raised. """ class Client(object): """This class allows to send requests to a wrapped application. The response wrapper can be a class or factory function that takes three arguments: app_iter, status and headers. The default response wrapper just returns a tuple. Example:: class ClientResponse(BaseResponse): ... client = Client(MyApplication(), response_wrapper=ClientResponse) The use_cookies parameter indicates whether cookies should be stored and sent for subsequent requests. This is True by default, but passing False will disable this behaviour. If you want to request some subdomain of your application you may set `allow_subdomain_redirects` to `True` as if not no external redirects are allowed. .. versionadded:: 0.5 `use_cookies` is new in this version. Older versions did not provide builtin cookie support. """ def __init__(self, application, response_wrapper=None, use_cookies=True, allow_subdomain_redirects=False): self.application = application self.response_wrapper = response_wrapper if use_cookies: self.cookie_jar = _TestCookieJar() else: self.cookie_jar = None self.allow_subdomain_redirects = allow_subdomain_redirects def set_cookie(self, server_name, key, value='', max_age=None, expires=None, path='/', domain=None, secure=None, httponly=False, charset='utf-8'): """Sets a cookie in the client's cookie jar. The server name is required and has to match the one that is also passed to the open call. """ assert self.cookie_jar is not None, 'cookies disabled' header = dump_cookie(key, value, max_age, expires, path, domain, secure, httponly, charset) environ = create_environ(path, base_url='http://' + server_name) headers = [('Set-Cookie', header)] self.cookie_jar.extract_wsgi(environ, headers) def delete_cookie(self, server_name, key, path='/', domain=None): """Deletes a cookie in the test client.""" self.set_cookie(server_name, key, expires=0, max_age=0, path=path, domain=domain) def run_wsgi_app(self, environ, buffered=False): """Runs the wrapped WSGI app with the given environment.""" if self.cookie_jar is not None: self.cookie_jar.inject_wsgi(environ) rv = run_wsgi_app(self.application, environ, buffered=buffered) if self.cookie_jar is not None: self.cookie_jar.extract_wsgi(environ, rv[2]) return rv def resolve_redirect(self, response, new_location, environ, buffered=False): """Resolves a single redirect and triggers the request again directly on this redirect client. """ scheme, netloc, script_root, qs, anchor = url_parse(new_location) base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/' cur_server_name = netloc.split(':', 1)[0].split('.') real_server_name = get_host(environ).rsplit(':', 1)[0].split('.') if self.allow_subdomain_redirects: allowed = cur_server_name[-len(real_server_name):] == real_server_name else: allowed = cur_server_name == real_server_name if not allowed: raise RuntimeError('%r does not support redirect to ' 'external targets' % self.__class__) status_code = int(response[1].split(None, 1)[0]) if status_code == 307: method = environ['REQUEST_METHOD'] else: method = 'GET' # For redirect handling we temporarily disable the response # wrapper. 
This is not threadsafe but not a real concern # since the test client must not be shared anyways. old_response_wrapper = self.response_wrapper self.response_wrapper = None try: return self.open(path=script_root, base_url=base_url, query_string=qs, as_tuple=True, buffered=buffered, method=method) finally: self.response_wrapper = old_response_wrapper def open(self, *args, **kwargs): """Takes the same arguments as the :class:`EnvironBuilder` class with some additions: You can provide a :class:`EnvironBuilder` or a WSGI environment as only argument instead of the :class:`EnvironBuilder` arguments and two optional keyword arguments (`as_tuple`, `buffered`) that change the type of the return value or the way the application is executed. .. versionchanged:: 0.5 If a dict is provided as file in the dict for the `data` parameter the content type has to be called `content_type` now instead of `mimetype`. This change was made for consistency with :class:`werkzeug.FileWrapper`. The `follow_redirects` parameter was added to :func:`open`. Additional parameters: :param as_tuple: Returns a tuple in the form ``(environ, result)`` :param buffered: Set this to True to buffer the application run. This will automatically close the application for you as well. :param follow_redirects: Set this to True if the `Client` should follow HTTP redirects. """ as_tuple = kwargs.pop('as_tuple', False) buffered = kwargs.pop('buffered', False) follow_redirects = kwargs.pop('follow_redirects', False) environ = None if not kwargs and len(args) == 1: if isinstance(args[0], EnvironBuilder): environ = args[0].get_environ() elif isinstance(args[0], dict): environ = args[0] if environ is None: builder = EnvironBuilder(*args, **kwargs) try: environ = builder.get_environ() finally: builder.close() response = self.run_wsgi_app(environ, buffered=buffered) # handle redirects redirect_chain = [] while 1: status_code = int(response[1].split(None, 1)[0]) if status_code not in (301, 302, 303, 305, 307) \ or not follow_redirects: break new_location = response[2]['location'] method = 'GET' if status_code == 307: method = environ['REQUEST_METHOD'] new_redirect_entry = (new_location, status_code) if new_redirect_entry in redirect_chain: raise ClientRedirectError('loop detected') redirect_chain.append(new_redirect_entry) environ, response = self.resolve_redirect(response, new_location, environ, buffered=buffered) if self.response_wrapper is not None: response = self.response_wrapper(*response) if as_tuple: return environ, response return response def get(self, *args, **kw): """Like open but method is enforced to GET.""" kw['method'] = 'GET' return self.open(*args, **kw) def patch(self, *args, **kw): """Like open but method is enforced to PATCH.""" kw['method'] = 'PATCH' return self.open(*args, **kw) def post(self, *args, **kw): """Like open but method is enforced to POST.""" kw['method'] = 'POST' return self.open(*args, **kw) def head(self, *args, **kw): """Like open but method is enforced to HEAD.""" kw['method'] = 'HEAD' return self.open(*args, **kw) def put(self, *args, **kw): """Like open but method is enforced to PUT.""" kw['method'] = 'PUT' return self.open(*args, **kw) def delete(self, *args, **kw): """Like open but method is enforced to DELETE.""" kw['method'] = 'DELETE' return self.open(*args, **kw) def options(self, *args, **kw): """Like open but method is enforced to OPTIONS.""" kw['method'] = 'OPTIONS' return self.open(*args, **kw) def trace(self, *args, **kw): """Like open but method is enforced to TRACE.""" kw['method'] = 'TRACE' 
return self.open(*args, **kw) def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.application ) def create_environ(*args, **kwargs): """Create a new WSGI environ dict based on the values passed. The first parameter should be the path of the request which defaults to '/'. The second one can either be an absolute path (in that case the host is localhost:80) or a full path to the request with scheme, netloc port and the path to the script. This accepts the same arguments as the :class:`EnvironBuilder` constructor. .. versionchanged:: 0.5 This function is now a thin wrapper over :class:`EnvironBuilder` which was added in 0.5. The `headers`, `environ_base`, `environ_overrides` and `charset` parameters were added. """ builder = EnvironBuilder(*args, **kwargs) try: return builder.get_environ() finally: builder.close() def run_wsgi_app(app, environ, buffered=False): """Return a tuple in the form (app_iter, status, headers) of the application output. This works best if you pass it an application that returns an iterator all the time. Sometimes applications may use the `write()` callable returned by the `start_response` function. This tries to resolve such edge cases automatically. But if you don't get the expected output you should set `buffered` to `True` which enforces buffering. If passed an invalid WSGI application the behavior of this function is undefined. Never pass non-conforming WSGI applications to this function. :param app: the application to execute. :param buffered: set to `True` to enforce buffering. :return: tuple in the form ``(app_iter, status, headers)`` """ environ = _get_environ(environ) response = [] buffer = [] def start_response(status, headers, exc_info=None): if exc_info is not None: reraise(*exc_info) response[:] = [status, headers] return buffer.append app_iter = app(environ, start_response) # when buffering we emit the close call early and convert the # application iterator into a regular list if buffered: close_func = getattr(app_iter, 'close', None) try: app_iter = list(app_iter) finally: if close_func is not None: close_func() # otherwise we iterate the application iter until we have # a response, chain the already received data with the already # collected data and wrap it in a new `ClosingIterator` if # we have a close callable. else: while not response: buffer.append(next(app_iter)) if buffer: close_func = getattr(app_iter, 'close', None) app_iter = chain(buffer, app_iter) if close_func is not None: app_iter = ClosingIterator(app_iter, close_func) return app_iter, response[0], Headers(response[1])
gpl-2.0
-1,679,050,292,771,921,700
-4,857,412,923,141,352,000
37.374439
82
0.582676
false
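A minimal usage sketch for the test helpers above (EnvironBuilder, Client, run_wsgi_app), assuming the standard werkzeug import paths; the demo WSGI app is a placeholder, while the helper API is taken from the code itself.

from werkzeug.test import Client, EnvironBuilder, run_wsgi_app
from werkzeug.wrappers import BaseResponse

def demo_app(environ, start_response):
    # trivial WSGI app used only to exercise the client
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

# response_wrapper turns the (app_iter, status, headers) tuple into a response object
client = Client(demo_app, response_wrapper=BaseResponse)
resp = client.get('/?x=1')
assert resp.status_code == 200 and resp.data == b'hello'

# building an environ directly, e.g. to feed run_wsgi_app
builder = EnvironBuilder(path='/submit', method='POST', data={'field': 'value'})
try:
    environ = builder.get_environ()
finally:
    builder.close()
app_iter, status, headers = run_wsgi_app(demo_app, environ)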
kennydude/django-rest-framework
tests/test_htmlrenderer.py
79
4406
from __future__ import unicode_literals import django.template.loader from django.conf.urls import url from django.core.exceptions import PermissionDenied from django.http import Http404 from django.template import Template, TemplateDoesNotExist from django.test import TestCase from django.utils import six from rest_framework import status from rest_framework.decorators import api_view, renderer_classes from rest_framework.renderers import TemplateHTMLRenderer from rest_framework.response import Response @api_view(('GET',)) @renderer_classes((TemplateHTMLRenderer,)) def example(request): """ A view that can returns an HTML representation. """ data = {'object': 'foobar'} return Response(data, template_name='example.html') @api_view(('GET',)) @renderer_classes((TemplateHTMLRenderer,)) def permission_denied(request): raise PermissionDenied() @api_view(('GET',)) @renderer_classes((TemplateHTMLRenderer,)) def not_found(request): raise Http404() urlpatterns = [ url(r'^$', example), url(r'^permission_denied$', permission_denied), url(r'^not_found$', not_found), ] class TemplateHTMLRendererTests(TestCase): urls = 'tests.test_htmlrenderer' def setUp(self): """ Monkeypatch get_template """ self.get_template = django.template.loader.get_template def get_template(template_name, dirs=None): if template_name == 'example.html': return Template("example: {{ object }}") raise TemplateDoesNotExist(template_name) def select_template(template_name_list, dirs=None, using=None): if template_name_list == ['example.html']: return Template("example: {{ object }}") raise TemplateDoesNotExist(template_name_list[0]) django.template.loader.get_template = get_template django.template.loader.select_template = select_template def tearDown(self): """ Revert monkeypatching """ django.template.loader.get_template = self.get_template def test_simple_html_view(self): response = self.client.get('/') self.assertContains(response, "example: foobar") self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') def test_not_found_html_view(self): response = self.client.get('/not_found') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertEqual(response.content, six.b("404 Not Found")) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') def test_permission_denied_html_view(self): response = self.client.get('/permission_denied') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertEqual(response.content, six.b("403 Forbidden")) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') class TemplateHTMLRendererExceptionTests(TestCase): urls = 'tests.test_htmlrenderer' def setUp(self): """ Monkeypatch get_template """ self.get_template = django.template.loader.get_template def get_template(template_name): if template_name == '404.html': return Template("404: {{ detail }}") if template_name == '403.html': return Template("403: {{ detail }}") raise TemplateDoesNotExist(template_name) django.template.loader.get_template = get_template def tearDown(self): """ Revert monkeypatching """ django.template.loader.get_template = self.get_template def test_not_found_html_view_with_template(self): response = self.client.get('/not_found') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertTrue(response.content in ( six.b("404: Not found"), six.b("404 Not Found"))) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') def test_permission_denied_html_view_with_template(self): response = 
self.client.get('/permission_denied') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertTrue(response.content in ( six.b("403: Permission denied"), six.b("403 Forbidden"))) self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
bsd-2-clause
1,085,852,497,886,266,200
-2,981,518,110,136,159,700
33.421875
78
0.662733
false
codeforamerica/comport
migrations/versions/0d78d545906f_.py
1
1135
"""Add 'is_public' flags for datasets Revision ID: 0d78d545906f Revises: 6d30846080b2 Create Date: 2016-06-27 15:30:14.415519 """ # revision identifiers, used by Alembic. revision = '0d78d545906f' down_revision = '6d30846080b2' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('departments', sa.Column('is_public_assaults_on_officers', sa.Boolean(), server_default=sa.true(), nullable=False)) op.add_column('departments', sa.Column('is_public_citizen_complaints', sa.Boolean(), server_default=sa.true(), nullable=False)) op.add_column('departments', sa.Column('is_public_officer_involved_shootings', sa.Boolean(), server_default=sa.true(), nullable=False)) op.add_column('departments', sa.Column('is_public_use_of_force_incidents', sa.Boolean(), server_default=sa.true(), nullable=False)) def downgrade(): op.drop_column('departments', 'is_public_use_of_force_incidents') op.drop_column('departments', 'is_public_officer_involved_shootings') op.drop_column('departments', 'is_public_citizen_complaints') op.drop_column('departments', 'is_public_assaults_on_officers')
bsd-3-clause
-8,654,173,057,296,061,000
7,167,935,091,166,967,000
39.535714
139
0.732159
false
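One detail of the migration above is worth spelling out: because the new columns are declared nullable=False, the server_default=sa.true() is what allows the ALTER TABLE to succeed on a departments table that already contains rows. A sketch of the same pattern for a hypothetical extra flag (the column name is invented for illustration):

from alembic import op
import sqlalchemy as sa

def upgrade():
    # server_default backfills existing rows, which is what permits nullable=False
    op.add_column('departments', sa.Column('is_public_demo_dataset', sa.Boolean(),
                                           server_default=sa.true(), nullable=False))

def downgrade():
    op.drop_column('departments', 'is_public_demo_dataset')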
alihalabyah/flexx
exp/wgui.py
22
5614
# -*- coding: utf-8 -*- # Copyright (c) 2014, Almar Klein """ Little experiment for the purpose for creating a GUI toolkit based on web technologies like HTML/CSS/JS. Applications build with such a GUI can be easily deployed on all platforms and also run in a web browser... Usefull links: * http://www.aclevername.com/articles/python-webgui/ """ import time #from zoof.qt import QtCore, QtGui, QtWebKit from PyQt4 import QtCore, QtGui, QtWebKit HTML = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> <head> <title></title> <link href="demo.css" rel="stylesheet" type="text/css"></link> <!-- <script src="jquery-1.11.1.min.js"></script> --> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script> <script type="text/javascript"> // <![CDATA[ function send(msg) { // we communicate to Python by modifying the title document.title = "null"; document.title = msg; } function got_a_click(e) { send('got-a-click:' + e.target.id); } function got_a_move(e) { if (e.clientX & e.clientY) { //send('got-a-move:' + e.target.id); send('got-a-move:' + e.target.id + '-' + e.clientX + ',' + e.clientY); } } $(document).ready(function() { $('#messages').click(got_a_click); //send($.toJSON('document.ready')); send('document.ready'); }) // ]]> </script> </head> <body> <h1>Python + Web GUI Demo</h1> <h2>Uptime</h2> <p class="uptime"> Python uptime: <span id="uptime-value">?</span> seconds. </p> <h2>Messages</h2> <p id="messages"> Click here (yes, anywhere here)...<br/> </p> </body> </html> """ class Page(QtWebKit.QWebPage): """ Subclass Pagse to catch JS errors and prompts. """ def javaScriptConsoleMessage(self, msg, linenr, sourceID): print('ERROR: on line %i in %r: %s' % (linenr, sourceID, msg)) def javaScriptAlert(self, frame, msg): print('ALERT:', msg) def javaScriptConfirm(self, frame, msg): while True: a = input('Need confirm from JS: msg [Y/n] ') if not a or a.lower() == 'y': return True elif a.lower() == 'n': return False def javaScriptPrompt(self, frame, *args): pass # todo class Main(QtWebKit.QWebView): """ Our main application window. 
""" def __init__(self): super().__init__(None) self.setPage(Page(self)) self.page().mainFrame().setHtml(HTML) self.titleChanged.connect(self.on_title_changed) self._timer = QtCore.QTimer() self._timer.setSingleShot(False) self._timer.timeout.connect(self.on_timer) self._timer.start(207) self._t0 = time.time() def on_error(self, msg): print('ERROR:', msg) def on_timer(self): t = time.time() - self._t0 msg = 'document.getElementById("uptime-value").innerHTML = %1.01f' % t self.web_send(msg) def web_send(self, msg): f = self.page().mainFrame() f.evaluateJavaScript(msg) def on_title_changed(self, title): if title == 'null': return print('MSG:', title) if title.startswith("got-a-move:test-widget"): xy = title.split('-')[-1] x, y = [int(i)-20 for i in xy.split(',')] msg = 'document.getElementById("test-widget").style.left = "%ipx";' % x msg += 'document.getElementById("test-widget").style.top = "%ipx";' % y self.web_send(msg) print(title) if title == "got-a-click:messages": #self.web_send("confirm('Please confitm');") #self.web_send("alert('wooot');") self.web_send(""" $(document.body).append("<div id='test-widget' class='draggable'>This is a paragraph</div>"); $("#test-widget").css({ "width": "100px", "height": "35px", "position":"absolute", "top":"100px", "left":"100px", "background": "red", "overflow":"hidden", "user-select": "none", "handle": "", "cursor": "move", }); // Implement some dragging (sort of) $("#test-widget")._down = false; $("#test-widget").mousedown(function(e){this._down=true}); $("#test-widget").mouseup(function(e){this._down=false}); $("#test-widget").mouseleave(function(e){this._down=false}); $("#test-widget").mousemove(function(e){if (this._down) {got_a_move(e);}}); """) if __name__ == '__main__': app = QtGui.QApplication([]) m = Main() m.show() app.exec_()
bsd-2-clause
-8,637,080,103,860,454,000
3,664,462,770,629,574,700
28.239583
117
0.490203
false
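The experiment above communicates in both directions through the web page: JS-to-Python by rewriting document.title (picked up via the titleChanged signal) and Python-to-JS via evaluateJavaScript. A stripped-down sketch of just that bridge, assuming PyQt4 with QtWebKit is available; the page markup is a placeholder.

from PyQt4 import QtGui, QtWebKit

PAGE = ("<html><head><script>"
        "function send(msg){document.title='null';document.title=msg;}"
        "</script></head>"
        "<body onclick=\"send('clicked')\">click me</body></html>")

class Bridge(QtWebKit.QWebView):
    def __init__(self):
        super().__init__(None)
        self.setHtml(PAGE)
        self.titleChanged.connect(self.on_message)   # JS -> Python

    def on_message(self, title):
        if title == 'null':                          # sentinel used to force a change event
            return
        # Python -> JS: run a snippet inside the page
        self.page().mainFrame().evaluateJavaScript(
            "document.body.innerHTML = 'Python saw: %s';" % title)

app = QtGui.QApplication([])
view = Bridge()
view.show()
app.exec_()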
forseti-security/forseti-security
google/cloud/forseti/common/gcp_api/admin_directory.py
1
10459
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Wrapper for Admin Directory API client.""" from builtins import object from googleapiclient import errors from httplib2 import HttpLib2Error from google.auth.exceptions import RefreshError from google.cloud.forseti.common.gcp_api import _base_repository from google.cloud.forseti.common.gcp_api import api_helpers from google.cloud.forseti.common.gcp_api import errors as api_errors from google.cloud.forseti.common.gcp_api import repository_mixins from google.cloud.forseti.common.util import logger LOGGER = logger.get_logger(__name__) API_NAME = 'admin' REQUIRED_SCOPES = frozenset([ 'https://www.googleapis.com/auth/admin.directory.group.readonly', 'https://www.googleapis.com/auth/admin.directory.user.readonly' ]) GSUITE_AUTH_FAILURE_MESSAGE = ( 'Failed to retrieve G Suite data due to authentication ' 'failure. Please make sure your forseti_server_config.yaml ' 'file contains the most updated information and enable G ' 'Suite Groups Collection if you haven\'t done so. Instructions' ' on how to enable: https://forsetisecurity.org/docs/latest/' 'configure/inventory/gsuite.html') class AdminDirectoryRepositoryClient(_base_repository.BaseRepositoryClient): """Admin Directory API Respository Client.""" def __init__(self, credentials, quota_max_calls=None, quota_period=1.0, use_rate_limiter=True, cache_discovery=False, cache=None): """Constructor. Args: credentials (object): An google.auth credentials object. The admin directory API needs a service account credential with delegated super admin role. quota_max_calls (int): Allowed requests per <quota_period> for the API. quota_period (float): The time period to track requests over. use_rate_limiter (bool): Set to false to disable the use of a rate limiter for this service. cache_discovery (bool): When set to true, googleapiclient will cache HTTP requests to API discovery endpoints. cache (googleapiclient.discovery_cache.base.Cache): instance of a class that can cache API discovery documents. If None, googleapiclient will attempt to choose a default. """ if not quota_max_calls: use_rate_limiter = False self._groups = None self._members = None self._users = None super(AdminDirectoryRepositoryClient, self).__init__( API_NAME, versions=['directory_v1'], credentials=credentials, quota_max_calls=quota_max_calls, quota_period=quota_period, use_rate_limiter=use_rate_limiter, cache_discovery=cache_discovery, cache=cache) # Turn off docstrings for properties. 
# pylint: disable=missing-return-doc, missing-return-type-doc @property def groups(self): """Returns an _AdminDirectoryGroupsRepository instance.""" if not self._groups: self._groups = self._init_repository( _AdminDirectoryGroupsRepository) return self._groups @property def members(self): """Returns an _AdminDirectoryMembersRepository instance.""" if not self._members: self._members = self._init_repository( _AdminDirectoryMembersRepository) return self._members @property def users(self): """Returns an _AdminDirectoryUsersRepository instance.""" if not self._users: self._users = self._init_repository( _AdminDirectoryUsersRepository) return self._users # pylint: enable=missing-return-doc, missing-return-type-doc class _AdminDirectoryGroupsRepository( repository_mixins.ListQueryMixin, _base_repository.GCPRepository): """Implementation of Admin Directory Groups repository.""" def __init__(self, **kwargs): """Constructor. Args: **kwargs (dict): The args to pass into GCPRepository.__init__() """ super(_AdminDirectoryGroupsRepository, self).__init__( key_field='', component='groups', **kwargs) class _AdminDirectoryMembersRepository( repository_mixins.ListQueryMixin, _base_repository.GCPRepository): """Implementation of Admin Directory Members repository.""" def __init__(self, **kwargs): """Constructor. Args: **kwargs (dict): The args to pass into GCPRepository.__init__() """ super(_AdminDirectoryMembersRepository, self).__init__( key_field='groupKey', component='members', **kwargs) class _AdminDirectoryUsersRepository( repository_mixins.ListQueryMixin, _base_repository.GCPRepository): """Implementation of Admin Directory Users repository.""" def __init__(self, **kwargs): """Constructor. Args: **kwargs (dict): The args to pass into GCPRepository.__init__() """ super(_AdminDirectoryUsersRepository, self).__init__( key_field='', component='users', **kwargs) class AdminDirectoryClient(object): """GSuite Admin Directory API Client.""" def __init__(self, global_configs, **kwargs): """Initialize. Args: global_configs (dict): Global configurations. **kwargs (dict): The kwargs. """ credentials = api_helpers.get_delegated_credential( global_configs.get('domain_super_admin_email'), REQUIRED_SCOPES) max_calls, quota_period = api_helpers.get_ratelimiter_config( global_configs, API_NAME) cache_discovery = global_configs[ 'cache_discovery'] if 'cache_discovery' in global_configs else False self.repository = AdminDirectoryRepositoryClient( credentials=credentials, quota_max_calls=max_calls, quota_period=quota_period, use_rate_limiter=kwargs.get('use_rate_limiter', True), cache_discovery=cache_discovery, cache=global_configs.get('cache')) def get_group_members(self, group_key): """Get all the members for specified groups. Args: group_key (str): The group's unique id assigned by the Admin API. Returns: list: A list of member objects from the API. Raises: api_errors.ApiExecutionError: If group member retrieval fails. """ try: paged_results = self.repository.members.list(group_key) result = api_helpers.flatten_list_results(paged_results, 'members') LOGGER.debug('Getting all the members for group_key = %s,' ' result = %s', group_key, result) return result except (errors.HttpError, HttpLib2Error) as e: raise api_errors.ApiExecutionError(group_key, e) def get_groups(self, customer_id='my_customer'): """Get all the groups for a given customer_id. A note on customer_id='my_customer'. This is a magic string instead of using the real customer id. 
See: https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups Args: customer_id (str): The customer id to scope the request to. Returns: list: A list of group objects returned from the API. Raises: api_errors.ApiExecutionError: If groups retrieval fails. RefreshError: If the authentication fails. """ try: paged_results = self.repository.groups.list(customer=customer_id) flattened_results = api_helpers.flatten_list_results( paged_results, 'groups') LOGGER.debug('Getting all the groups for customer_id = %s,' ' flattened_results = %s', customer_id, flattened_results) return flattened_results except RefreshError as e: # Authentication failed, log before raise. LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE) raise e except (errors.HttpError, HttpLib2Error) as e: raise api_errors.ApiExecutionError('groups', e) def get_users(self, customer_id='my_customer'): """Get all the users for a given customer_id. A note on customer_id='my_customer'. This is a magic string instead of using the real customer id. See: https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups Args: customer_id (str): The customer id to scope the request to. Returns: list: A list of user objects returned from the API. Raises: api_errors.ApiExecutionError: If groups retrieval fails. RefreshError: If the authentication fails. """ try: paged_results = self.repository.users.list(customer=customer_id, viewType='admin_view') flattened_results = api_helpers.flatten_list_results( paged_results, 'users') LOGGER.debug('Getting all the users for customer_id = %s,' ' flattened_results = %s', customer_id, flattened_results) return flattened_results except RefreshError as e: # Authentication failed, log before raise. LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE) raise e except (errors.HttpError, HttpLib2Error) as e: raise api_errors.ApiExecutionError('users', e)
apache-2.0
-6,559,875,070,503,918,000
-8,229,640,658,302,804,000
37.032727
103
0.627115
false
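A hedged usage sketch for AdminDirectoryClient above. The email and flag values are placeholders; a real run also needs a service-account credential with domain-wide delegation available to api_helpers.get_delegated_credential, and rate-limit settings are resolved separately through get_ratelimiter_config (whose config shape is not shown here).

global_configs = {
    'domain_super_admin_email': 'admin@example.com',   # placeholder
    'cache_discovery': False,
}

client = AdminDirectoryClient(global_configs)
groups = client.get_groups()                           # all G Suite groups for 'my_customer'
for group in groups:
    members = client.get_group_members(group['id'])    # Admin SDK group id
users = client.get_users()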
kwlzn/pants
src/python/pants/base/payload_field.py
8
4251
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import json from abc import abstractmethod from hashlib import sha1 from twitter.common.collections import OrderedSet from pants.util.meta import AbstractClass def stable_json_dumps(obj): return json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True) def stable_json_sha1(obj): """ :API: public """ return sha1(stable_json_dumps(obj)).hexdigest() def combine_hashes(hashes): """A simple helper function to combine other hashes. Sorts the hashes before rolling them in.""" hasher = sha1() for h in sorted(hashes): hasher.update(h) return hasher.hexdigest() class PayloadField(AbstractClass): """An immutable, hashable structure to be mixed into Payload instances. :API: public """ _fingerprint_memo = None def fingerprint(self): """A memoized sha1 hexdigest hashing the contents of this PayloadField The fingerprint returns either a bytestring or None. If the return is None, consumers of the fingerprint may choose to elide this PayloadField from their combined hash computation. :API: public """ if self._fingerprint_memo is None: self._fingerprint_memo = self._compute_fingerprint() return self._fingerprint_memo def mark_dirty(self): """Invalidates the memoized fingerprint for this field. Exposed for testing. :API: public """ self._fingerprint_memo = None @abstractmethod def _compute_fingerprint(self): """This method will be called and the result memoized for ``PayloadField.fingerprint``.""" pass @property def value(self): """ :API: public """ return self class FingerprintedMixin(object): """Mixin this class to make your class suitable for passing to FingerprintedField. :API: public """ def fingerprint(self): """Override this method to implement a fingerprint for your class. :API: public :returns: a sha1 hexdigest hashing the contents of this structure.""" raise NotImplementedError() class FingerprintedField(PayloadField): """Use this field to fingerprint any class that mixes in FingerprintedMixin. The caller must ensure that the class properly implements fingerprint() to hash the contents of the object. :API: public """ def __init__(self, value): self._value = value def _compute_fingerprint(self): return self._value.fingerprint() @property def value(self): return self._value class PythonRequirementsField(frozenset, PayloadField): """A frozenset subclass that mixes in PayloadField. Must be initialized with an iterable of PythonRequirement instances. :API: public """ def _compute_fingerprint(self): def fingerprint_iter(): for req in self: hash_items = ( repr(req._requirement), req._repository, req._name, req._use_2to3, req.compatibility, ) yield stable_json_sha1(hash_items) return combine_hashes(fingerprint_iter()) class ExcludesField(OrderedSet, PayloadField): """An OrderedSet subclass that mixes in PayloadField. Must be initialized with an iterable of Excludes instances. :API: public """ def _compute_fingerprint(self): return stable_json_sha1(tuple(repr(exclude) for exclude in self)) class JarsField(tuple, PayloadField): """A tuple subclass that mixes in PayloadField. Must be initialized with an iterable of JarDependency instances. :API: public """ def _compute_fingerprint(self): return stable_json_sha1(tuple(jar.cache_key() for jar in self)) class PrimitiveField(PayloadField): """A general field for primitive types. 
As long as the contents are JSON representable, their hash can be stably inferred. :API: public """ def __init__(self, underlying=None): self._underlying = underlying @property def value(self): return self._underlying def _compute_fingerprint(self): return stable_json_sha1(self._underlying)
apache-2.0
3,014,135,194,330,294,300
8,477,899,619,957,186,000
23.431034
99
0.702658
false
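A short sketch of the two fingerprinting paths defined above: PrimitiveField for anything JSON-representable, and FingerprintedMixin with FingerprintedField for custom types (ToolVersion below is an invented example class).

opts = PrimitiveField({'strict': True, 'level': 3})
print(opts.fingerprint())            # sha1 hexdigest of the stable JSON dump

class ToolVersion(FingerprintedMixin):
    def __init__(self, name, rev):
        self.name, self.rev = name, rev
    def fingerprint(self):
        return stable_json_sha1([self.name, self.rev])

field = FingerprintedField(ToolVersion('scalac', '2.11.8'))
print(field.fingerprint())           # delegates to ToolVersion.fingerprint(), result is memoized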
leiferikb/bitpop
build/third_party/twisted_10_2/twisted/persisted/sob.py
60
6366
# -*- test-case-name: twisted.test.test_sob -*- # Copyright (c) 2001-2008 Twisted Matrix Laboratories. # See LICENSE for details. # """ Save and load Small OBjects to and from files, using various formats. Maintainer: Moshe Zadka """ import os, sys try: import cPickle as pickle except ImportError: import pickle try: import cStringIO as StringIO except ImportError: import StringIO from twisted.python import log, runtime from twisted.python.hashlib import md5 from twisted.persisted import styles from zope.interface import implements, Interface # Note: # These encrypt/decrypt functions only work for data formats # which are immune to having spaces tucked at the end. # All data formats which persist saves hold that condition. def _encrypt(passphrase, data): from Crypto.Cipher import AES as cipher leftover = len(data) % cipher.block_size if leftover: data += ' '*(cipher.block_size - leftover) return cipher.new(md5(passphrase).digest()[:16]).encrypt(data) def _decrypt(passphrase, data): from Crypto.Cipher import AES return AES.new(md5(passphrase).digest()[:16]).decrypt(data) class IPersistable(Interface): """An object which can be saved in several formats to a file""" def setStyle(style): """Set desired format. @type style: string (one of 'pickle' or 'source') """ def save(tag=None, filename=None, passphrase=None): """Save object to file. @type tag: string @type filename: string @type passphrase: string """ class Persistent: implements(IPersistable) style = "pickle" def __init__(self, original, name): self.original = original self.name = name def setStyle(self, style): """Set desired format. @type style: string (one of 'pickle' or 'source') """ self.style = style def _getFilename(self, filename, ext, tag): if filename: finalname = filename filename = finalname + "-2" elif tag: filename = "%s-%s-2.%s" % (self.name, tag, ext) finalname = "%s-%s.%s" % (self.name, tag, ext) else: filename = "%s-2.%s" % (self.name, ext) finalname = "%s.%s" % (self.name, ext) return finalname, filename def _saveTemp(self, filename, passphrase, dumpFunc): f = open(filename, 'wb') if passphrase is None: dumpFunc(self.original, f) else: s = StringIO.StringIO() dumpFunc(self.original, s) f.write(_encrypt(passphrase, s.getvalue())) f.close() def _getStyle(self): if self.style == "source": from twisted.persisted.aot import jellyToSource as dumpFunc ext = "tas" else: def dumpFunc(obj, file): pickle.dump(obj, file, 2) ext = "tap" return ext, dumpFunc def save(self, tag=None, filename=None, passphrase=None): """Save object to file. @type tag: string @type filename: string @type passphrase: string """ ext, dumpFunc = self._getStyle() if passphrase: ext = 'e' + ext finalname, filename = self._getFilename(filename, ext, tag) log.msg("Saving "+self.name+" application to "+finalname+"...") self._saveTemp(filename, passphrase, dumpFunc) if runtime.platformType == "win32" and os.path.isfile(finalname): os.remove(finalname) os.rename(filename, finalname) log.msg("Saved.") # "Persistant" has been present since 1.0.7, so retain it for compatibility Persistant = Persistent class _EverythingEphemeral(styles.Ephemeral): initRun = 0 def __init__(self, mainMod): """ @param mainMod: The '__main__' module that this class will proxy. """ self.mainMod = mainMod def __getattr__(self, key): try: return getattr(self.mainMod, key) except AttributeError: if self.initRun: raise else: log.msg("Warning! Loading from __main__: %s" % key) return styles.Ephemeral() def load(filename, style, passphrase=None): """Load an object from a file. 
Deserialize an object from a file. The file can be encrypted. @param filename: string @param style: string (one of 'pickle' or 'source') @param passphrase: string """ mode = 'r' if style=='source': from twisted.persisted.aot import unjellyFromSource as _load else: _load, mode = pickle.load, 'rb' if passphrase: fp = StringIO.StringIO(_decrypt(passphrase, open(filename, 'rb').read())) else: fp = open(filename, mode) ee = _EverythingEphemeral(sys.modules['__main__']) sys.modules['__main__'] = ee ee.initRun = 1 try: value = _load(fp) finally: # restore __main__ if an exception is raised. sys.modules['__main__'] = ee.mainMod styles.doUpgrade() ee.initRun = 0 persistable = IPersistable(value, None) if persistable is not None: persistable.setStyle(style) return value def loadValueFromFile(filename, variable, passphrase=None): """Load the value of a variable in a Python file. Run the contents of the file, after decrypting if C{passphrase} is given, in a namespace and return the result of the variable named C{variable}. @param filename: string @param variable: string @param passphrase: string """ if passphrase: mode = 'rb' else: mode = 'r' fileObj = open(filename, mode) d = {'__file__': filename} if passphrase: data = fileObj.read() data = _decrypt(passphrase, data) exec data in d, d else: exec fileObj in d, d value = d[variable] return value def guessType(filename): ext = os.path.splitext(filename)[1] return { '.tac': 'python', '.etac': 'python', '.py': 'python', '.tap': 'pickle', '.etap': 'pickle', '.tas': 'source', '.etas': 'source', }[ext] __all__ = ['loadValueFromFile', 'load', 'Persistent', 'Persistant', 'IPersistable', 'guessType']
gpl-3.0
-237,934,766,297,479,000
-7,109,578,826,378,533,000
27.044053
75
0.59158
false
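A minimal sketch of the save/load round trip provided by the module above, assuming a picklable object; the file name is arbitrary.

app_state = {'counter': 3, 'name': 'demo'}     # any picklable object
p = Persistent(app_state, 'demo')
p.setStyle('pickle')
p.save(filename='demo.tap')                    # written via a temp file, then renamed into place

restored = load('demo.tap', 'pickle')
assert restored == app_state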
madgik/exareme
Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/vtable/dummycoding.py
1
2450
import setpath import functions import json import collections registered=True def convert(data): if isinstance(data, basestring): return str(data) elif isinstance(data, collections.Mapping): return dict(map(convert, data.iteritems())) elif isinstance(data, collections.Iterable): return type(data)(map(convert, data)) else: return data class dummycoding(functions.vtable.vtbase.VT): def VTiter(self, *parsedArgs,**envars): largs, dictargs = self.full_parse(parsedArgs) if 'query' not in dictargs: raise functions.OperatorError(__name__.rsplit('.')[-1],"No query argument ") query = dictargs['query'] if 'metadata' not in dictargs: raise functions.OperatorError(__name__.rsplit('.')[-1],"No metadata ") metadata = json.loads(dictargs['metadata']) cur = envars['db'].cursor() c=cur.execute(query) schema = cur.getdescriptionsafe() no = 0 for myrow in c: first_tuple = [] schema1 = [] for item in xrange(len(schema)): if schema[item][0] in metadata: vals = metadata[schema[item][0]].split(',') vals.sort() for v in vals: newv = str(schema[item][0]) + '(' + str(v) + ')' schema1.append(newv) if myrow[item] == v: first_tuple.append(1) else : first_tuple.append(0) else: # print 'no', schema[item][0] newv = str(schema[item][0]) schema1.append(newv) first_tuple.append(myrow[item]) if no == 0: # print tuple((x,) for x in schema1) yield tuple((x,) for x in schema1) no =no+1 # print str(first_tuple) yield tuple(first_tuple,) def Source(): return functions.vtable.vtbase.VTGenerator(dummycoding) if not ('.' in __name__): """ This is needed to be able to test the function, put it at the end of every new function you create """ import sys import setpath from functions import * testfunction() if __name__ == "__main__": reload(sys) sys.setdefaultencoding('utf-8') import doctest doctest.testmod()
mit
-270,748,812,337,883,140
8,593,330,888,875,921,000
29.259259
88
0.517959
false
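The VTiter above dummy-codes every column listed in metadata: each such column is replaced by one 0/1 indicator column per value, named column(value), while other columns pass through unchanged. A standalone sketch of that expansion, independent of the madis/functions machinery:

def dummy_code(schema, rows, metadata):
    # schema: list of column names; metadata: {column: 'v1,v2,...'}
    def values(col):
        return sorted(metadata[col].split(',')) if col in metadata else None

    out_schema = []
    for col in schema:
        vals = values(col)
        out_schema.extend(['%s(%s)' % (col, v) for v in vals] if vals else [col])

    out_rows = []
    for row in rows:
        out = []
        for col, cell in zip(schema, row):
            vals = values(col)
            out.extend([1 if cell == v else 0 for v in vals] if vals else [cell])
        out_rows.append(out)
    return out_schema, out_rows

# dummy_code(['gender', 'age'], [['M', 31], ['F', 25]], {'gender': 'F,M'})
# -> (['gender(F)', 'gender(M)', 'age'], [[0, 1, 31], [1, 0, 25]])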
sloria/TextBlob
tests/test_inflect.py
2
1177
from nose.tools import assert_equals, assert_true from unittest import TestCase from textblob.en.inflect import ( plural_categories, singular_ie, singular_irregular, singular_uncountable, singular_uninflected, singularize, pluralize ) class InflectTestCase(TestCase): def s_singular_pluralize_test(self): assert_equals(pluralize('lens'), 'lenses') def s_singular_singularize_test(self): assert_equals(singularize('lenses'), 'lens') def diagnoses_singularize_test(self): assert_equals(singularize('diagnoses'), 'diagnosis') def bus_pluralize_test(self): assert_equals(pluralize('bus'), 'buses') def test_all_singular_s(self): for w in plural_categories['s-singular']: assert_equals(singularize(pluralize(w)), w) def test_all_singular_ie(self): for w in singular_ie: assert_true(pluralize(w).endswith('ies')) assert_equals(singularize(pluralize(w)), w) def test_all_singular_irregular(self): for singular_w in singular_irregular.values(): assert_equals(singular_irregular[pluralize(singular_w)], singular_w)
mit
-701,769,194,142,295,900
8,279,468,451,883,591,000
27.707317
80
0.669499
false
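The behaviour those tests pin down, shown as a direct usage example of the inflect helpers:

from textblob.en.inflect import pluralize, singularize

pluralize('bus')           # 'buses'
pluralize('lens')          # 'lenses'
singularize('lenses')      # 'lens'
singularize('diagnoses')   # 'diagnosis'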
ahaym/eden
modules/s3db/cms.py
4
67968
# -*- coding: utf-8 -*- """ Sahana Eden Content Management System Model @copyright: 2012-2015 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3ContentModel", "S3ContentMapModel", "S3ContentOrgModel", "S3ContentOrgGroupModel", "S3ContentUserModel", "cms_index", "cms_documentation", "cms_rheader", "cms_customise_post_fields", "cms_post_list_layout", "S3CMS", ) try: import json # try stdlib (Python 2.6) except ImportError: try: import simplejson as json # try external module except: import gluon.contrib.simplejson as json # fallback to pure-Python module from gluon import * from gluon.storage import Storage from ..s3 import * from s3layouts import S3AddResourceLink # Compact JSON encoding SEPARATORS = (",", ":") # ============================================================================= class S3ContentModel(S3Model): """ Content Management System """ names = ("cms_series", "cms_post", "cms_post_id", "cms_post_module", "cms_tag", "cms_tag_post", "cms_comment", ) def model(self): T = current.T db = current.db add_components = self.add_components configure = self.configure crud_strings = current.response.s3.crud_strings define_table = self.define_table set_method = self.set_method settings = current.deployment_settings # --------------------------------------------------------------------- # Series # - lists of Posts displaying in recent-first mode # tablename = "cms_series" define_table(tablename, Field("name", length=255, notnull=True, unique=True, label = T("Name"), ), Field("avatar", "boolean", default = False, label = T("Show author picture?"), represent = s3_yes_no_represent, ), Field("location", "boolean", default = False, label = T("Show Location?"), represent = s3_yes_no_represent, ), Field("richtext", "boolean", default = True, label = T("Rich Text?"), represent = s3_yes_no_represent, ), Field("replies", "boolean", default = False, label = T("Comments permitted?"), represent = s3_yes_no_represent, ), s3_comments(), # Multiple Roles (@ToDo: Implement the restriction) s3_roles_permitted(readable = False, writable = False ), *s3_meta_fields()) # CRUD Strings ADD_SERIES = T("Create Series") crud_strings[tablename] = Storage( label_create = ADD_SERIES, title_display = T("Series Details"), title_list = T("Series"), title_update = T("Edit Series"), title_upload = T("Import Series"), label_list_button = T("List Series"), msg_record_created = T("Series added"), msg_record_modified = T("Series updated"), msg_record_deleted = T("Series deleted"), msg_list_empty = T("No series 
currently defined")) # Reusable field translate = settings.get_L10n_translate_cms_series() represent = S3Represent(lookup=tablename, translate=translate) series_id = S3ReusableField("series_id", "reference %s" % tablename, label = T("Type"), # Even if this isn't always the use-case ondelete = "CASCADE", readable = False, writable = False, represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF(db, "cms_series.id", represent)), ) # Resource Configuration configure(tablename, create_next = URL(f="series", args=["[id]", "post"]), onaccept = self.cms_series_onaccept, ) # Components add_components(tablename, cms_post = "series_id", ) # --------------------------------------------------------------------- # Posts # - single blocks of [rich] text which can be embedded into a page, # be viewed as full pages or as part of a Series # if settings.get_cms_richtext(): body_represent = lambda body: XML(body) body_widget = s3_richtext_widget else: body_represent = lambda body: XML(s3_URLise(body)) body_widget = None tablename = "cms_post" define_table(tablename, self.super_link("doc_id", "doc_entity"), series_id(), Field("name", #notnull=True, comment = T("This isn't visible to the published site, but is used to allow menu items to point to the page"), label = T("Name"), ), Field("title", comment = T("The title of the page, as seen in the browser (optional)"), label = T("Title"), ), Field("body", "text", notnull=True, label = T("Body"), represent = body_represent, widget = body_widget, ), # @ToDo: Move this to link table? # - although this makes widget hard! self.gis_location_id(), # @ToDo: Move this to link table? # - although this makes widget hard! self.pr_person_id(label = T("Contact"), # Enable only in certain conditions readable = False, writable = False, ), Field("avatar", "boolean", default = False, label = T("Show author picture?"), represent = s3_yes_no_represent, ), Field("replies", "boolean", default = False, label = T("Comments permitted?"), represent = s3_yes_no_represent, ), s3_datetime(default = "now"), # @ToDo: Also have a datetime for 'Expires On' Field("expired", "boolean", default = False, label = T("Expired?"), represent = s3_yes_no_represent, ), #Field("published", "boolean", # default=True, # label=T("Published")), s3_comments(), # Multiple Roles (@ToDo: Implement the restriction) s3_roles_permitted(readable = False, writable = False ), *s3_meta_fields()) # CRUD Strings ADD_POST = T("Create Post") crud_strings[tablename] = Storage( label_create = ADD_POST, title_display = T("Post Details"), title_list = T("Posts"), title_update = T("Edit Post"), title_upload = T("Import Posts"), label_list_button = T("List Posts"), msg_record_created = T("Post added"), msg_record_modified = T("Post updated"), msg_record_deleted = T("Post deleted"), msg_list_empty = T("No posts currently available")) # Reusable field represent = S3Represent(lookup=tablename) post_id = S3ReusableField("post_id", "reference %s" % tablename, comment = S3AddResourceLink(c="cms", f="post", title=ADD_POST, tooltip=T("A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.")), label = T("Post"), ondelete = "CASCADE", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF(db, "cms_post.id", represent)), sortby = "name", ) list_fields = ["title", "body", "location_id", "date", "expired", "comments" ] org_field = settings.get_cms_organisation() if org_field == "created_by$organisation_id": org_field = "auth_user.organisation_id" elif org_field == 
"post_organisation.organisation_id": org_field = "cms_post_organisation.organisation_id" if org_field: list_fields.append(org_field) filter_widgets = [S3TextFilter(["body"], label = T("Search"), _class = "filter-search", #_placeholder = T("Search").upper(), ), S3OptionsFilter("series_id", label = T("Type"), hidden = True, ), S3LocationFilter("location_id", label = T("Location"), hidden = True, ), S3OptionsFilter("created_by$organisation_id", label = T("Organization"), # Can't use this for integers, use field.represent instead #represent = "%(name)s", hidden = True, ), S3DateFilter("created_on", label = T("Date"), hide_time = True, hidden = True, ), ] # Resource Configuration configure(tablename, context = {"event": "event.id", "incident": "incident.id", "location": "location_id", "organisation": "created_by$organisation_id", }, deduplicate = self.cms_post_duplicate, filter_actions = [{"label": "Open Table", "icon": "table", "function": "newsfeed", "method": "datalist", }, {"label": "Open Map", "icon": "globe", "method": "map", }, {"label": "Open RSS Feed", "icon": "rss", "format": "rss", }, ], filter_widgets = filter_widgets, list_fields = list_fields, list_layout = cms_post_list_layout, list_orderby = "cms_post.date desc", onaccept = self.cms_post_onaccept, orderby = "cms_post.date desc", summary = [{"name": "table", "label": "Table", "widgets": [{"method": "datatable"}] }, #{"name": "report", # "label": "Report", # "widgets": [{"method": "report", # "ajax_init": True}] # }, {"name": "map", "label": "Map", "widgets": [{"method": "map", "ajax_init": True}], }, ], super_entity = "doc_entity", ) # Components add_components(tablename, cms_comment = "post_id", cms_post_layer = "post_id", cms_post_module = "post_id", cms_post_user = {"name": "bookmark", "joinby": "post_id", }, cms_tag = {"link": "cms_tag_post", "joinby": "post_id", "key": "tag_id", "actuate": "hide", }, # For filter widget cms_tag_post = "post_id", cms_post_organisation = {"joinby": "post_id", # @ToDo: deployment_setting "multiple": False, }, cms_post_organisation_group = {"joinby": "post_id", # @ToDo: deployment_setting "multiple": False, }, # For InlineForm to tag Posts to Events/Incidents/Incident Types event_post = (# Events {"name": "event_post", "joinby": "post_id", }, # Incidents {"name": "incident_post", "joinby": "post_id", } ), event_post_incident_type = "post_id", # For Profile to filter appropriately event_event = {"link": "event_post", "joinby": "post_id", "key": "event_id", "actuate": "hide", }, event_incident = {"link": "event_post", "joinby": "post_id", "key": "incident_id", "actuate": "hide", }, event_incident_type = {"link": "event_post_incident_type", "joinby": "post_id", "key": "incident_type_id", "actuate": "hide", }, ) # Custom Methods set_method("cms", "post", method = "add_tag", action = self.cms_add_tag) set_method("cms", "post", method = "remove_tag", action = self.cms_remove_tag) set_method("cms", "post", method = "add_bookmark", action = self.cms_add_bookmark) set_method("cms", "post", method = "remove_bookmark", action = self.cms_remove_bookmark) # --------------------------------------------------------------------- # Modules/Resources <> Posts link table # tablename = "cms_post_module" define_table(tablename, post_id(empty=False), Field("module", comment = T("If you specify a module then this will be used as the text in that module's index page"), label = T("Module"), ), Field("resource", comment = T("If you specify a resource then this will be used as the text in that resource's summary page"), 
label = T("Resource"), ), #Field("record", # comment = T("If you specify a record then this will be used as a hyperlink to that resource"), # label = T("Record"), # ), *s3_meta_fields()) # CRUD Strings crud_strings[tablename] = Storage( label_create = T("Create Post"), title_display = T("Post Details"), title_list = T("Posts"), title_update = T("Edit Post"), label_list_button = T("List Posts"), msg_record_created = T("Post set as Module/Resource homepage"), msg_record_modified = T("Post updated"), msg_record_deleted = T("Post removed"), msg_list_empty = T("No posts currently set as module/resource homepages")) # --------------------------------------------------------------------- # Tags # tablename = "cms_tag" define_table(tablename, Field("name", label = T("Tag"), ), s3_comments(), # Multiple Roles (@ToDo: Implement the restriction) #s3_roles_permitted(readable = False, # writable = False # ), *s3_meta_fields()) # CRUD Strings crud_strings[tablename] = Storage( label_create = T("Create Tag"), title_display = T("Tag Details"), title_list = T("Tags"), title_update = T("Edit Tag"), title_upload = T("Import Tags"), label_list_button = T("List Tags"), msg_record_created = T("Tag added"), msg_record_modified = T("Tag updated"), msg_record_deleted = T("Tag deleted"), msg_list_empty = T("No tags currently defined")) # Reusable field represent = S3Represent(lookup=tablename, translate=True) tag_id = S3ReusableField("tag_id", "reference %s" % tablename, label = T("Tag"), ondelete = "CASCADE", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF(db, "cms_tag.id", represent)), sortby = "name", ) # --------------------------------------------------------------------- # Tags <> Posts link table # tablename = "cms_tag_post" define_table(tablename, post_id(empty = False), tag_id(empty = False), *s3_meta_fields()) # CRUD Strings crud_strings[tablename] = Storage( label_create = T("Tag Post"), title_display = T("Tag Details"), title_list = T("Tags"), title_update = T("Edit Tag"), title_upload = T("Import Tags"), label_list_button = T("List Tagged Posts"), msg_record_created = T("Post Tagged"), msg_record_modified = T("Tag updated"), msg_record_deleted = T("Tag removed"), msg_list_empty = T("No posts currently tagged")) # --------------------------------------------------------------------- # Comments # - threaded comments on Posts # # @ToDo: Attachments? 
# # Parent field allows us to: # * easily filter for top-level threads # * easily filter for next level of threading # * hook a new reply into the correct location in the hierarchy # tablename = "cms_comment" define_table(tablename, Field("parent", "reference cms_comment", requires = IS_EMPTY_OR( IS_ONE_OF(db, "cms_comment.id")), readable = False, ), post_id(empty=False), Field("body", "text", notnull=True, label = T("Comment"), ), *s3_meta_fields()) # Resource Configuration configure(tablename, list_fields = ["id", "post_id", "created_by", "modified_on" ], ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(cms_post_id = post_id, ) # ------------------------------------------------------------------------- def defaults(self): """ Safe defaults for model-global names in case module is disabled """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False) return dict(cms_post_id = lambda **attr: dummy("post_id"), ) # ------------------------------------------------------------------------- @staticmethod def cms_series_onaccept(form): """ Cascade values down to all component Posts """ form_vars = form.vars db = current.db table = db.cms_post query = (table.series_id == form_vars.id) db(query).update(avatar = form_vars.avatar, replies = form_vars.replies, roles_permitted = form_vars.roles_permitted, ) # ------------------------------------------------------------------------- @staticmethod def cms_post_duplicate(item): """ CMS Post Import - Update Detection (primarily for non-blog contents such as homepage, module index pages, summary pages, or online documentation): - same name and series => same post @param item: the import item @todo: if no name present => use cms_post_module component to identify updates (also requires deduplication of cms_post_module component) """ data = item.data name = data.get("name") series_id = data.get("series_id") if not name: return table = item.table query = (table.name == name) & \ (table.series_id == series_id) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # ------------------------------------------------------------------------- @staticmethod def cms_post_onaccept(form): """ Handle the case where the page is for a Module home page, Resource Summary page or Map Layer """ db = current.db s3db = current.s3db post_id = form.vars.id get_vars = current.request.get_vars module = get_vars.get("module", None) if module: table = db.cms_post_module query = (table.module == module) resource = get_vars.get("resource", None) if resource: # Resource Summary page query &= (table.resource == resource) else: # Module home page query &= ((table.resource == None) | \ (table.resource == "index")) result = db(query).update(post_id=post_id) if not result: table.insert(post_id=post_id, module=module, resource=resource, ) layer_id = get_vars.get("layer_id", None) if layer_id: table = s3db.cms_post_layer query = (table.layer_id == layer_id) result = db(query).update(post_id=post_id) if not result: table.insert(post_id=post_id, layer_id=layer_id, ) # Read record table = db.cms_post record = db(table.id == post_id).select(table.person_id, table.created_by, limitby=(0, 1) ).first() if record.created_by and not record.person_id: # Set from Author ptable = s3db.pr_person putable = s3db.pr_person_user query = (putable.user_id == record.created_by) & \ (putable.pe_id == ptable.pe_id) person = 
db(query).select(ptable.id, limitby=(0, 1) ).first() if person: db(table.id == post_id).update(person_id=person.id) # ----------------------------------------------------------------------------- @staticmethod def cms_add_tag(r, **attr): """ Add a Tag to a Post S3Method for interactive requests - designed to be called as an afterTagAdded callback to tag-it.js """ post_id = r.id if not post_id or len(r.args) < 3: raise HTTP(501, current.ERROR.BAD_METHOD) tag = r.args[2] db = current.db ttable = db.cms_tag ltable = db.cms_tag_post exists = db(ttable.name == tag).select(ttable.id, ttable.deleted, ttable.deleted_fk, limitby=(0, 1) ).first() if exists: tag_id = exists.id if exists.deleted: if exists.deleted_fk: data = json.loads(exists.deleted_fk) data["deleted"] = False else: data = dict(deleted=False) db(ttable.id == tag_id).update(**data) else: tag_id = ttable.insert(name=tag) query = (ltable.tag_id == tag_id) & \ (ltable.post_id == post_id) exists = db(query).select(ltable.id, ltable.deleted, ltable.deleted_fk, limitby=(0, 1) ).first() if exists: if exists.deleted: if exists.deleted_fk: data = json.loads(exists.deleted_fk) data["deleted"] = False else: data = dict(deleted=False) db(ltable.id == exists.id).update(**data) else: ltable.insert(post_id = post_id, tag_id = tag_id, ) output = current.xml.json_message(True, 200, "Tag Added") current.response.headers["Content-Type"] = "application/json" return output # ----------------------------------------------------------------------------- @staticmethod def cms_remove_tag(r, **attr): """ Remove a Tag from a Post S3Method for interactive requests - designed to be called as an afterTagRemoved callback to tag-it.js """ post_id = r.id if not post_id or len(r.args) < 3: raise HTTP(501, current.ERROR.BAD_METHOD) tag = r.args[2] db = current.db ttable = db.cms_tag exists = db(ttable.name == tag).select(ttable.id, ttable.deleted, limitby=(0, 1) ).first() if exists: tag_id = exists.id ltable = db.cms_tag_post query = (ltable.tag_id == tag_id) & \ (ltable.post_id == post_id) exists = db(query).select(ltable.id, ltable.deleted, limitby=(0, 1) ).first() if exists and not exists.deleted: resource = current.s3db.resource("cms_tag_post", id=exists.id) resource.delete() output = current.xml.json_message(True, 200, "Tag Removed") current.response.headers["Content-Type"] = "application/json" return output # ----------------------------------------------------------------------------- @staticmethod def cms_add_bookmark(r, **attr): """ Bookmark a Post S3Method for interactive requests """ post_id = r.id user = current.auth.user user_id = user and user.id if not post_id or not user_id: raise HTTP(501, current.ERROR.BAD_METHOD) db = current.db ltable = db.cms_post_user query = (ltable.post_id == post_id) & \ (ltable.user_id == user_id) exists = db(query).select(ltable.id, ltable.deleted, ltable.deleted_fk, limitby=(0, 1) ).first() if exists: link_id = exists.id if exists.deleted: if exists.deleted_fk: data = json.loads(exists.deleted_fk) data["deleted"] = False else: data = dict(deleted=False) db(ltable.id == link_id).update(**data) else: link_id = ltable.insert(post_id = post_id, user_id = user_id, ) output = current.xml.json_message(True, 200, "Bookmark Added") current.response.headers["Content-Type"] = "application/json" return output # ----------------------------------------------------------------------------- @staticmethod def cms_remove_bookmark(r, **attr): """ Remove a Bookmark for a Post S3Method for interactive requests """ post_id = r.id user = 
current.auth.user user_id = user and user.id if not post_id or not user_id: raise HTTP(501, current.ERROR.BAD_METHOD) db = current.db ltable = db.cms_post_user query = (ltable.post_id == post_id) & \ (ltable.user_id == user_id) exists = db(query).select(ltable.id, ltable.deleted, limitby=(0, 1) ).first() if exists and not exists.deleted: resource = current.s3db.resource("cms_post_user", id=exists.id) resource.delete() output = current.xml.json_message(True, 200, "Bookmark Removed") current.response.headers["Content-Type"] = "application/json" return output # ============================================================================= class S3ContentMapModel(S3Model): """ Use of the CMS to provide extra data about Map Layers """ names = ("cms_post_layer",) def model(self): # --------------------------------------------------------------------- # Layers <> Posts link table # tablename = "cms_post_layer" self.define_table(tablename, self.cms_post_id(empty = False), self.super_link("layer_id", "gis_layer_entity"), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return {} # ============================================================================= class S3ContentOrgModel(S3Model): """ Link Posts to Organisations """ names = ("cms_post_organisation",) def model(self): # --------------------------------------------------------------------- # Organisations <> Posts link table # tablename = "cms_post_organisation" self.define_table(tablename, self.cms_post_id(empty = False, ondelete = "CASCADE", ), self.org_organisation_id(empty = False, ondelete = "CASCADE", ), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return {} # ============================================================================= class S3ContentOrgGroupModel(S3Model): """ Link Posts to Organisation Groups (Coalitions/Networks) """ names = ("cms_post_organisation_group",) def model(self): # --------------------------------------------------------------------- # Organisation Groups <> Posts link table # tablename = "cms_post_organisation_group" self.define_table(tablename, self.cms_post_id(empty=False), self.org_group_id(empty=False), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return {} # ============================================================================= class S3ContentUserModel(S3Model): """ Link Posts to Users to allow Users to Bookmark posts """ names = ("cms_post_user",) def model(self): # --------------------------------------------------------------------- # Users <> Posts link table # tablename = "cms_post_user" self.define_table(tablename, self.cms_post_id(empty=False), Field("user_id", current.auth.settings.table_user), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return {} # ============================================================================= def cms_rheader(r, tabs=[]): """ CMS Resource Headers """ if r.representation != "html": # RHeaders only used in interactive views return None record = r.record if record is None: # List or Create form: rheader makes no sense here return None table = r.table resourcename = r.name T = current.T if resourcename == "series": # Tabs tabs = [(T("Basic Details"), None), (T("Posts"), "post"), ] rheader_tabs = 
s3_rheader_tabs(r, tabs) rheader = DIV(TABLE(TR(TH("%s: " % table.name.label), record.name ), ), rheader_tabs) elif resourcename == "post": # Tabs tabs = [(T("Basic Details"), None), ] if record.replies: tabs.append((T("Comments"), "discuss")) rheader_tabs = s3_rheader_tabs(r, tabs) rheader = DIV(TABLE(TR(TH("%s: " % table.name.label), record.name ), ), rheader_tabs) return rheader # ============================================================================= def cms_index(module, resource=None, page_name=None, alt_function=None): """ Return a module index page retrieved from CMS - or run an alternate function if not found """ response = current.response settings = current.deployment_settings if not page_name: page_name = settings.modules[module].name_nice response.title = page_name item = None if settings.has_module("cms") and not settings.get_cms_hide_index(module): db = current.db table = current.s3db.cms_post ltable = db.cms_post_module query = (ltable.module == module) & \ (ltable.post_id == table.id) & \ (table.deleted != True) if resource is None: query &= ((ltable.resource == None) | \ (ltable.resource == "index")) else: query &= (ltable.resource == resource) _item = db(query).select(table.id, table.body, table.title, limitby=(0, 1)).first() # @ToDo: Replace this crude check with? #if current.auth.s3_has_permission("update", table, record_id=_item.id): auth = current.auth ADMIN = auth.get_system_roles().ADMIN ADMIN = auth.s3_has_role(ADMIN) get_vars = {"module": module} if resource: get_vars["resource"] = resource if _item: if _item.title: response.title = _item.title if ADMIN: item = DIV(XML(_item.body), BR(), A(current.T("Edit"), _href=URL(c="cms", f="post", args=[_item.id, "update"], vars=get_vars), _class="action-btn")) else: item = XML(_item.body) elif ADMIN: item = DIV(H2(page_name), A(current.T("Edit"), _href=URL(c="cms", f="post", args="create", vars=get_vars), _class="action-btn")) if not item: if alt_function: # Serve the alternate controller function # Copied from gluon.main serve_controller() # (We don't want to re-run models) from gluon.compileapp import build_environment, run_controller_in, run_view_in request = current.request environment = build_environment(request, response, current.session) environment["settings"] = settings environment["s3db"] = current.s3db # Retain certain globals (extend as needed): g = globals() environment["s3base"] = g.get("s3base") environment["s3_redirect_default"] = g.get("s3_redirect_default") page = run_controller_in(request.controller, alt_function, environment) if isinstance(page, dict): response._vars = page response._view_environment.update(page) run_view_in(response._view_environment) page = response.body.getvalue() # Set default headers if not set default_headers = [ ("Content-Type", contenttype("." 
+ request.extension)), ("Cache-Control", "no-store, no-cache, must-revalidate, post-check=0, pre-check=0"), ("Expires", time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())), ("Pragma", "no-cache")] for key, value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers) else: item = H2(page_name) # tbc report = "" response.view = "index.html" return dict(item=item, report=report) # ============================================================================= def cms_documentation(r, default_page, default_url): """ Render an online documentation page, to be called from prep @param r: the S3Request @param default_page: the default page name @param default_url: the default URL if no contents found """ row = r.record if not row: # Find the CMS page name = r.get_vars.get("name", default_page) table = r.resource.table query = (table.name == name) & (table.deleted != True) row = current.db(query).select(table.id, table.title, table.body, limitby=(0, 1)).first() if not row: if name != default_page: # Error - CMS page not found r.error(404, current.T("Page not found"), next=URL(args=current.request.args, vars={}), ) else: # No CMS contents for module homepage found at all # => redirect to default page (preserving all errors) from s3 import s3_redirect_default s3_redirect_default(default_url) # Render the page from s3 import S3XMLContents return {"bypass": True, "output": {"title": row.title, "contents": S3XMLContents(row.body), }, } # ============================================================================= class S3CMS(S3Method): """ Class to generate a Rich Text widget to embed in a page """ # ------------------------------------------------------------------------- def apply_method(self, r, **attr): """ Entry point to apply cms method to S3Requests - produces a full page with a Richtext widget @param r: the S3Request @param attr: dictionary of parameters for the method handler @return: output object to send to the view """ # Not Implemented r.error(405, current.ERROR.BAD_METHOD) # ------------------------------------------------------------------------- def widget(self, r, method="cms", widget_id=None, **attr): """ Render a Rich Text widget suitable for use in a page such as S3Summary @param method: the widget method @param r: the S3Request @param attr: controller attributes @ToDo: Support comments """ if not current.deployment_settings.has_module("cms"): return "" # This is currently assuming that we're being used in a Summary page or similar request = current.request return self.resource_content(request.controller, request.function, widget_id) # ------------------------------------------------------------------------- @staticmethod def resource_content(module, resource, widget_id=None): db = current.db table = current.s3db.cms_post ltable = db.cms_post_module query = (ltable.module == module) & \ (ltable.resource == resource) & \ (ltable.post_id == table.id) & \ (table.deleted != True) _item = db(query).select(table.id, table.body, limitby=(0, 1)).first() # @ToDo: Replace this crude check with? 
#if current.auth.s3_has_permission("update", r.table, record_id=r.id): auth = current.auth ADMIN = auth.get_system_roles().ADMIN ADMIN = auth.s3_has_role(ADMIN) if _item: if ADMIN: if current.response.s3.crud.formstyle == "bootstrap": _class = "btn" else: _class = "action-btn" item = DIV(XML(_item.body), A(current.T("Edit"), _href=URL(c="cms", f="post", args=[_item.id, "update"], vars={"module": module, "resource": resource }), _class="%s cms-edit" % _class)) else: item = XML(_item.body) elif ADMIN: if current.response.s3.crud.formstyle == "bootstrap": _class = "btn" else: _class = "action-btn" item = A(current.T("Edit"), _href=URL(c="cms", f="post", args="create", vars={"module": module, "resource": resource }), _class="%s cms-edit" % _class) else: item = "" output = DIV(item, _id=widget_id, _class="cms_content") return output # ============================================================================= def cms_customise_post_fields(): """ Customize cms_post fields for the Newsfeed / Home Pages """ s3db = current.s3db s3 = current.response.s3 settings = current.deployment_settings org_field = settings.get_cms_organisation() if org_field == "created_by$organisation_id": current.auth.settings.table_user.organisation_id.represent = \ s3db.org_organisation_represent elif org_field == "post_organisation.organisation_id": s3db.cms_post_organisation.organisation_id.label = "" org_group_field = settings.get_cms_organisation_group() if org_group_field == "created_by$org_group_id": current.auth.settings.table_user.org_group_id.represent = \ s3db.org_organisation_group_represent elif org_group_field == "post_organisation_group.group_id": s3db.cms_post_organisation_group.group_id.label = "" table = s3db.cms_post table.series_id.requires = table.series_id.requires.other contact_field = settings.get_cms_person() if contact_field == "created_by": table.created_by.represent = s3_auth_user_represent_name elif contact_field == "person_id": field = table.person_id field.readable = True field.writable = True field.comment = None # Default now #field.requires = IS_ADD_PERSON_WIDGET2() field.widget = S3AddPersonWidget2(controller="pr") field = table.location_id field.label = "" field.represent = s3db.gis_LocationRepresent(sep=" | ") # Required field.requires = IS_LOCATION() list_fields = ["series_id", "location_id", "date", ] lappend = list_fields.append if settings.get_cms_show_titles(): lappend("title") lappend("body") if contact_field: lappend(contact_field) if org_field: lappend(org_field) if org_group_field: lappend(org_group_field) if settings.get_cms_show_attachments(): lappend("document.file") if settings.get_cms_show_links(): lappend("document.url") if settings.get_cms_show_events(): lappend("event_post.event_id") if settings.get_cms_location_click_filters(): script = \ '''S3.filter_location=function(d){var cb for(var p in d){cb=$('input[name="multiselect_post-cms_post_location_id-location-filter-L'+p+'"][value="'+d[p]+'"]') if(!cb.prop('checked')){cb.click()}}}''' s3.jquery_ready.append(script) # Which levels of Hierarchy are we using? 
levels = current.gis.get_relevant_hierarchy_levels() for level in levels: lappend("location_id$%s" % level) if settings.get_cms_show_tags(): lappend("tag.name") if s3.debug: s3.scripts.append("/%s/static/scripts/tag-it.js" % current.request.application) else: s3.scripts.append("/%s/static/scripts/tag-it.min.js" % current.request.application) if current.auth.s3_has_permission("update", current.db.cms_tag_post): readonly = '''afterTagAdded:function(event,ui){ if(ui.duringInitialization){return} var post_id=$(this).attr('data-post_id') var url=S3.Ap.concat('/cms/post/',post_id,'/add_tag/',ui.tagLabel) $.getS3(url) },afterTagRemoved:function(event,ui){ var post_id=$(this).attr('data-post_id') var url=S3.Ap.concat('/cms/post/',post_id,'/remove_tag/',ui.tagLabel) $.getS3(url) },''' else: readonly = '''readOnly:true''' script = \ '''S3.tagit=function(){$('.s3-tags').tagit({autocomplete:{source:'%s'},%s})} S3.tagit() S3.redraw_fns.push('tagit')''' % (URL(c="cms", f="tag", args="search_ac.json"), readonly) s3.jquery_ready.append(script) s3db.configure("cms_post", list_fields = list_fields, ) return table # ============================================================================= def cms_post_list_layout(list_id, item_id, resource, rfields, record): """ Default dataList item renderer for CMS Posts on the Home & News Feed pages. @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict """ record_id = record["cms_post.id"] item_class = "thumbnail" db = current.db s3db = current.s3db settings = current.deployment_settings NONE = current.messages["NONE"] org_field = settings.get_cms_organisation() # Convert to the right format for this context if org_field == "created_by$organisation_id": org_field = "auth_user.organisation_id" elif org_field == "post_organisation.organisation_id": org_field = "cms_post_organisation.organisation_id" org_group_field = settings.get_cms_organisation_group() # Convert to the right format for this context if org_group_field == "created_by$org_group_id": org_group_field = "auth_user.org_group_id" elif org_group_field == "post_organisation_group.group_id": org_group_field = "cms_post_organisation_group.group_id" raw = record._row body = record["cms_post.body"] series_id = raw["cms_post.series_id"] title = record["cms_post.title"] if title and title != NONE: subtitle = [DIV(title, _class="card-subtitle" ) ] else: subtitle = [] for event_resource in ["event", "incident"]: label = record["event_post.%s_id" % event_resource] if label and label != NONE: link=URL(c="event", f=event_resource, args=[raw["event_post.%s_id" % event_resource], "profile"] ) subtitle.append(DIV(A(ICON(event_resource), label, _href=link, _target="_blank", ), _class="card-subtitle" )) if subtitle: subtitle.append(body) body = TAG[""](*subtitle) date = record["cms_post.date"] date = SPAN(date, _class="date-title", ) location_id = raw["cms_post.location_id"] if location_id: location = record["cms_post.location_id"] if settings.get_cms_location_click_filters(): # Which levels of Hierarchy are we using? 
levels = current.gis.get_relevant_hierarchy_levels() data = {} for level in levels: data[level[1:]] = raw["gis_location.%s" % level] onclick = '''S3.filter_location(%s)''' % json.dumps(data, separators=SEPARATORS) location = SPAN(A(location, _href="#", _onclick=onclick, ), _class="location-title", ) else: location_url = URL(c="gis", f="location", args=[location_id, "profile"]) location = SPAN(A(location, _href=location_url, ), _class="location-title", ) else: location = "" person = "" contact_field = settings.get_cms_person() if contact_field == "created_by": author_id = raw["cms_post.created_by"] person = record["cms_post.created_by"] # @ToDo: Bulk lookup ltable = s3db.pr_person_user ptable = db.pr_person query = (ltable.user_id == author_id) & \ (ltable.pe_id == ptable.pe_id) row = db(query).select(ptable.id, limitby=(0, 1) ).first() if row: person_id = row.id else: person_id = None elif contact_field == "person_id": person_id = raw["cms_post.person_id"] if person_id: person = record["cms_post.person_id"] else: person_id = None if person: if person_id: # @ToDo: deployment_setting for controller to use? person_url = URL(c="pr", f="person", args=[person_id]) else: person_url = "#" person = A(person, _href=person_url, ) avatar = "" organisation = "" if org_field: organisation_id = raw[org_field] if organisation_id: organisation = record[org_field] org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"]) organisation = A(organisation, _href=org_url, _class="card-organisation", ) # Avatar # Try Organisation Logo otable = db.org_organisation row = db(otable.id == organisation_id).select(otable.logo, limitby=(0, 1) ).first() if row and row.logo: logo = URL(c="default", f="download", args=[row.logo]) avatar = IMG(_src=logo, _height=50, _width=50, _style="padding-right:5px", _class="media-object") else: avatar = organisation avatar = A(avatar, _href=org_url, _class="pull-left", ) org_group = "" if org_group_field: org_group_id = raw[org_group_field] if org_group_id: org_group = record[org_group_field] org_group_url = URL(c="org", f="group", args=[org_group_id, "profile"]) org_group = A(org_group, _href=org_group_url, _class="card-org-group", ) if not avatar and person_id: # Personal Avatar avatar = s3_avatar_represent(person_id, tablename="pr_person", _class="media-object") avatar = A(avatar, _href=person_url, _class="pull-left", ) if person and organisation: card_person = DIV(person, " - ", organisation, _class="card-person", ) elif person and org_group: card_person = DIV(person, " - ", org_group, _class="card-person", ) elif person: card_person = DIV(person, _class="card-person", ) #elif organisation: # card_person = DIV(organisation, # _class="card-person", # ) elif org_group: card_person = DIV(org_group, _class="card-person", ) else: card_person = DIV(_class="card-person", ) permit = current.auth.s3_has_permission table = db.cms_post updateable = permit("update", table, record_id=record_id) if settings.get_cms_show_tags(): tags = raw["cms_tag.name"] if tags or updateable: tag_list = UL(_class="s3-tags", ) tag_list["_data-post_id"] = record_id else: tag_list = "" if tags: if not isinstance(tags, list): tags = [tags]#.split(", ") for tag in tags: tag_item = LI(tag) tag_list.append(tag_item) tags = tag_list else: tags = "" T = current.T if series_id: series = record["cms_post.series_id"] translate = settings.get_L10n_translate_cms_series() if translate: series_title = T(series) else: series_title = series else: series_title = series = "" request = current.request # Tool box if 
updateable: if request.function == "newsfeed": fn = "newsfeed" else: fn = "post" edit_btn = A(ICON("edit"), _href=URL(c="cms", f=fn, args=[record_id, "update.popup"], vars={"refresh": list_id, "record": record_id} ), _class="s3_modal", _title=T("Edit %(type)s") % dict(type=series_title), ) else: edit_btn = "" if permit("delete", table, record_id=record_id): delete_btn = A(ICON("delete"), _class="dl-item-delete", ) else: delete_btn = "" user = current.auth.user if user and settings.get_cms_bookmarks(): ltable = s3db.cms_post_user query = (ltable.post_id == record_id) & \ (ltable.user_id == user.id) exists = db(query).select(ltable.id, limitby=(0, 1) ).first() if exists: bookmark_btn = A(ICON("bookmark"), _onclick="$.getS3('%s',function(){$('#%s').datalist('ajaxReloadItem',%s)})" % (URL(c="cms", f="post", args=[record_id, "remove_bookmark"]), list_id, record_id), _title=T("Remove Bookmark"), ) else: bookmark_btn = A(ICON("bookmark-empty"), _onclick="$.getS3('%s',function(){$('#%s').datalist('ajaxReloadItem',%s)})" % (URL(c="cms", f="post", args=[record_id, "add_bookmark"]), list_id, record_id), _title=T("Add Bookmark"), ) else: bookmark_btn = "" toolbox = DIV(bookmark_btn, edit_btn, delete_btn, _class="edit-bar fright", ) # Dropdown of available documents documents = raw["doc_document.file"] if documents: if not isinstance(documents, list): documents = [documents] doc_list_id = "attachments-%s" % item_id doc_list = UL(_class="f-dropdown dropdown-menu", _role="menu", _id=doc_list_id, data={"dropdown-content": ""}, ) retrieve = db.doc_document.file.retrieve for doc in documents: try: doc_name = retrieve(doc)[0] except (IOError, TypeError): doc_name = NONE doc_url = URL(c="default", f="download", args=[doc]) doc_item = LI(A(ICON("file"), " ", doc_name, _href=doc_url, ), _role="menuitem", ) doc_list.append(doc_item) docs = DIV(A(ICON("attachment"), SPAN(_class="caret"), _class="btn dropdown-toggle dropdown", _href="#", data={"toggle": "dropdown", "dropdown": doc_list_id, }, ), doc_list, _class="btn-group attachments dropdown pull-right", ) else: docs = "" links = raw["doc_document.url"] if links: if not isinstance(links, list): links = [links] link_list = DIV(_class="media card-links") for link in links: link_item = A(ICON("link"), " ", link, _href=link, _target="_blank", _class="card-link", ) link_list.append(link_item) else: link_list = "" if "profile" in request.args: # Single resource list # - don't show series_title if settings.get_cms_show_titles(): title = raw["cms_post.title"] or "" else: title = "" card_label = SPAN(" %s" % title, _class="card-title") else: # Mixed resource lists (Home, News Feed) icon = series.lower().replace(" ", "_") series_title = SPAN(" %s" % series_title, _class="card-title") if settings.get_cms_show_titles() and raw["cms_post.title"]: title = SPAN(raw["cms_post.title"], _class="card-title2") card_label = TAG[""](ICON(icon), series_title, title) else: card_label = TAG[""](ICON(icon), series_title) # Type cards if series == "Alert": # Apply additional highlighting for Alerts item_class = "%s disaster" % item_class # Render the item if series == "Event" and "newsfeed" not in request.args: # and request.function != "newsfeed" # Events on Homepage have a different header date.add_class("event") header = DIV(date, location, toolbox, _class="card-header", ) else: header = DIV(card_label, location, date, toolbox, _class="card-header", ) item = DIV(header, DIV(avatar, DIV(DIV(body, card_person, _class="media", ), _class="media-body", ), _class="media", ), tags, docs, 
link_list, _class=item_class, _id=item_id, ) return item # END =========================================================================
mit
4,432,145,194,334,658,600
-6,045,314,549,637,374,000
37.378317
195
0.417476
false
ansible/ansible-modules-extras
windows/win_iis_webapppool.py
11
3662
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}

DOCUMENTATION = '''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configures an IIS Web Application Pool.
description:
  - Creates, removes and configures an IIS Web Application Pool
options:
  name:
    description:
      - Name of the application pool
    required: true
    default: null
    aliases: []
  state:
    description:
      - State of the application pool
    choices:
      - absent
      - stopped
      - started
      - restarted
    required: false
    default: null
    aliases: []
  attributes:
    description:
      - Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by colon. Ex. "foo:1|bar:2"
    required: false
    default: null
    aliases: []
author: Henrik Wallström
'''

EXAMPLES = '''
# This returns information about an existing application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
host | success >> {
    "attributes": {},
    "changed": false,
    "info": {
        "attributes": {
            "CLRConfigFile": "",
            "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
            "autoStart": true,
            "enable32BitAppOnWin64": false,
            "enableConfigurationOverride": true,
            "managedPipelineMode": 0,
            "managedRuntimeLoader": "webengine4.dll",
            "managedRuntimeVersion": "v4.0",
            "name": "DefaultAppPool",
            "passAnonymousToken": true,
            "queueLength": 1000,
            "startMode": 0,
            "state": 1
        },
        "name": "DefaultAppPool",
        "state": "Started"
    }
}

# This creates a new application pool in 'Started' state
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows

# This stops an application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows

# This restarts an application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows

# This changes application pool attributes without touching state
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows

# This creates an application pool and sets attributes
$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows

# Playbook example
---

- name: App Pool with .NET 4.0
  win_iis_webapppool:
    name: 'AppPool'
    state: started
    attributes: managedRuntimeVersion:v4.0
  register: webapppool
'''
gpl-3.0
-5,354,367,320,079,342,000
2,765,912,762,960,525,000
30.551724
150
0.672404
false
Tennyson53/SUR
magnum/tests/unit/common/cert_manager/test_cert_manager.py
6
1550
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import fixture

from magnum.common import cert_manager
from magnum.common.cert_manager import barbican_cert_manager as bcm
from magnum.common.cert_manager import get_backend
from magnum.common.cert_manager import local_cert_manager as lcm
from magnum.tests import base


class TestCertManager(base.BaseTestCase):

    def setUp(self):
        cert_manager._CERT_MANAGER_PLUGIN = None
        super(TestCertManager, self).setUp()

    def test_barbican_cert_manager(self):
        fixture.Config().config(group='certificates',
                                cert_manager_type='barbican')
        self.assertEqual(get_backend().CertManager, bcm.CertManager)

    def test_local_cert_manager(self):
        fixture.Config().config(group='certificates',
                                cert_manager_type='local')
        self.assertEqual(get_backend().CertManager, lcm.CertManager)
apache-2.0
-4,870,987,125,391,549,000
5,062,019,405,932,071,000
37.75
75
0.696774
false
mj10777/QGIS
cmake/FindQsci.py
77
2612
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Larry Shaffer <larry@dakotacarto.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Larry Shaffer <larry@dakotacarto.com> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Larry Shaffer <larry@dakotacarto.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Larry Shaffer <larry@dakotacarto.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Find QScintilla2 PyQt4/PyQt5 module version.

.. note:: Redistribution and use is allowed according to the terms of the BSD
license. For details see the accompanying COPYING-CMAKE-SCRIPTS file.
"""

__author__ = 'Larry Shaffer (larry@dakotacarto.com)'
__date__ = '22/10/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'

import sys

VER = ""
# A command-line argument is only present when len(sys.argv) > 1, since
# sys.argv[0] is always the script name; checking > 1 keeps the try/except
# autodetection below reachable and avoids an IndexError when no argument
# is passed.
if len(sys.argv) > 1:
    if sys.argv[1] == "4":
        from PyQt4.Qsci import QSCINTILLA_VERSION_STR
        VER = QSCINTILLA_VERSION_STR
    else:
        from PyQt5.Qsci import QSCINTILLA_VERSION_STR
        VER = QSCINTILLA_VERSION_STR
else:
    try:
        from PyQt4.Qsci import QSCINTILLA_VERSION_STR
        VER = QSCINTILLA_VERSION_STR
    except ImportError:
        try:
            from PyQt5.Qsci import QSCINTILLA_VERSION_STR
            VER = QSCINTILLA_VERSION_STR
        except ImportError:
            pass

print("qsci_version_str:%s" % VER)
gpl-2.0
8,709,471,563,116,985,000
7,781,173,888,182,339,000
44.034483
90
0.708652
false
ludwiktrammer/odoo
addons/account/report/account_balance.py
22
3313
# -*- coding: utf-8 -*- import time from openerp import api, models class ReportTrialBalance(models.AbstractModel): _name = 'report.account.report_trialbalance' def _get_accounts(self, accounts, display_account): """ compute the balance, debit and credit for the provided accounts :Arguments: `accounts`: list of accounts record, `display_account`: it's used to display either all accounts or those accounts which balance is > 0 :Returns a list of dictionary of Accounts with following key and value `name`: Account name, `code`: Account code, `credit`: total amount of credit, `debit`: total amount of debit, `balance`: total amount of balance, """ account_result = {} # Prepare sql query base on selected parameters from wizard tables, where_clause, where_params = self.env['account.move.line']._query_get() tables = tables.replace('"','') if not tables: tables = 'account_move_line' wheres = [""] if where_clause.strip(): wheres.append(where_clause.strip()) filters = " AND ".join(wheres) # compute the balance, debit and credit for the provided accounts request = ("SELECT account_id AS id, SUM(debit) AS debit, SUM(credit) AS credit, (SUM(debit) - SUM(credit)) AS balance" +\ " FROM " + tables + " WHERE account_id IN %s " + filters + " GROUP BY account_id") params = (tuple(accounts.ids),) + tuple(where_params) self.env.cr.execute(request, params) for row in self.env.cr.dictfetchall(): account_result[row.pop('id')] = row account_res = [] for account in accounts: res = dict((fn, 0.0) for fn in ['credit', 'debit', 'balance']) currency = account.currency_id and account.currency_id or account.company_id.currency_id res['code'] = account.code res['name'] = account.name if account.id in account_result.keys(): res['debit'] = account_result[account.id].get('debit') res['credit'] = account_result[account.id].get('credit') res['balance'] = account_result[account.id].get('balance') if display_account == 'all': account_res.append(res) if display_account in ['movement', 'not_zero'] and not currency.is_zero(res['balance']): account_res.append(res) return account_res @api.multi def render_html(self, data): self.model = self.env.context.get('active_model') docs = self.env[self.model].browse(self.env.context.get('active_id')) display_account = data['form'].get('display_account') accounts = self.env['account.account'].search([]) account_res = self.with_context(data['form'].get('used_context'))._get_accounts(accounts, display_account) docargs = { 'doc_ids': self.ids, 'doc_model': self.model, 'data': data['form'], 'docs': docs, 'time': time, 'Accounts': account_res, } return self.env['report'].render('account.report_trialbalance', docargs)
agpl-3.0
8,196,786,485,500,349,000
8,199,937,759,040,128,000
43.77027
130
0.578026
false
matrix-org/synapse
tests/replication/test_sharded_event_persister.py
1
12377
# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from unittest.mock import patch from synapse.api.room_versions import RoomVersion from synapse.rest import admin from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import sync from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import make_request from tests.utils import USE_POSTGRES_FOR_TESTS logger = logging.getLogger(__name__) class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): """Checks event persisting sharding works""" # Event persister sharding requires postgres (due to needing # `MultiWriterIdGenerator`). if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, sync.register_servlets, ] def prepare(self, reactor, clock, hs): # Register a user who sends a message that we'll get notified about self.other_user_id = self.register_user("otheruser", "pass") self.other_access_token = self.login("otheruser", "pass") self.room_creator = self.hs.get_room_creation_handler() self.store = hs.get_datastore() def default_config(self): conf = super().default_config() conf["redis"] = {"enabled": "true"} conf["stream_writers"] = {"events": ["worker1", "worker2"]} conf["instance_map"] = { "worker1": {"host": "testserv", "port": 1001}, "worker2": {"host": "testserv", "port": 1002}, } return conf def _create_room(self, room_id: str, user_id: str, tok: str): """Create a room with given room_id""" # We control the room ID generation by patching out the # `_generate_room_id` method async def generate_room( creator_id: str, is_public: bool, room_version: RoomVersion ): await self.store.store_room( room_id=room_id, room_creator_user_id=creator_id, is_public=is_public, room_version=room_version, ) return room_id with patch( "synapse.handlers.room.RoomCreationHandler._generate_room_id" ) as mock: mock.side_effect = generate_room self.helper.create_room_as(user_id, tok=tok) def test_basic(self): """Simple test to ensure that multiple rooms can be created and joined, and that different rooms get handled by different instances. """ self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "worker1"}, ) self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "worker2"}, ) persisted_on_1 = False persisted_on_2 = False store = self.hs.get_datastore() user_id = self.register_user("user", "pass") access_token = self.login("user", "pass") # Keep making new rooms until we see rooms being persisted on both # workers. 
for _ in range(10): # Create a room room = self.helper.create_room_as(user_id, tok=access_token) # The other user joins self.helper.join( room=room, user=self.other_user_id, tok=self.other_access_token ) # The other user sends some messages rseponse = self.helper.send(room, body="Hi!", tok=self.other_access_token) event_id = rseponse["event_id"] # The event position includes which instance persisted the event. pos = self.get_success(store.get_position_for_event(event_id)) persisted_on_1 |= pos.instance_name == "worker1" persisted_on_2 |= pos.instance_name == "worker2" if persisted_on_1 and persisted_on_2: break self.assertTrue(persisted_on_1) self.assertTrue(persisted_on_2) def test_vector_clock_token(self): """Tests that using a stream token with a vector clock component works correctly with basic /sync and /messages usage. """ self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "worker1"}, ) worker_hs2 = self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "worker2"}, ) sync_hs = self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "sync"}, ) sync_hs_site = self._hs_to_site[sync_hs] # Specially selected room IDs that get persisted on different workers. room_id1 = "!foo:test" room_id2 = "!baz:test" self.assertEqual( self.hs.config.worker.events_shard_config.get_instance(room_id1), "worker1" ) self.assertEqual( self.hs.config.worker.events_shard_config.get_instance(room_id2), "worker2" ) user_id = self.register_user("user", "pass") access_token = self.login("user", "pass") store = self.hs.get_datastore() # Create two room on the different workers. self._create_room(room_id1, user_id, access_token) self._create_room(room_id2, user_id, access_token) # The other user joins self.helper.join( room=room_id1, user=self.other_user_id, tok=self.other_access_token ) self.helper.join( room=room_id2, user=self.other_user_id, tok=self.other_access_token ) # Do an initial sync so that we're up to date. channel = make_request( self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token ) next_batch = channel.json_body["next_batch"] # We now gut wrench into the events stream MultiWriterIdGenerator on # worker2 to mimic it getting stuck persisting an event. This ensures # that when we send an event on worker1 we end up in a state where # worker2 events stream position lags that on worker1, resulting in a # RoomStreamToken with a non-empty instance map component. # # Worker2's event stream position will not advance until we call # __aexit__ again. actx = worker_hs2.get_datastore()._stream_id_gen.get_next() self.get_success(actx.__aenter__()) response = self.helper.send(room_id1, body="Hi!", tok=self.other_access_token) first_event_in_room1 = response["event_id"] # Assert that the current stream token has an instance map component, as # we are trying to test vector clock tokens. room_stream_token = store.get_room_max_token() self.assertNotEqual(len(room_stream_token.instance_map), 0) # Check that syncing still gets the new event, despite the gap in the # stream IDs. 
channel = make_request( self.reactor, sync_hs_site, "GET", "/sync?since={}".format(next_batch), access_token=access_token, ) # We should only see the new event and nothing else self.assertIn(room_id1, channel.json_body["rooms"]["join"]) self.assertNotIn(room_id2, channel.json_body["rooms"]["join"]) events = channel.json_body["rooms"]["join"][room_id1]["timeline"]["events"] self.assertListEqual( [first_event_in_room1], [event["event_id"] for event in events] ) # Get the next batch and makes sure its a vector clock style token. vector_clock_token = channel.json_body["next_batch"] self.assertTrue(vector_clock_token.startswith("m")) # Now that we've got a vector clock token we finish the fake persisting # an event we started above. self.get_success(actx.__aexit__(None, None, None)) # Now try and send an event to the other rooom so that we can test that # the vector clock style token works as a `since` token. response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token) first_event_in_room2 = response["event_id"] channel = make_request( self.reactor, sync_hs_site, "GET", "/sync?since={}".format(vector_clock_token), access_token=access_token, ) self.assertNotIn(room_id1, channel.json_body["rooms"]["join"]) self.assertIn(room_id2, channel.json_body["rooms"]["join"]) events = channel.json_body["rooms"]["join"][room_id2]["timeline"]["events"] self.assertListEqual( [first_event_in_room2], [event["event_id"] for event in events] ) next_batch = channel.json_body["next_batch"] # We also want to test that the vector clock style token works with # pagination. We do this by sending a couple of new events into the room # and syncing again to get a prev_batch token for each room, then # paginating from there back to the vector clock token. self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token) self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token) channel = make_request( self.reactor, sync_hs_site, "GET", "/sync?since={}".format(next_batch), access_token=access_token, ) prev_batch1 = channel.json_body["rooms"]["join"][room_id1]["timeline"][ "prev_batch" ] prev_batch2 = channel.json_body["rooms"]["join"][room_id2]["timeline"][ "prev_batch" ] # Paginating back in the first room should not produce any results, as # no events have happened in it. This tests that we are correctly # filtering results based on the vector clock portion. channel = make_request( self.reactor, sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=b".format( room_id1, prev_batch1, vector_clock_token ), access_token=access_token, ) self.assertListEqual([], channel.json_body["chunk"]) # Paginating back on the second room should produce the first event # again. This tests that pagination isn't completely broken. 
channel = make_request( self.reactor, sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=b".format( room_id2, prev_batch2, vector_clock_token ), access_token=access_token, ) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertEqual( channel.json_body["chunk"][0]["event_id"], first_event_in_room2 ) # Paginating forwards should give the same results channel = make_request( self.reactor, sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=f".format( room_id1, vector_clock_token, prev_batch1 ), access_token=access_token, ) self.assertListEqual([], channel.json_body["chunk"]) channel = make_request( self.reactor, sync_hs_site, "GET", "/rooms/{}/messages?from={}&to={}&dir=f".format( room_id2, vector_clock_token, prev_batch2, ), access_token=access_token, ) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertEqual( channel.json_body["chunk"][0]["event_id"], first_event_in_room2 )
apache-2.0
-5,686,717,592,601,347,000
-5,696,844,679,615,396,000
35.83631
87
0.592874
false