repo_name (string, length 5–100) | ref (string, length 12–67) | path (string, length 4–244) | copies (string, length 1–8) | content (string, length 0–1.05M, nullable ⌀)
---|---|---|---|---|
julien78910/CouchPotatoServer | refs/heads/develop | libs/CodernityDB/debug_stuff.py | 44 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.tree_index import TreeBasedIndex
import struct
import os
import inspect
from functools import wraps
import json
class DebugTreeBasedIndex(TreeBasedIndex):
    """TreeBasedIndex subclass that adds console dumps of the on-disk B+tree.

    Debug-only helper: each ``_print_*`` method reads raw bucket bytes using
    the parent class' struct formats/sizes and prints the unpacked tuples.
    """

    def __init__(self, *args, **kwargs):
        super(DebugTreeBasedIndex, self).__init__(*args, **kwargs)

    def print_tree(self):
        # Dump the whole tree level by level, starting at the root record.
        print '-----CURRENT TREE-----'
        print self.root_flag
        if self.root_flag == 'l':
            # Root is already a leaf ('l'): nothing below it, print and stop.
            print '---ROOT---'
            self._print_leaf_data(self.data_start)
            return
        else:
            print '---ROOT---'
            self._print_node_data(self.data_start)
            nr_of_el, children_flag = self._read_node_nr_of_elements_and_children_flag(
                self.data_start)
            # Collect all child pointers of the root (left + right per key).
            nodes = []
            for index in range(nr_of_el):
                l_pointer, key, r_pointer = self._read_single_node_key(
                    self.data_start, index)
                nodes.append(l_pointer)
                nodes.append(r_pointer)
            print 'ROOT NODES', nodes
            # Descend while the children are internal nodes ('n');
            # children_flag flips when the next level holds leaves.
            while children_flag == 'n':
                self._print_level(nodes, 'n')
                new_nodes = []
                for node in nodes:
                    nr_of_el, children_flag = \
                        self._read_node_nr_of_elements_and_children_flag(node)
                    for index in range(nr_of_el):
                        l_pointer, key, r_pointer = self._read_single_node_key(
                            node, index)
                        new_nodes.append(l_pointer)
                        new_nodes.append(r_pointer)
                nodes = new_nodes
            # Final level: leaves.
            self._print_level(nodes, 'l')

    def _print_level(self, nodes, flag):
        # Print one whole tree level; flag selects node ('n') vs leaf ('l') layout.
        print '---NEXT LVL---'
        if flag == 'n':
            for node in nodes:
                self._print_node_data(node)
        elif flag == 'l':
            for node in nodes:
                self._print_leaf_data(node)

    def _print_leaf_data(self, leaf_start_position):
        # Read and unpack a full leaf record straight from the bucket file.
        print 'printing data of leaf at', leaf_start_position
        nr_of_elements = self._read_leaf_nr_of_elements(leaf_start_position)
        self.buckets.seek(leaf_start_position)
        data = self.buckets.read(self.leaf_heading_size +
                                 nr_of_elements * self.single_leaf_record_size)
        leaf = struct.unpack('<' + self.leaf_heading_format +
                             nr_of_elements * self.single_leaf_record_format, data)
        print leaf
        print

    def _print_node_data(self, node_start_position):
        # Read and unpack a full internal-node record from the bucket file.
        print 'printing data of node at', node_start_position
        nr_of_elements = self._read_node_nr_of_elements_and_children_flag(
            node_start_position)[0]
        self.buckets.seek(node_start_position)
        # Node layout: heading, one leading pointer, then (key, pointer) pairs.
        data = self.buckets.read(self.node_heading_size + self.pointer_size
                                 + nr_of_elements * (self.key_size + self.pointer_size))
        node = struct.unpack('<' + self.node_heading_format + self.pointer_format
                             + nr_of_elements * (
                                 self.key_format + self.pointer_format),
                             data)
        print node
        print
# ------------------>
def database_step_by_step(db_obj, path=None):
    """Wrap every public method of *db_obj* so each call is logged to a file.

    Each call is appended as one JSON line ``(name, args, kwargs[, result])``
    to *path* (or an auto-derived ``<dbname>_operation_logger.log`` placed next
    to the database directory), so the session can later be replayed with
    ``database_from_steps``. The open log file is attached to the object as
    ``db_obj.operation_logger``.
    """
    if not path:
        # ugly for multiplatform support....
        p = db_obj.path
        p1 = os.path.split(p)
        p2 = os.path.split(p1[0])
        p3 = '_'.join([p2[1], 'operation_logger.log'])
        path = os.path.join(os.path.split(p2[0])[0], p3)
    f_obj = open(path, 'wb')
    __stack = []  # inspect.stack() is not working on pytest etc

    def remove_from_stack(name):
        # NOTE(review): when i == 0, __stack[-0] is __stack[0] (the OLDEST
        # entry) and pop(-0) pops index 0, so the scan order mixes front and
        # back of the stack -- looks like an off-by-one; confirm intent
        # before changing, since replay logs depend on this behavior.
        for i in range(len(__stack)):
            if __stack[-i] == name:
                __stack.pop(-i)

    def __dumper(f):
        @wraps(f)
        def __inner(*args, **kwargs):
            funct_name = f.__name__
            # Replace non-JSON-serializable first arguments (index classes /
            # index objects) with their names so json.dumps can log them.
            if funct_name == 'count':
                name = args[0].__name__
                meth_args = (name,) + args[1:]
            elif funct_name in ('reindex_index', 'compact_index'):
                name = args[0].name
                meth_args = (name,) + args[1:]
            else:
                meth_args = args
            kwargs_copy = kwargs.copy()
            res = None
            __stack.append(funct_name)
            if funct_name == 'insert':
                # Inserts are logged AFTER the call so the result (or a None
                # marker when the insert raised) can be recorded too.
                try:
                    res = f(*args, **kwargs)
                except:
                    packed = json.dumps((funct_name,
                                         meth_args, kwargs_copy, None))
                    f_obj.write('%s\n' % packed)
                    f_obj.flush()
                    raise
                else:
                    packed = json.dumps((funct_name,
                                         meth_args, kwargs_copy, res))
                    f_obj.write('%s\n' % packed)
                    f_obj.flush()
            else:
                if funct_name == 'get':
                    # Skip logging gets that happen inside a pending
                    # delete/update (they would replay with stale revisions).
                    for curr in __stack:
                        if ('delete' in curr or 'update' in curr) and not curr.startswith('test'):
                            remove_from_stack(funct_name)
                            return f(*args, **kwargs)
                packed = json.dumps((funct_name, meth_args, kwargs_copy))
                f_obj.write('%s\n' % packed)
                f_obj.flush()
                res = f(*args, **kwargs)
            remove_from_stack(funct_name)
            return res
        return __inner

    # Monkey-patch every public bound method with the logging wrapper.
    for meth_name, meth_f in inspect.getmembers(db_obj, predicate=inspect.ismethod):
        if not meth_name.startswith('_'):
            setattr(db_obj, meth_name, __dumper(meth_f))
    setattr(db_obj, 'operation_logger', f_obj)
def database_from_steps(db_obj, path):
    """Replay a log produced by ``database_step_by_step`` against *db_obj*.

    Reads the JSON lines one by one and re-invokes each logged method with
    the recorded positional and keyword arguments, fixing up revision ids
    that differ between runs.
    """
    # db_obj.insert=lambda data : insert_for_debug(db_obj, data)
    with open(path, 'rb') as f_obj:
        for current in f_obj:
            line = json.loads(current[:-1])  # strip the trailing newline
            if line[0] == 'count':
                # The logger stored the index's name; resolve it back to the
                # attribute on the database object.
                obj = getattr(db_obj, line[1][0])
                line[1] = [obj] + line[1][1:]
            name = line[0]
            if name == 'insert':
                # Drop the recorded revision: insert must generate a new one.
                try:
                    line[1][0].pop('_rev')
                except:
                    pass
            elif name in ('delete', 'update'):
                # Revisions differ between runs; fetch the current one first.
                el = db_obj.get('id', line[1][0]['_id'])
                line[1][0]['_rev'] = el['_rev']
            # print 'FROM STEPS doing', line
            meth = getattr(db_obj, line[0], None)
            if not meth:
                raise Exception("Method = `%s` not found" % line[0])
            meth(*line[1], **line[2])
# def insert_for_debug(self, data):
#
# _rev = data['_rev']
#
# if not '_id' in data:
# _id = uuid4().hex
# else:
# _id = data['_id']
# data['_id'] = _id
# try:
# _id = bytes(_id)
# except:
# raise DatabaseException("`_id` must be valid bytes object")
# self._insert_indexes(_id, _rev, data)
# ret = {'_id': _id, '_rev': _rev}
# data.update(ret)
# return ret
|
cerndb/storage-api | refs/heads/master | storage_api/extensions/tests/test_storage.py | 1 | from storage_api.extensions.storage import (DummyStorage,
NetappStorage) # noqa
import uuid
import functools
import os
from unittest import mock
from contextlib import contextmanager
import pytest
import netapp.api
DEFAULT_VOLUME_SIZE = 30000000
def id_from_vol(v, backend):
    """Derive the backend-specific identifier for the volume record *v*.

    NetApp volumes are addressed as ``"<filer>:<junction path>"``; every
    other backend is keyed by the plain volume name.
    """
    if not isinstance(backend, NetappStorage):
        return v['name']
    return "{}:{}".format(v['filer_address'], v['junction_path'])
def new_volume(backend):
    """Create a fresh test volume on *backend* and return its record."""
    run_id = 44
    name = ':/volumename_{}'.format(run_id)
    # First positional argument is the volume id (node:junction-path form);
    # 'name' keyword is the human-readable volume name.
    return backend.create_volume(name,
                                 name="volume_name_{}".format(run_id),
                                 size_total=DEFAULT_VOLUME_SIZE)


def delete_volume(backend, volume_name):
    """Best-effort removal of *volume_name*; only NetApp needs real cleanup."""
    if not isinstance(backend, NetappStorage):
        return
    server = backend.server
    try:
        # A NetApp volume must be unmounted and taken offline before it can
        # be destroyed.
        server.unmount_volume(volume_name)
        server.take_volume_offline(volume_name)
        server.destroy_volume(volume_name)
    except netapp.api.APIError:
        # Volume may not exist or may already be partially torn down.
        pass


@contextmanager
def ephermeral_volume(backend):
    # NOTE(review): "ephermeral" is a typo for "ephemeral", but it is the
    # public helper name used throughout this module -- keep as-is.
    """Context manager yielding a temporary volume, deleted on exit."""
    vol = new_volume(backend)
    try:
        yield vol
    finally:
        delete_volume(backend, vol['name'])
def on_all_backends(func):
    """
    This has to be a separate decorator because the storage
    parameter must be initialised on every run with a fresh object.
    Will parametrise the decorated test to run once for every type of
    storage provided here.
    """
    # The recorder is a MagicMock here, so `recorder.use_cassette(...)` is a
    # no-op context manager for the dummy backend.
    @functools.wraps(func)
    @pytest.mark.parametrize("storage,recorder", [(DummyStorage(),
                                                   mock.MagicMock())])
    def backend_wrapper(*args, **kwargs):
        func(*args, **kwargs)
    return backend_wrapper
@on_all_backends
def test_get_no_volumes(storage, recorder):
    """A fresh (non-NetApp) backend starts with no volumes."""
    if not isinstance(storage, NetappStorage):
        assert storage.volumes == []


@on_all_backends
def test_get_nonexistent_volume(storage, recorder):
    """Fetching an unknown volume raises KeyError."""
    with recorder.use_cassette('nonexistent_value'):
        with pytest.raises(KeyError):
            storage.get_volume('does-not-exist')


@on_all_backends
def test_create_get_volume(storage, recorder):
    """A created volume is retrievable, listed, and has a sane size."""
    with recorder.use_cassette('create_get_volume'):
        new_vol = storage.create_volume(':/volumename',
                                        name="volume_name_9000",
                                        size_total=DEFAULT_VOLUME_SIZE)
        volume = storage.get_volume(id_from_vol(new_vol, storage))
        assert new_vol == volume
        assert volume
        # Backends may round the size up, but not by an order of magnitude.
        assert volume['size_total'] >= DEFAULT_VOLUME_SIZE
        assert volume['size_total'] <= DEFAULT_VOLUME_SIZE * 10
        assert volume in storage.volumes


@on_all_backends
def test_create_already_existing_volume(storage, recorder):
    """Creating a volume under an already-existing id raises KeyError."""
    with recorder.use_cassette('create_existing_volume'):
        with ephermeral_volume(storage) as v:
            id = id_from_vol(v, storage)
            with pytest.raises(KeyError):
                storage.create_volume(id,
                                      name=v['name'],
                                      size_total=DEFAULT_VOLUME_SIZE)
@on_all_backends
def test_restrict_volume(storage, recorder):
    """Restricting a volume removes it from the active volume listing."""
    with recorder.use_cassette('restrict_volume'):
        with ephermeral_volume(storage) as vol:
            storage.restrict_volume(id_from_vol(vol, storage))
            # Bug fix: the original line was a bare expression
            # (`vol not in storage.volumes`) whose result was evaluated and
            # discarded, so this test asserted nothing.
            assert vol not in storage.volumes
@on_all_backends
def test_patch_volume(storage, recorder):
    """patch_volume updates autosize settings, visible on re-fetch."""
    with recorder.use_cassette('patch_volume'):
        with ephermeral_volume(storage) as vol:
            storage.patch_volume(id_from_vol(vol, storage),
                                 autosize_enabled=True,
                                 max_autosize=2*DEFAULT_VOLUME_SIZE)
            v = storage.get_volume(id_from_vol(vol, storage))
            assert v['autosize_enabled'] is True
            # Backends may round max_autosize, within reason.
            assert v['max_autosize'] >= 2*DEFAULT_VOLUME_SIZE
            assert v['max_autosize'] <= 3*DEFAULT_VOLUME_SIZE


@on_all_backends
def test_get_no_locks(storage, recorder):
    """A fresh volume has no lock."""
    with recorder.use_cassette('get_no_locks'):
        with ephermeral_volume(storage) as vol:
            assert storage.locks(id_from_vol(vol, storage)) is None


@on_all_backends
def test_add_lock(storage, recorder):
    """A created lock is reported back by locks()."""
    if isinstance(storage, NetappStorage):
        # NetApp back-end cannot add locks
        return
    storage.create_volume('volumename')
    storage.create_lock('volumename', 'db.cern.ch')
    assert 'db.cern.ch' == storage.locks('volumename')


@on_all_backends
def test_remove_lock(storage, recorder):
    """Removing a lock clears it and allows another host to lock."""
    if isinstance(storage, NetappStorage):
        # NetApp cannot add locks
        return
    storage.create_volume('volumename')
    storage.create_lock('volumename', 'db.cern.ch')
    storage.remove_lock('volumename', 'db.cern.ch')
    assert 'db.cern.ch' != storage.locks('volumename')
    assert storage.locks('volumename') is None
    storage.create_lock('volumename', 'db2.cern.ch')
    assert 'db2.cern.ch' == storage.locks('volumename')


@on_all_backends
def test_remove_lock_wrong_host(storage, recorder):
    """Only the holding host can remove its own lock."""
    if isinstance(storage, NetappStorage):
        # NetApp cannot add locks
        return
    storage.create_volume('volumename')
    storage.create_lock('volumename', 'db.cern.ch')
    storage.remove_lock('volumename', 'othermachine.cern.ch')
    assert storage.locks('volumename') == 'db.cern.ch'


@on_all_backends
def test_lock_locked(storage, recorder):
    """Locking an already-locked volume raises ValueError."""
    if isinstance(storage, NetappStorage):
        # NetApp cannot add locks
        return
    storage.create_volume('volumename')
    storage.create_lock('volumename', 'db.cern.ch')
    with pytest.raises(ValueError):
        storage.create_lock('volumename', 'db2.cern.ch')


@on_all_backends
def test_get_snapshots(storage, recorder):
    """A created snapshot shows up in get_snapshot and get_snapshots alike."""
    with recorder.use_cassette('get_snapshots'):
        with ephermeral_volume(storage) as vol:
            volume_id = id_from_vol(vol, storage)
            storage.create_snapshot(volume_id, snapshot_name="snapshot-new")
            snapshots = storage.get_snapshots(volume_id)
            assert len(snapshots) == 1
            snapshot_name = storage.get_snapshot(
                volume_id, "snapshot-new")['name']
            assert snapshot_name == "snapshot-new"
            assert snapshots[0]['name'] == "snapshot-new"


@on_all_backends
def test_set_policy(storage, recorder):
    """An export policy can be created, attached to a volume, and removed."""
    rules = ["host1.db.cern.ch", "db.cern.ch", "foo.cern.ch"]
    with recorder.use_cassette('set_policy'):
        with ephermeral_volume(storage) as vol:
            volume_id = id_from_vol(vol, storage)
            storage.create_policy("a_policy_400", rules)
            storage.set_policy(volume_name=volume_id,
                               policy_name="a_policy_400")
            vol_after = storage.get_volume(volume_id)
            assert vol_after['active_policy_name'] == "a_policy_400"
            storage.remove_policy("a_policy_400")


@pytest.mark.skipif('ONTAP_HOST' not in os.environ,
                    reason="Requires a live filer")
@on_all_backends
def test_delete_policy(storage, recorder):
    """A policy can be deleted, then re-created and deleted again cleanly."""
    rules = ["host1.db.cern.ch", "db.cern.ch"]
    policy_name = "a_policy_925"
    storage.create_policy(policy_name, rules)
    assert storage.get_policy(policy_name=policy_name)
    storage.remove_policy(policy_name)
    with pytest.raises(KeyError):
        storage.get_policy(policy_name=policy_name)
    # Round-trip a second time to make sure deletion really cleaned up.
    storage.create_policy(policy_name, rules)
    assert storage.get_policy(policy_name=policy_name)
    storage.remove_policy(policy_name)
    with pytest.raises(KeyError):
        storage.get_policy(policy_name=policy_name)


@on_all_backends
def test_remove_policy_no_policy_raises_key_error(storage, recorder):
    """Removing a nonexistent policy raises KeyError."""
    with recorder.use_cassette('remove_nonexistent_policy'):
        with pytest.raises(KeyError):
            storage.remove_policy("a_policy_that_doesnt_exist")
@on_all_backends
def test_clone_volume(storage, recorder):
    """Cloning from a snapshot yields an identical volume; bad cases raise."""
    if isinstance(storage, NetappStorage):
        # Not currently supported :(
        return
    # Consistency fix: wrap in str() like test_rollback_volume does --
    # backends expect a string volume name, not a uuid.UUID instance.
    volume_name = str(uuid.uuid1())
    storage.create_volume(volume_name=volume_name)
    # Cloning from a snapshot that does not exist yet must fail.
    with pytest.raises(KeyError):
        storage.clone_volume("vol2-clone",
                             from_volume_name=volume_name,
                             from_snapshot_name="mysnap")
    storage.create_snapshot(volume_name=volume_name, snapshot_name="mysnap")
    storage.clone_volume("vol2-clone",
                         from_volume_name=volume_name,
                         from_snapshot_name="mysnap")
    vol = storage.get_volume(volume_name)
    clone = storage.get_volume("vol2-clone")
    # Cloning onto an already-existing volume name must fail.
    with pytest.raises(ValueError):
        storage.clone_volume(volume_name,
                             from_volume_name=volume_name,
                             from_snapshot_name="mysnap")
    assert vol == clone
@on_all_backends
def test_delete_snapshot(storage, recorder):
    """Snapshots can be deleted; deleting a missing one raises KeyError."""
    snapshot_name = "snapshot123"
    with recorder.use_cassette('remove_snapshot'):
        with ephermeral_volume(storage) as vol:
            volume_id = id_from_vol(vol, storage)
            with pytest.raises(KeyError):
                storage.delete_snapshot(volume_id, snapshot_name)
            storage.create_snapshot(volume_id, snapshot_name)
            storage.delete_snapshot(volume_id, snapshot_name)
            assert storage.get_snapshots(volume_id) == []


@on_all_backends
def test_rollback_volume(storage, recorder):
    """A volume can be rolled back to one of its own snapshots."""
    if isinstance(storage, NetappStorage):
        # Not currently supported :(
        return
    volume_name = str(uuid.uuid1())
    storage.create_volume(volume_name=volume_name)
    # Rolling back to a snapshot that does not exist must fail.
    with pytest.raises(KeyError):
        storage.rollback_volume(volume_name, restore_snapshot_name=volume_name)
    storage.create_snapshot(volume_name, volume_name)
    storage.rollback_volume(volume_name, restore_snapshot_name=volume_name)
    # FIXME: no way of verifying that something was actually done


@pytest.mark.skipif('ONTAP_HOST' not in os.environ,
                    reason="Requires a live filer")
@on_all_backends
def test_ensure_policy_rule_present(storage, recorder):
    """ensure_policy_rule_present is idempotent: the rule appears only once."""
    rule = "127.0.0.1"
    policy_name = "policy126"
    storage.create_policy(policy_name=policy_name,
                          rules=[])
    storage.ensure_policy_rule_absent(policy_name=policy_name,
                                      rule=rule)
    storage.ensure_policy_rule_present(policy_name=policy_name,
                                       rule=rule)
    all_policies = storage.get_policy(policy_name=policy_name)
    assert all_policies == [rule]
    # Re-adding the same rule several times must not duplicate it.
    for r in 4 * [rule]:
        storage.ensure_policy_rule_present(policy_name=policy_name,
                                           rule=r)
    assert storage.get_policy(policy_name=policy_name) == [rule]
    storage.remove_policy(policy_name)


@pytest.mark.skipif('ONTAP_HOST' not in os.environ,
                    reason="Requires a live filer")
@on_all_backends
def test_ensure_policy_rule_absent(storage, recorder):
    """ensure_policy_rule_absent removes the rule and is idempotent."""
    rule = "127.0.0.1"
    policy_name = "a_policy_53"
    try:
        storage.create_policy(policy_name=policy_name,
                              rules=[rule])
        assert rule in storage.get_policy(policy_name=policy_name)
        for _ in range(1, 3):
            storage.ensure_policy_rule_absent(policy_name=policy_name,
                                              rule=rule)
        assert storage.get_policy(policy_name=policy_name) == []
    finally:
        # Clean up even if assertions failed; ignore an already-gone policy.
        try:
            storage.remove_policy(policy_name=policy_name)
        except KeyError:
            pass


@on_all_backends
def test_repr_doesnt_crash(storage, recorder):
    """repr() of a backend must not raise and must be non-empty."""
    assert repr(storage)
@on_all_backends
def test_netapp_name_jp_both_work(storage, recorder):
    """A NetApp volume resolves identically by name and by node:junction-path."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('jp_equals_name_node'):
        with ephermeral_volume(storage) as vol:
            node_colon_jp = id_from_vol(vol, storage)
            from_jp_name = storage.get_volume(node_colon_jp)
            from_name_only = storage.get_volume(vol['name'])
            assert from_jp_name == from_name_only


@on_all_backends
def test_netapp_create_volume_no_node(storage, recorder):
    """Creating with ':/junction' (no node given) picks a filer automatically."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('tricky_netapp_create_no_node'):
        try:
            new_vol = storage.create_volume(':/volumename_9001',
                                            name="volume_name_9001",
                                            size_total=DEFAULT_VOLUME_SIZE)
            volume = storage.get_volume(new_vol['name'])
            assert new_vol == volume
            assert volume in storage.volumes
            # The backend must report which filer it picked.
            assert 'filer_address' in new_vol
        finally:
            delete_volume(storage, "volume_name_9001")


@on_all_backends
def test_netapp_create_volume_as_name(storage, recorder):
    """Creating by plain name with an explicit junction_path works."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('tricky_netapp_create_as_name'):
        try:
            new_vol = storage.create_volume("volume_name_test_32",
                                            junction_path="/volume_name_test",
                                            size_total=DEFAULT_VOLUME_SIZE)
            assert new_vol['junction_path'] == "/volume_name_test"
            assert new_vol['name'] == "volume_name_test_32"
        finally:
            delete_volume(storage, new_vol['name'])


@on_all_backends
def test_netapp_create_volume_name_missing(storage, recorder):
    """Creating with only a junction path and no name raises ValueError."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with pytest.raises(ValueError):
        storage.create_volume(':/volumename',
                              size_total=DEFAULT_VOLUME_SIZE)


@on_all_backends
def test_netapp_create_volume_jp_missing(storage, recorder):
    """Creating with only a name and no junction path raises ValueError."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with pytest.raises(ValueError):
        storage.create_volume("volume_name_test_32",
                              size_total=DEFAULT_VOLUME_SIZE)
@on_all_backends
def test_netapp_create_volume_w_snapshot_reserve(storage, recorder):
    """percentage_snapshot_reserve is honoured at creation time."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('netapp_create_snapshot_reserve'):
        PERCENT_RESERVED = 20
        try:
            new_vol = storage.create_volume(
                "volume_name_test_32",
                junction_path="/volume_name_test",
                percentage_snapshot_reserve=PERCENT_RESERVED,
                size_total=DEFAULT_VOLUME_SIZE)
            assert new_vol['percentage_snapshot_reserve'] == PERCENT_RESERVED
        finally:
            delete_volume(storage, new_vol['name'])


@on_all_backends
def test_netapp_update_snapshot_reserve(storage, recorder):
    """patch_volume can change the snapshot reserve of an existing volume."""
    PERCENT_RESERVED = 27
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('netapp_update_snapshot_reserve'):
        with ephermeral_volume(storage) as vol:
            storage.patch_volume(volume_name=vol['name'],
                                 percentage_snapshot_reserve=PERCENT_RESERVED)
            updated_volume = storage.get_volume(vol['name'])
            assert (updated_volume['percentage_snapshot_reserve']
                    == PERCENT_RESERVED)


@on_all_backends
def test_netapp_update_volume_compression(storage, recorder):
    """patch_volume can flip both compression flags."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('netapp_update_compression_settings'):
        with ephermeral_volume(storage) as vol:
            # Invert whatever the current settings happen to be.
            new_compression = not(vol['compression_enabled'])
            new_inline_compression = not(vol['inline_compression'])
            storage.patch_volume(volume_name=vol['name'],
                                 compression_enabled=new_compression,
                                 inline_compression=new_inline_compression)
            updated_volume = storage.get_volume(vol['name'])
            assert (updated_volume['compression_enabled']
                    == new_compression)
            assert (updated_volume['inline_compression']
                    == new_inline_compression)


@on_all_backends
def test_netapp_create_volume_w_compression(storage, recorder):
    """Compression flags are honoured at creation, both enabled and disabled."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('netapp_create_vol_w_compression'):
        try:
            new_vol = storage.create_volume(
                "volume_name_test_32",
                junction_path="/volume_name_test",
                compression_enabled=True,
                inline_compression=True,
                size_total=DEFAULT_VOLUME_SIZE)
            assert new_vol['compression_enabled'] is True
            assert new_vol['inline_compression'] is True
        finally:
            delete_volume(storage, new_vol['name'])
        # Second round: same volume name, both flags off.
        try:
            new_vol = storage.create_volume(
                "volume_name_test_32",
                junction_path="/volume_name_test",
                compression_enabled=False,
                inline_compression=False,
                size_total=DEFAULT_VOLUME_SIZE)
            assert new_vol['compression_enabled'] is False
            assert new_vol['inline_compression'] is False
        finally:
            delete_volume(storage, new_vol['name'])
@pytest.mark.skipif('ONTAP_HOST' not in os.environ,
                    reason="Requires a live filer")
@on_all_backends
def test_all_policies_formatting_bug(storage, recorder):
    """Regression test: policy rules must round-trip through .policies intact."""
    rules = ["host1.db.cern.ch", "db.cern.ch", "foo.cern.ch"]
    try:
        storage.create_policy("a_policy_400", rules)
        found = False
        for policy in storage.policies:
            if policy['name'] == "a_policy_400":
                assert rules == policy['rules']
                found = True
                break
        assert found
    finally:
        storage.remove_policy("a_policy_400")


@on_all_backends
def test_netapp_create_volume_w_policy(storage, recorder):
    """A volume can be created with an export policy already attached."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    policy_name = "test_32_policy"
    with recorder.use_cassette('netapp_create_vol_w_policy'):
        try:
            storage.create_policy(policy_name, [])
            new_vol = storage.create_volume(
                "volume_name_test_32",
                junction_path="/volume_name_test",
                active_policy_name=policy_name,
                size_total=DEFAULT_VOLUME_SIZE)
            assert new_vol['active_policy_name'] == policy_name
        finally:
            # Delete the volume first: the policy cannot go while in use.
            delete_volume(storage, "volume_name_test_32")
            storage.remove_policy(policy_name)


@on_all_backends
def test_netapp_update_volume_policy(storage, recorder):
    """patch_volume can switch the active export policy."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    policy_name = "test_32_policy"
    with recorder.use_cassette('netapp_update_volume_policy'):
        storage.create_policy(policy_name, [])
        try:
            with ephermeral_volume(storage) as vol:
                storage.patch_volume(volume_name=vol['name'],
                                     active_policy_name=policy_name)
                updated_volume = storage.get_volume(vol['name'])
                assert updated_volume['active_policy_name'] == policy_name
        finally:
            storage.remove_policy(policy_name)


@on_all_backends
def test_resize_volume(storage, recorder):
    """patch_volume can grow a volume; the new size is reported exactly."""
    with recorder.use_cassette('resize_volume'):
        with ephermeral_volume(storage) as vol:
            new_size = vol['size_total'] * 2
            storage.patch_volume(volume_name=vol['name'],
                                 size_total=new_size)
            updated_volume = storage.get_volume(vol['name'])
            assert updated_volume['size_total'] == new_size


@on_all_backends
def test_has_caching_policy(storage, recorder):
    """NetApp volume records expose a caching_policy key."""
    if not isinstance(storage, NetappStorage):
        # Only applies to netapp
        return
    with recorder.use_cassette('has_caching_policy'):
        with ephermeral_volume(storage) as vol:
            assert 'caching_policy' in vol
|
pavelkuchin/tracktrains | refs/heads/master | profiles/migrations/0002_auto_20150509_1528.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add tasks_limit and adjust verbose names on the tracktrainsuser model."""

    dependencies = [
        ('profiles', '0001_initial'),
    ]

    operations = [
        # New per-user cap on the number of tracked tasks (defaults to 4).
        migrations.AddField(
            model_name='tracktrainsuser',
            name='tasks_limit',
            field=models.PositiveSmallIntegerField(default=4, verbose_name=b"The user's limit of tasks."),
        ),
        migrations.AlterField(
            model_name='tracktrainsuser',
            name='inviter',
            field=models.ForeignKey(verbose_name=b'The person who invited this user.', blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AlterField(
            model_name='tracktrainsuser',
            name='invites_counter',
            field=models.PositiveSmallIntegerField(default=0, verbose_name=b'The number of remaining invitations.'),
        ),
    ]
|
MakersF/cassiopeia | refs/heads/master | cassiopeia/type/core/champion.py | 1 | import cassiopeia.riotapi
import cassiopeia.type.core.common
import cassiopeia.type.dto.champion
@cassiopeia.type.core.common.inheritdocs
class ChampionStatus(cassiopeia.type.core.common.CassiopeiaObject):
    """Read-only wrapper exposing the champion-status DTO as properties."""
    # DTO class this wrapper is bound to (re-pointed by _sa_rebind_all below).
    dto_type = cassiopeia.type.dto.champion.Champion

    def __str__(self):
        return "Status ({champ})".format(champ=self.champion)

    @property
    def enabled(self):
        """bool whether the champion is currently enabled"""
        return self.data.active

    @property
    def custom_enabled(self):
        """bool whether the champion is currently enabled for custom games"""
        return self.data.botEnabled

    @property
    def coop_ai_enabled(self):
        """bool whether the champion is currently enabled for coop vs ai games"""
        return self.data.botMmEnabled

    @property
    def free(self):
        """bool whether the champion is currently free this week"""
        return self.data.freeToPlay

    @property
    def champion(self):
        """Champion the Champion this status is for"""
        # Lazily resolves the full Champion object through the API layer;
        # returns None when the DTO carries no champion id.
        return cassiopeia.riotapi.get_champion_by_id(self.data.id) if self.data.id else None

    @property
    def ranked_enabled(self):
        """bool whether the champion is currently enabled for ranked games"""
        return self.data.rankedPlayEnabled
###############################
# Dynamic SQLAlchemy bindings #
###############################
def _sa_rebind_all():
    """Re-point the class-level DTO binding after SQLAlchemy re-generates it."""
    ChampionStatus.dto_type = cassiopeia.type.dto.champion.Champion
|
benoitsteiner/tensorflow-opencl | refs/heads/master | tensorflow/python/ops/matmul_benchmark_test.py | 33 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for matmul_benchmark.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import matmul_benchmark
from tensorflow.python.platform import test as googletest
from tensorflow.python.platform import tf_logging
def BuildGraphTest(n, m, k, transpose_a, transpose_b, dtype):
    """Return a test method verifying graph construction for one matmul shape."""

    def Test(self):
        # GPU-only benchmark; skip quietly on CPU-only builds.
        if not googletest.is_gpu_available():
            tf_logging.info("Skipping BuildGraphTest %s", (n, m, k, transpose_a,
                                                          transpose_b))
            return
        tf_logging.info("Testing BuildGraphTest %s", (n, m, k, transpose_a,
                                                      transpose_b))
        self._VerifyBuildGraph(n, m, k, transpose_a, transpose_b, dtype)

    return Test
def RunGraphTest(n, m, k, transpose_a, transpose_b, dtype):
    """Return a test method executing the benchmark graph for one matmul shape."""

    def Test(self):
        # GPU-only benchmark; skip quietly on CPU-only builds.
        if not googletest.is_gpu_available():
            tf_logging.info("Skipping RunGraphTest %s", (n, m, k, transpose_a,
                                                        transpose_b))
            return
        tf_logging.info("Testing RunGraphTest %s", (n, m, k, transpose_a,
                                                    transpose_b))
        self._VerifyRunGraph(n, m, k, transpose_a, transpose_b, dtype)

    return Test
class MatmulBenchmarkTest(googletest.TestCase):
    """Checks that matmul_benchmark builds the expected graph and runs it."""

    def _StripNode(self, nd):
        # Keep only name/op/input (and device) so the graph comparison
        # ignores attrs that vary between runs (shapes, dtypes, seeds).
        snode = node_def_pb2.NodeDef(name=nd.name, op=nd.op, input=nd.input)
        if nd.device:
            snode.device = nd.device
        return snode

    def _StripGraph(self, gd):
        # Apply _StripNode to every node of the GraphDef.
        return graph_pb2.GraphDef(node=[self._StripNode(nd) for nd in gd.node])

    def _VerifyBuildGraph(self, n, m, k, transpose_a, transpose_b, dtype):
        # Build the benchmark graph and compare its stripped structure
        # against the literal proto below (two uniform-random variables
        # feeding a single MatMul, grouped by a NoOp).
        graph = ops.Graph()
        with graph.as_default():
            matmul_benchmark.build_graph(googletest.gpu_device_name(), n, m, k, transpose_a, transpose_b,
                                         dtype)
            gd = graph.as_graph_def()
            dev=googletest.gpu_device_name()
            proto_expected = """
            node { name: "random_uniform/shape" op: "Const" device: \""""+ dev +"""\" }
            node { name: "random_uniform/min" op: "Const" device: \""""+ dev +"""\" }
            node { name: "random_uniform/max" op: "Const" device: \""""+ dev +"""\" }
            node { name: "random_uniform/RandomUniform" op: "RandomUniform" input: "random_uniform/shape" device: \""""+ dev +"""\" }
            node { name: "random_uniform/sub" op: "Sub" input: "random_uniform/max" input: "random_uniform/min" device: \""""+ dev +"""\" }
            node { name: "random_uniform/mul" op: "Mul" input: "random_uniform/RandomUniform" input: "random_uniform/sub" device: \""""+ dev +"""\" }
            node { name: "random_uniform" op: "Add" input: "random_uniform/mul" input: "random_uniform/min" device: \""""+ dev +"""\" }
            node { name: "Variable" op: "VariableV2" device: \""""+ dev +"""\" }
            node { name: "Variable/Assign" op: "Assign" input: "Variable" input: "random_uniform" device: \""""+ dev +"""\" }
            node { name: "Variable/read" op: "Identity" input: "Variable" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1/shape" op: "Const" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1/min" op: "Const" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1/max" op: "Const" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1/RandomUniform" op: "RandomUniform" input: "random_uniform_1/shape" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1/sub" op: "Sub" input: "random_uniform_1/max" input: "random_uniform_1/min" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1/mul" op: "Mul" input: "random_uniform_1/RandomUniform" input: "random_uniform_1/sub" device: \""""+ dev +"""\" }
            node { name: "random_uniform_1" op: "Add" input: "random_uniform_1/mul" input: "random_uniform_1/min" device: \""""+ dev +"""\" }
            node { name: "Variable_1" op: "VariableV2" device: \""""+ dev +"""\" }
            node { name: "Variable_1/Assign" op: "Assign" input: "Variable_1" input: "random_uniform_1" device: \""""+ dev +"""\" }
            node { name: "Variable_1/read" op: "Identity" input: "Variable_1" device: \""""+ dev +"""\" }
            node { name: "MatMul" op: "MatMul" input: "Variable/read" input: "Variable_1/read" device: \""""+ dev +"""\" }
            node { name: "group_deps" op: "NoOp" input: "^MatMul" device: \""""+ dev +"""\" }
            """
            self.assertProtoEquals(str(proto_expected), self._StripGraph(gd))

    def _VerifyRunGraph(self, n, m, k, transpose_a, transpose_b, dtype):
        # Run one benchmark iteration and sanity-check a positive duration.
        benchmark_instance = matmul_benchmark.MatmulBenchmark()
        duration = benchmark_instance.run_graph(googletest.gpu_device_name(), n, m, k, transpose_a,
                                                transpose_b, 1, dtype)
        self.assertTrue(duration > 1e-6)
if __name__ == "__main__":
    # Dynamically generate one Build/Run test per (dtype, n, m, transpose)
    # combination and attach them to MatmulBenchmarkTest before running.
    dtypes = [np.float32, np.float64]
    index = 0
    for _dtype in dtypes:
        for _n, _m, (_transpose_a, _transpose_b) in itertools.product(
            [512, 1024], [1, 8, 16, 128], [(False, False), (True, False), (False,
                                                                           True)]):
            _k = _n  # square inner dimension: k tracks n
            setattr(MatmulBenchmarkTest, "testBuildGraph_" + str(index),
                    BuildGraphTest(_n, _m, _k, _transpose_a, _transpose_b, _dtype))
            setattr(MatmulBenchmarkTest, "testRunGraph_" + str(index),
                    RunGraphTest(_n, _m, _k, _transpose_a, _transpose_b, _dtype))
            index += 1
    googletest.main()
|
gem/oq-engine | refs/heads/master | openquake/hazardlib/tests/scalerel/__init__.py | 7 | # The Hazard Library
# Copyright (C) 2012-2021 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
ojengwa/oh-mainline | refs/heads/master | vendor/packages/gdata/src/gdata/docs/__init__.py | 263 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Documents."""
__author__ = ('api.jfisher (Jeff Fisher), '
'api.eric@google.com (Eric Bidelman)')
import atom
import gdata
DOCUMENTS_NAMESPACE = 'http://schemas.google.com/docs/2007'
class Scope(atom.AtomBase):
  """Represents the gAcl scope element of a DocList ACL entry."""

  _tag = 'scope'
  _namespace = gdata.GACL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'
  _attributes['type'] = 'type'

  def __init__(self, value=None, type=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Scope.

    Args:
      value: str (optional) The scope value.
      type: str (optional) The scope type.
      extension_elements: list (optional) Unexpected child elements.
      extension_attributes: dict (optional) Unexpected XML attributes.
      text: str (optional) Text content of the element.
    """
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
    self.value = value
    self.type = type
class Role(atom.AtomBase):
  """Represents the gAcl role element of a DocList ACL entry."""

  _tag = 'role'
  _namespace = gdata.GACL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Role.

    Args:
      value: str (optional) The role value.
      extension_elements: list (optional) Unexpected child elements.
      extension_attributes: dict (optional) Unexpected XML attributes.
      text: str (optional) Text content of the element.
    """
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
    self.value = value
class FeedLink(atom.AtomBase):
  """Represents the gd:feedLink element used in DocList entries."""

  _tag = 'feedLink'
  _namespace = gdata.GDATA_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['href'] = 'href'

  def __init__(self, href=None, rel=None, text=None, extension_elements=None,
               extension_attributes=None):
    """Constructs a FeedLink.

    Args:
      href: str (optional) The URI of the linked feed.
      rel: str (optional) The link relation type.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) Unexpected child elements.
      extension_attributes: dict (optional) Unexpected XML attributes.
    """
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
                           extension_attributes=extension_attributes,
                           text=text)
    self.href = href
    self.rel = rel
class ResourceId(atom.AtomBase):
"""The DocList gd:resourceId element"""
_tag = 'resourceId'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class LastModifiedBy(atom.Person):
  """The DocList gd:lastModifiedBy element.

  An atom.Person describing who last modified the document.
  """
  _tag = 'lastModifiedBy'
  _namespace = gdata.GDATA_NAMESPACE
class LastViewed(atom.Person):
  """The DocList gd:lastViewed element.

  An atom.Person-shaped element; presumably records when the document was
  last viewed — confirm against the DocList API reference.
  """
  _tag = 'lastViewed'
  _namespace = gdata.GDATA_NAMESPACE
class WritersCanInvite(atom.AtomBase):
  """The DocList docs:writersCanInvite element.

  Exposes a single 'value' XML attribute (presumably 'true'/'false';
  verify against the DocList API reference).
  """
  _tag = 'writersCanInvite'
  _namespace = DOCUMENTS_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'
class DocumentListEntry(gdata.GDataEntry):
  """The Google Documents version of an Atom Entry."""

  _tag = gdata.GDataEntry._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  # Register DocList-specific child elements on top of the base GData set.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feedLink', FeedLink)
  _children['{%s}resourceId' % gdata.GDATA_NAMESPACE] = ('resourceId',
                                                         ResourceId)
  _children['{%s}lastModifiedBy' % gdata.GDATA_NAMESPACE] = ('lastModifiedBy',
                                                             LastModifiedBy)
  _children['{%s}lastViewed' % gdata.GDATA_NAMESPACE] = ('lastViewed',
                                                         LastViewed)
  _children['{%s}writersCanInvite' % DOCUMENTS_NAMESPACE] = (
      'writersCanInvite', WritersCanInvite)

  def __init__(self, resourceId=None, feedLink=None, lastViewed=None,
               lastModifiedBy=None, writersCanInvite=None, author=None,
               category=None, content=None, atom_id=None, link=None,
               published=None, title=None, updated=None, text=None,
               extension_elements=None, extension_attributes=None):
    """Constructs a DocumentListEntry.

    Args:
      resourceId: ResourceId (optional) The entry's resource id element.
      feedLink: FeedLink (optional) A link to a related feed.
      lastViewed: LastViewed (optional) The entry's lastViewed element.
      lastModifiedBy: LastModifiedBy (optional) The entry's lastModifiedBy
          element.
      writersCanInvite: WritersCanInvite (optional) The entry's sharing
          setting element.
      The remaining arguments are standard Atom/GData entry fields and are
      passed through to gdata.GDataEntry.__init__.
    """
    self.feedLink = feedLink
    self.lastViewed = lastViewed
    self.lastModifiedBy = lastModifiedBy
    self.resourceId = resourceId
    self.writersCanInvite = writersCanInvite
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)

  def GetAclLink(self):
    """Extracts the DocListEntry's <gd:feedLink>.

    Returns:
      A FeedLink object.
    """
    return self.feedLink

  def GetDocumentType(self):
    """Extracts the type of document from the DocListEntry.

    This method returns the type of document the DocListEntry
    represents. Possible values are document, presentation,
    spreadsheet, folder, or pdf.

    Returns:
      A string representing the type of document.
    """
    if self.category:
      for category in self.category:
        if category.scheme == gdata.GDATA_NAMESPACE + '#kind':
          return category.label
      # NOTE(review): if categories exist but none uses the '#kind' scheme,
      # control falls through here and the method implicitly returns None.
    else:
      return None
def DocumentListEntryFromString(xml_string):
  """Deserializes a Document List feed entry from XML.

  Args:
    xml_string: string The XML describing a Document List feed entry.

  Returns:
    A DocumentListEntry object corresponding to the given XML.
  """
  entry = atom.CreateClassFromXMLString(DocumentListEntry, xml_string)
  return entry
class DocumentListAclEntry(gdata.GDataEntry):
  """A DocList ACL Entry flavor of an Atom Entry."""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}scope' % gdata.GACL_NAMESPACE] = ('scope', Scope)
  _children['{%s}role' % gdata.GACL_NAMESPACE] = ('role', Role)

  def __init__(self, category=None, atom_id=None, link=None,
               title=None, updated=None, scope=None, role=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a DocumentListAclEntry.

    Args:
      category: list (optional) The entry's atom categories.
      atom_id: atom.Id (optional) The entry's id element.
      link: list (optional) The entry's link elements.
      title: atom.Title (optional) The entry's title element.
      updated: atom.Updated (optional) The entry's updated element.
      scope: Scope (optional) The ACL scope element.
      role: Role (optional) The ACL role element.
      extension_elements: list (optional) Unexpected child elements.
      extension_attributes: dict (optional) Unexpected XML attributes.
      text: str (optional) Text content of the entry.
    """
    # Bug fix: the original hard-coded text=None and dropped the caller's
    # extension_elements/extension_attributes, silently discarding those
    # arguments (DocumentListEntry passes all three through, so
    # gdata.GDataEntry.__init__ accepts them).
    gdata.GDataEntry.__init__(self, author=None, category=category,
                              content=None, atom_id=atom_id, link=link,
                              published=None, title=title, updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes,
                              text=text)
    self.scope = scope
    self.role = role
def DocumentListAclEntryFromString(xml_string):
  """Deserializes a Document List ACL feed entry from XML.

  Args:
    xml_string: string The XML describing a Document List ACL feed entry.

  Returns:
    A DocumentListAclEntry object corresponding to the given XML.
  """
  entry = atom.CreateClassFromXMLString(DocumentListAclEntry, xml_string)
  return entry
class DocumentListFeed(gdata.GDataFeed):
  """A feed containing a list of Google Documents Items."""
  _tag = gdata.GDataFeed._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Parse each atom:entry child as a DocumentListEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [DocumentListEntry])
def DocumentListFeedFromString(xml_string):
  """Deserializes a Document List feed from XML.

  Args:
    xml_string: string The XML describing a DocumentList feed.

  Returns:
    A DocumentListFeed object corresponding to the given XML.
  """
  feed = atom.CreateClassFromXMLString(DocumentListFeed, xml_string)
  return feed
class DocumentListAclFeed(gdata.GDataFeed):
  """A DocList ACL feed flavor of a Atom feed."""
  _tag = gdata.GDataFeed._tag
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  # Parse each atom:entry child as a DocumentListAclEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [DocumentListAclEntry])
def DocumentListAclFeedFromString(xml_string):
  """Converts an XML string into a DocumentListAclFeed object.

  Args:
    xml_string: string The XML describing a DocumentList ACL feed.

  Returns:
    A DocumentListAclFeed object corresponding to the given XML.
  """
  # Doc fix: the original docstring claimed this returns a DocumentListFeed;
  # the code constructs a DocumentListAclFeed.
  return atom.CreateClassFromXMLString(DocumentListAclFeed, xml_string)
|
eugenejen/AutobahnPython | refs/heads/master | examples/twisted/wamp1/pubsub/loadlatency/client.py | 17 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import time, sys, argparse
from autobahn.twisted.choosereactor import install_reactor
install_reactor()
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
import autobahn
from autobahn.twisted.websocket import connectWS
from autobahn.wamp1.protocol import WampClientFactory, \
WampClientProtocol, \
WampCraClientProtocol
class LoadLatencySubscriberProtocol(WampCraClientProtocol):
   """WAMP client that subscribes to the test topic and records event RTTs."""

   def onSessionOpen(self):
      ## Called once the WAMP session is established.
      if self.factory.config.debug:
         print "Load/Latency Subscriber Client connected to %s [skiputf8validate = %s, skipmasking = %s]" % (self.factory.config.wsuri, self.factory.config.skiputf8validate, self.factory.config.skipmasking)
      if self.factory.config.secure:
         ## do WAMP-CRA authentication as anonymous
         d = self.authenticate()
         d.addCallbacks(self.onAuthSuccess, self.onAuthError)
      else:
         self.onReady()

   def onAuthSuccess(self, permissions):
      #print "Authenticated.", permissions
      #print "Authenticated."
      self.onReady()

   def onAuthError(self, e):
      ## WAMP-CRA errors carry (uri, description, details) in the Failure value.
      uri, desc, details = e.value.args
      print "Authentication Error!", uri, desc, details

   def onReady(self):
      ## Subscribe to the test topic and signal readiness via the factory's
      ## Deferred so the test driver knows this client is connected.
      def onEvent(topic, event):
         ## NOTE(review): time.clock() is CPU/wall time depending on platform
         ## and is measured in this process while event['sent'] was stamped by
         ## the publisher process — the resulting "rtt" is only meaningful if
         ## both clocks are comparable; confirm intent.
         rtt = time.clock() - event['sent']
         self.factory.receivedRtts.append(rtt)
         self.factory.receivedCnt += 1
      self.subscribe(self.factory.config.topic, onEvent)
      self.factory._ready.callback(None)
class LoadLatencySubscriberFactory(WampClientFactory):
   """Factory for subscriber clients; accumulates per-connection statistics."""

   protocol = LoadLatencySubscriberProtocol

   def __init__(self, config, d):
      ## config: parsed command-line options; d: Deferred fired once the
      ## subscriber protocol has subscribed to the topic.
      WampClientFactory.__init__(self, config.wsuri, debugWamp = config.debug)
      self._ready = d
      self.config = config
      self.receivedCnt = 0     # events received since last stats flush
      self.receivedRtts = []   # per-event RTT samples since last stats flush
      self.setProtocolOptions(failByDrop = False)
      if config.skiputf8validate:
         self.setProtocolOptions(utf8validateIncoming = False)
      if config.skipmasking:
         self.setProtocolOptions(maskClientFrames = False)

   def clientConnectionFailed(self, connector, reason):
      ## Connection failures are deliberately ignored (best-effort load test).
      pass
      #print reason

   def clientConnectionLost(self, connector, reason):
      ## Lost connections are deliberately ignored (best-effort load test).
      pass
      #print reason
class LoadLatencyPublisherProtocol(WampCraClientProtocol):
   """WAMP client that publishes timestamped event batches at a fixed rate."""

   def onSessionOpen(self):
      ## Called once the WAMP session is established.
      if self.factory.config.debug:
         print "Load/Latency Publisher Client connected to %s [skiputf8validate = %s, skipmasking = %s]" % (self.factory.config.wsuri, self.factory.config.skiputf8validate, self.factory.config.skipmasking)
      if self.factory.config.secure:
         ## do WAMP-CRA authentication as anonymous
         d = self.authenticate()
         d.addCallbacks(self.onAuthSuccess, self.onAuthError)
      else:
         self.onReady()

   def onAuthSuccess(self, permissions):
      print "Authenticated."
      self.onReady()

   def onAuthError(self, e):
      ## WAMP-CRA errors carry (uri, description, details) in the Failure value.
      uri, desc, details = e.value.args
      print "Authentication Error!", uri, desc, details

   def onReady(self):
      ## Publish batches of `config.batch` events, then reschedule ourselves
      ## 1/rate seconds later (so the effective rate is slightly below the
      ## nominal `rate` since sending time is not accounted for).
      def sendEvent():
         self.factory.batchid += 1
         msg = {'msg': '*' * self.factory.config.payload}
         for i in xrange(self.factory.config.batch):
            self.factory.id += 1
            self.factory.publishedCnt += 1
            #msg['id'] = self.factory.id
            #msg['batchid'] = self.factory.batchid
            ## NOTE(review): time.clock() semantics differ across platforms;
            ## the subscriber subtracts this stamp in another process.
            msg['sent'] = time.clock()
            self.publish(self.factory.config.topic, msg)
         reactor.callLater(1. / float(self.factory.config.rate), sendEvent)
      sendEvent()
class LoadLatencyPublisherFactory(WampClientFactory):
   """Factory for the single publisher client; tracks publish counters."""

   protocol = LoadLatencyPublisherProtocol

   def __init__(self, config):
      ## config: parsed command-line options.
      WampClientFactory.__init__(self, config.wsuri, debugWamp = config.debug)
      self.config = config
      self.id = 0             # running event id (currently only counted)
      self.batchid = 0        # running batch id (currently only counted)
      self.publishedCnt = 0   # events published since last stats flush
      self.setProtocolOptions(failByDrop = False)
      if config.skiputf8validate:
         self.setProtocolOptions(utf8validateIncoming = False)
      if config.skipmasking:
         self.setProtocolOptions(maskClientFrames = False)
class LoadLatencyTest:
   """Drives the test: connects N subscribers, then one publisher, and
   prints per-second throughput/latency statistics."""

   def __init__(self, config):
      ## config: parsed command-line options.
      self.config = config
      self.linesPrinted = 0        # used to reprint the stats header every 20 lines
      self.publishedCntTotal = 0   # cumulative events published
      self.receivedCntTotal = 0    # cumulative events received across clients

   def run(self):
      ## Stagger subscriber connections at `uprate` connections/second.
      self._factories = []
      dl = []
      t0 = 0
      for i in xrange(self.config.clients):
         d = Deferred()
         dl.append(d)
         factory = LoadLatencySubscriberFactory(self.config, d)
         self._factories.append(factory)
         reactor.callLater(t0, connectWS, factory)
         t0 += 1. / float(self.config.uprate)
      ## Fires once every subscriber has subscribed.
      d2 = DeferredList(dl)
      def start_publishing(res):
         print "ok, %d clients all connected, start publishing .." % len(res)
         #print res
         publisherFactory = LoadLatencyPublisherFactory(self.config)
         connectWS(publisherFactory)
         def printstats():
            ## Print a header block every 20 lines, then one stats line per
            ## second; counters are reset after each line (windowed stats).
            if self.linesPrinted % 20 == 0:
               print
               print "Parameters: %d clients, %d uprate, %d payload, %d batchsize, %d rate" % (self.config.clients, self.config.uprate, self.config.payload, self.config.batch, self.config.rate)
               print "Messages: sent_last, recv_last, sent_total, recv_total, avg. rtt (ms)"
               print
            self.linesPrinted += 1
            publishedCnt = publisherFactory.publishedCnt
            receivedCnt = sum([x.receivedCnt for x in self._factories])
            receivedSumRtts = 1000. * sum([sum(x.receivedRtts) for x in self._factories])
            if receivedCnt:
               rttAvg = float(receivedSumRtts) / float(receivedCnt)
            else:
               rttAvg = 0
            publisherFactory.publishedCnt = 0
            for f in self._factories:
               f.receivedCnt = 0
               f.receivedRtts = []
            self.publishedCntTotal += publishedCnt
            self.receivedCntTotal += receivedCnt
            print "%d, %d, %d, %d, %f" % (publishedCnt, receivedCnt, self.publishedCntTotal, self.receivedCntTotal, rttAvg)
            reactor.callLater(1, printstats)
         printstats()
      d2.addCallback(start_publishing)
if __name__ == '__main__':

   ## Command-line options controlling the load/latency test run.
   parser = argparse.ArgumentParser(prog = "llclient",
                                    description = "Load/Latency Test Client")
   parser.add_argument("-d",
                       "--debug",
                       help = "Enable debug output.",
                       action = "store_true")
   parser.add_argument("--skiputf8validate",
                       help = "Skip UTF8 validation of incoming messages.",
                       action = "store_true")
   parser.add_argument("--skipmasking",
                       help = "Skip masking of sent frames.",
                       action = "store_true")
   parser.add_argument("-c",
                       "--clients",
                       type = int,
                       default = 10,
                       help = "Number of WAMP clients to connect.")
   parser.add_argument("-p",
                       "--payload",
                       type = int,
                       default = 32,
                       help = "Length of string field payload in bytes of each published message.")
   parser.add_argument("-b",
                       "--batch",
                       type = int,
                       default = 1,
                       help = "Number of messages published per batch.")
   parser.add_argument("-r",
                       "--rate",
                       type = float,
                       default = 25.,
                       help = "Number of batches per second.")
   parser.add_argument("-u",
                       "--uprate",
                       type = int,
                       default = 15,
                       help = "Connect rate in new connections per seconds.")
   parser.add_argument("-w",
                       "--wsuri",
                       type = str,
                       default = "ws://127.0.0.1:9000",
                       help = "URI of WAMP server, e.g. ws://127.0.0.1:9000.")
   parser.add_argument("-t",
                       "--topic",
                       type = str,
                       default = "http://example.com/simple",
                       help = "Topic URI to use, e.g. http://example.com/simple")
   parser.add_argument("-s",
                       "--secure",
                       help = "Enable WAMP-CRA authentication (as anonymous) - This is for testing with Crossbar.io",
                       action = "store_true")
   config = parser.parse_args()
   if config.debug:
      log.startLogging(sys.stdout)
   test = LoadLatencyTest(config)
   test.run()
   ## Print which reactor and which UTF8-validator / masker / JSON
   ## implementations are actually in use for this run.
   print reactor.__class__
   print autobahn.utf8validator.Utf8Validator
   print autobahn.xormasker.XorMaskerNull
   print autobahn.wamp1.protocol.json_lib
   reactor.run()
|
deepmind/deepmind-research | refs/heads/master | side_effects_penalties/results_summary.py | 1 | # Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plot results for different side effects penalties.
Loads csv result files generated by `run_experiment' and outputs a summary data
frame in a csv file to be used for plotting by plot_results.ipynb.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from absl import app
from absl import flags
import pandas as pd
from side_effects_penalties.file_loading import load_files
FLAGS = flags.FLAGS

# Command-line flags selecting which result files to summarize and how.
if __name__ == '__main__':  # Avoid defining flags when used as a library.
  flags.DEFINE_string('path', '', 'File path.')
  flags.DEFINE_string('input_suffix', '',
                      'Filename suffix to use when loading data files.')
  flags.DEFINE_string('output_suffix', '',
                      'Filename suffix to use when saving files.')
  flags.DEFINE_bool('bar_plot', True,
                    'Make a data frame for a bar plot (True) ' +
                    'or learning curves (False)')
  flags.DEFINE_string('env_name', 'box', 'Environment name.')
  flags.DEFINE_bool('noops', True, 'Whether the environment includes noops.')
  flags.DEFINE_list('beta_list', [0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0],
                    'List of beta values.')
  flags.DEFINE_list('seed_list', [1], 'List of random seeds.')
  flags.DEFINE_bool('compare_penalties', True,
                    'Compare different penalties using the best beta value ' +
                    'for each penalty (True), or compare different beta values '
                    + 'for the same penalty (False).')
  flags.DEFINE_enum('dev_measure', 'rel_reach',
                    ['none', 'reach', 'rel_reach', 'att_util'],
                    'Deviation measure (used if compare_penalties=False).')
  flags.DEFINE_enum('dev_fun', 'truncation', ['truncation', 'absolute'],
                    'Summary function for the deviation measure ' +
                    '(used if compare_penalties=False)')
  flags.DEFINE_float('value_discount', 0.99,
                     'Discount factor for deviation measure value function ' +
                     '(used if compare_penalties=False)')
def beta_choice(baseline, dev_measure, dev_fun, value_discount, env_name,
                beta_list, seed_list, noops=False, path='', suffix=''):
  """Choose the beta value that gives the highest final performance.

  Loads the result files for each candidate beta and returns the beta whose
  runs have the highest mean smoothed performance. Candidates with no result
  files score negative infinity.
  """
  if dev_measure == 'none':
    return 0.1
  best_beta = 0.0
  best_perf = float('-inf')
  for candidate in beta_list:
    results = load_files(baseline=baseline, dev_measure=dev_measure,
                         dev_fun=dev_fun, value_discount=value_discount,
                         beta=candidate, env_name=env_name, noops=noops,
                         path=path, suffix=suffix, seed_list=seed_list)
    if results.empty:
      perf = float('-inf')
    else:
      perf = results['performance_smooth'].mean()
    if perf > best_perf:
      best_perf = perf
      best_beta = candidate
  return best_beta
def penalty_label(dev_measure, dev_fun, value_discount):
  """Return a short label describing the penalty design choices.

  Args:
    dev_measure: deviation measure name ('none', 'reach', 'rel_reach',
      'att_util').
    dev_fun: summary function for the deviation measure.
    value_discount: discount factor for the deviation value function.

  Returns:
    A string label, e.g. 'RR(dt)'.
  """
  base = {'none': 'None', 'rel_reach': 'RR',
          'att_util': 'AU', 'reach': 'UR'}[dev_measure]
  if dev_measure == 'none':
    return base
  suffix = 'u' if value_discount == 1.0 else 'd'
  if dev_measure in ('rel_reach', 'att_util'):
    suffix += 't' if dev_fun == 'truncation' else 'a'
  return '%s(%s)' % (base, suffix)
def make_summary_data_frame(
    env_name, beta_list, seed_list, final=True, baseline=None, dev_measure=None,
    dev_fun=None, value_discount=None, noops=False, compare_penalties=True,
    path='', input_suffix='', output_suffix=''):
  """Make summary dataframe from multiple csv result files and output to csv.

  Sweeps over penalty design choices (baseline, deviation measure, deviation
  function, value discount), loads the matching result files, concatenates
  them with identifying columns, and writes the result to a csv file.

  NOTE(review): the `baseline` parameter is immediately shadowed by the
  `for baseline in baseline_list` loop below and therefore has no effect;
  confirm whether it should restrict the baseline sweep.
  """
  # For each of the penalty parameters (baseline, dev_measure, dev_fun, and
  # value_discount), compare a list of multiple values if the parameter is None,
  # or use the provided parameter value if it is not None
  baseline_list = ['start', 'inaction', 'stepwise', 'step_noroll']
  if dev_measure is not None:
    dev_measure_list = [dev_measure]
  else:
    dev_measure_list = ['none', 'reach', 'rel_reach', 'att_util']
  dataframes = []
  # Note: the loop variable deliberately reuses the name `dev_measure`.
  for dev_measure in dev_measure_list:
    # These deviation measures don't have a deviation function:
    if dev_measure in ['reach', 'none']:
      dev_fun_list = ['none']
    elif dev_fun is not None:
      dev_fun_list = [dev_fun]
    else:
      dev_fun_list = ['truncation', 'absolute']
    # These deviation measures must be discounted:
    if dev_measure in ['none', 'att_util']:
      value_discount_list = [0.99]
    elif value_discount is not None:
      value_discount_list = [value_discount]
    else:
      value_discount_list = [0.99, 1.0]
    for baseline in baseline_list:
      for vd in value_discount_list:
        for devf in dev_fun_list:
          # Choose the best beta for this set of penalty parameters if
          # compare_penalties=True, or compare all betas otherwise
          if compare_penalties:
            beta = beta_choice(
                baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
                value_discount=vd, env_name=env_name, noops=noops,
                beta_list=beta_list, seed_list=seed_list, path=path,
                suffix=input_suffix)
            betas = [beta]
          else:
            betas = beta_list
          for beta in betas:
            label = penalty_label(
                dev_measure=dev_measure, dev_fun=devf, value_discount=vd)
            df_part = load_files(
                baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
                value_discount=vd, beta=beta, env_name=env_name,
                noops=noops, path=path, suffix=input_suffix, final=final,
                seed_list=seed_list)
            # Tag each partial frame with the penalty parameters it came from.
            df_part = df_part.assign(
                baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
                value_discount=vd, beta=beta, env_name=env_name, label=label)
            dataframes.append(df_part)
  df = pd.concat(dataframes, sort=False)
  # Output summary data frame
  final_str = '_final' if final else ''
  if compare_penalties:
    filename = ('df_summary_penalties_' + env_name + final_str +
                output_suffix + '.csv')
  else:
    filename = ('df_summary_betas_' + env_name + '_' + dev_measure + '_' +
                dev_fun + '_' + str(value_discount) + final_str + output_suffix
                + '.csv')
  f = os.path.join(path, filename)
  df.to_csv(f)
  return df
def main(unused_argv):
  """Entry point: build and save the summary data frame from FLAGS."""
  compare_penalties = FLAGS.compare_penalties
  # When comparing penalties, the design-choice flags are ignored and the
  # corresponding parameters are swept inside make_summary_data_frame.
  dev_measure = None if compare_penalties else FLAGS.dev_measure
  dev_fun = None if compare_penalties else FLAGS.dev_fun
  value_discount = None if compare_penalties else FLAGS.value_discount
  make_summary_data_frame(
      compare_penalties=compare_penalties, env_name=FLAGS.env_name,
      noops=FLAGS.noops, final=FLAGS.bar_plot, dev_measure=dev_measure,
      value_discount=value_discount, dev_fun=dev_fun, path=FLAGS.path,
      input_suffix=FLAGS.input_suffix, output_suffix=FLAGS.output_suffix,
      beta_list=FLAGS.beta_list, seed_list=FLAGS.seed_list)


if __name__ == '__main__':
  app.run(main)
|
zhimingxie/grpc | refs/heads/master | src/python/grpcio/grpc/framework/foundation/abandonment.py | 47 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for indicating abandonment of computation."""
class Abandoned(Exception):
    """Signals that some computation is being abandoned.

    Abandonment is distinct from returning a value and from raising an
    exception that reports an operational or programming defect.
    """
|
mitchelljkotler/django | refs/heads/master | tests/auth_tests/test_auth_backends.py | 200 | from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import (
BACKEND_SESSION_KEY, SESSION_KEY, authenticate, get_user,
)
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import MD5PasswordHasher
from django.contrib.auth.models import AnonymousUser, Group, Permission, User
from django.contrib.auth.tests.custom_user import CustomUser, ExtensionUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import HttpRequest
from django.test import (
SimpleTestCase, TestCase, modify_settings, override_settings,
)
from .models import CustomPermissionsUser, UUIDUser
class CountingMD5PasswordHasher(MD5PasswordHasher):
    """MD5 hasher that records how many times encode() has been invoked."""

    # Class-level counter shared by all instances; tests reset it manually.
    calls = 0

    def encode(self, *args, **kwargs):
        """Count the invocation, then delegate to the real hasher."""
        cls = type(self)
        cls.calls += 1
        return super(CountingMD5PasswordHasher, self).encode(*args, **kwargs)
class BaseModelBackendTest(object):
    """
    A base class for tests that need to validate the ModelBackend
    with different User models. Subclasses should define a class
    level UserModel attribute, and a create_users() method to
    construct two users for test purposes.
    """
    backend = 'django.contrib.auth.backends.ModelBackend'

    def setUp(self):
        # Append the backend under test to AUTHENTICATION_BACKENDS and
        # create the fixture users defined by the subclass.
        self.patched_settings = modify_settings(
            AUTHENTICATION_BACKENDS={'append': self.backend},
        )
        self.patched_settings.enable()
        self.create_users()

    def tearDown(self):
        self.patched_settings.disable()
        # The custom_perms test messes with ContentTypes, which will
        # be cached; flush the cache to ensure there are no side effects
        # Refs #14975, #14925
        ContentType.objects.clear_cache()

    def test_has_perm(self):
        # A plain user has no perms; staff alone grants nothing; superuser
        # grants everything; an inactive superuser grants nothing.
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        self.assertEqual(user.has_perm('auth.test'), False)
        user.is_staff = True
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)
        user.is_superuser = True
        user.save()
        self.assertEqual(user.has_perm('auth.test'), True)
        user.is_staff = True
        user.is_superuser = True
        user.is_active = False
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)

    def test_custom_perms(self):
        # Permissions granted directly and via groups are both reported by
        # get_all_permissions()/has_perm(s); anonymous users get none.
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        content_type = ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
        user.user_permissions.add(perm)
        # reloading user to purge the _perm_cache
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        self.assertEqual(user.get_all_permissions() == {'auth.test'}, True)
        self.assertEqual(user.get_group_permissions(), set())
        self.assertEqual(user.has_module_perms('Group'), False)
        self.assertEqual(user.has_module_perms('auth'), True)
        perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
        user.user_permissions.add(perm)
        perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
        user.user_permissions.add(perm)
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        self.assertEqual(user.get_all_permissions(), {'auth.test2', 'auth.test', 'auth.test3'})
        self.assertEqual(user.has_perm('test'), False)
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
        perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
        group = Group.objects.create(name='test_group')
        group.permissions.add(perm)
        user.groups.add(group)
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        exp = {'auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'}
        self.assertEqual(user.get_all_permissions(), exp)
        self.assertEqual(user.get_group_permissions(), {'auth.test_group'})
        self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
        user = AnonymousUser()
        self.assertEqual(user.has_perm('test'), False)
        self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)

    def test_has_no_object_perm(self):
        """Regressiontest for #12462"""
        # ModelBackend does not do object-level permissions: passing an obj
        # makes has_perm()/get_all_permissions() report nothing.
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        content_type = ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
        user.user_permissions.add(perm)
        self.assertEqual(user.has_perm('auth.test', 'object'), False)
        self.assertEqual(user.get_all_permissions('object'), set())
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.get_all_permissions(), {'auth.test'})

    def test_anonymous_has_no_permissions(self):
        """
        #17903 -- Anonymous users shouldn't have permissions in
        ModelBackend.get_(all|user|group)_permissions().
        """
        backend = ModelBackend()
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        content_type = ContentType.objects.get_for_model(Group)
        user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
        group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
        user.user_permissions.add(user_perm)
        group = Group.objects.create(name='test_group')
        user.groups.add(group)
        group.permissions.add(group_perm)
        self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
        self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
        self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
        # Simulate an anonymous user by monkeypatching is_anonymous().
        user.is_anonymous = lambda: True
        self.assertEqual(backend.get_all_permissions(user), set())
        self.assertEqual(backend.get_user_permissions(user), set())
        self.assertEqual(backend.get_group_permissions(user), set())

    def test_inactive_has_no_permissions(self):
        """
        #17903 -- Inactive users shouldn't have permissions in
        ModelBackend.get_(all|user|group)_permissions().
        """
        backend = ModelBackend()
        user = self.UserModel._default_manager.get(pk=self.user.pk)
        content_type = ContentType.objects.get_for_model(Group)
        user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
        group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
        user.user_permissions.add(user_perm)
        group = Group.objects.create(name='test_group')
        user.groups.add(group)
        group.permissions.add(group_perm)
        self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
        self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
        self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
        user.is_active = False
        user.save()
        self.assertEqual(backend.get_all_permissions(user), set())
        self.assertEqual(backend.get_user_permissions(user), set())
        self.assertEqual(backend.get_group_permissions(user), set())

    def test_get_all_superuser_permissions(self):
        """A superuser has all permissions. Refs #14795."""
        user = self.UserModel._default_manager.get(pk=self.superuser.pk)
        self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))

    @override_settings(PASSWORD_HASHERS=['auth_tests.test_auth_backends.CountingMD5PasswordHasher'])
    def test_authentication_timing(self):
        """Hasher is run once regardless of whether the user exists. Refs #20760."""
        # Re-set the password, because this tests overrides PASSWORD_HASHERS
        self.user.set_password('test')
        self.user.save()
        CountingMD5PasswordHasher.calls = 0
        username = getattr(self.user, self.UserModel.USERNAME_FIELD)
        authenticate(username=username, password='test')
        self.assertEqual(CountingMD5PasswordHasher.calls, 1)
        CountingMD5PasswordHasher.calls = 0
        authenticate(username='no_such_user', password='test')
        self.assertEqual(CountingMD5PasswordHasher.calls, 1)
class ModelBackendTest(BaseModelBackendTest, TestCase):
    """
    Tests for the ModelBackend using the default User model.
    """
    UserModel = User

    def create_users(self):
        # One regular user and one superuser, as required by the base class.
        self.user = User.objects.create_user(
            username='test',
            email='test@example.com',
            password='test',
        )
        self.superuser = User.objects.create_superuser(
            username='test2',
            email='test2@example.com',
            password='test',
        )
@override_settings(AUTH_USER_MODEL='auth.ExtensionUser')
class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase):
    """
    Tests for the ModelBackend using the custom ExtensionUser model.

    This isn't a perfect test, because both the User and ExtensionUser are
    synchronized to the database, which wouldn't ordinary happen in
    production. As a result, it doesn't catch errors caused by the non-
    existence of the User table.

    The specific problem is queries on .filter(groups__user) et al, which
    makes an implicit assumption that the user model is called 'User'. In
    production, the auth.User table won't exist, so the requested join
    won't exist either; in testing, the auth.User *does* exist, and
    so does the join. However, the join table won't contain any useful
    data; for testing, we check that the data we expect actually does exist.
    """
    UserModel = ExtensionUser

    def create_users(self):
        # One regular user and one superuser; ExtensionUser additionally
        # requires a date_of_birth.
        self.user = ExtensionUser._default_manager.create_user(
            username='test',
            email='test@example.com',
            password='test',
            date_of_birth=date(2006, 4, 25)
        )
        self.superuser = ExtensionUser._default_manager.create_superuser(
            username='test2',
            email='test2@example.com',
            password='test',
            date_of_birth=date(1976, 11, 8)
        )
@override_settings(AUTH_USER_MODEL='auth.CustomPermissionsUser')
class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase):
    """
    Tests for the ModelBackend using the CustomPermissionsUser model.
    As with the ExtensionUser test, this isn't a perfect test, because both
    the User and CustomPermissionsUser are synchronized to the database,
    which wouldn't ordinarily happen in production.
    """
    UserModel = CustomPermissionsUser
    def create_users(self):
        # Fixture hook for BaseModelBackendTest. CustomPermissionsUser has no
        # username field; the email acts as the identifier.
        self.user = CustomPermissionsUser._default_manager.create_user(
            email='test@example.com',
            password='test',
            date_of_birth=date(2006, 4, 25)
        )
        self.superuser = CustomPermissionsUser._default_manager.create_superuser(
            email='test2@example.com',
            password='test',
            date_of_birth=date(1976, 11, 8)
        )
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserModelBackendAuthenticateTest(TestCase):
    """
    Tests that the model backend can accept a credentials kwarg labeled with
    custom user model's USERNAME_FIELD.
    """
    def test_authenticate(self):
        # CustomUser's USERNAME_FIELD is 'email', so authenticate() must
        # accept the credential under that keyword rather than 'username'.
        test_user = CustomUser._default_manager.create_user(
            email='test@example.com',
            password='test',
            date_of_birth=date(2006, 4, 25)
        )
        authenticated_user = authenticate(email='test@example.com', password='test')
        self.assertEqual(test_user, authenticated_user)
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserTests(TestCase):
    def test_login(self):
        """
        A custom user with a UUID primary key should be able to login.
        """
        user = UUIDUser.objects.create_user(username='uuid', password='test')
        self.assertTrue(self.client.login(username='uuid', password='test'))
        # The session stores the UUID pk; it must round-trip back to the user.
        self.assertEqual(UUIDUser.objects.get(pk=self.client.session[SESSION_KEY]), user)
class TestObj(object):
    # Minimal stand-in used as the row-level permission target in the backend
    # tests below; it carries no behaviour of its own.
    pass
class SimpleRowlevelBackend(object):
    """Toy auth backend granting object-level ("row level") permissions.

    All object permissions apply to TestObj instances only; the user named
    'test2' gets the richest permission set, anonymous and inactive users
    get their own special-cased perms.
    """
    def has_perm(self, user, perm, obj=None):
        if not obj:
            return  # We only support row level perms
        if isinstance(obj, TestObj):
            # 'test2' is granted everything; anonymous/inactive users only
            # their dedicated perm names.
            granted = (
                user.username == 'test2' or
                (user.is_anonymous() and perm == 'anon') or
                (not user.is_active and perm == 'inactive')
            )
            if granted:
                return True
        return False
    def has_module_perms(self, user, app_label):
        # Authenticated-but-inactive users are denied; everyone else may see
        # "app1" only.
        if user.is_anonymous() or user.is_active:
            return app_label == "app1"
        return False
    def get_all_permissions(self, user, obj=None):
        if not obj:
            return []  # We only support row level perms
        if not isinstance(obj, TestObj):
            return ['none']
        if user.is_anonymous():
            return ['anon']
        return ['simple', 'advanced'] if user.username == 'test2' else ['simple']
    def get_group_permissions(self, user, obj=None):
        if not obj:
            return  # We only support row level perms
        if not isinstance(obj, TestObj):
            return ['none']
        group_names = {group.name for group in user.groups.all()}
        return ['group_perm'] if 'test_group' in group_names else ['none']
@modify_settings(AUTHENTICATION_BACKENDS={
    'append': 'auth_tests.test_auth_backends.SimpleRowlevelBackend',
})
class RowlevelBackendTest(TestCase):
    """
    Tests for auth backend that supports object level permissions
    """
    def setUp(self):
        self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
        self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test')
        self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test')
    def tearDown(self):
        # The get_group_permissions test messes with ContentTypes, which will
        # be cached; flush the cache to ensure there are no side effects
        # Refs #14975, #14925
        ContentType.objects.clear_cache()
    def test_has_perm(self):
        # SimpleRowlevelBackend only grants perms when an object is supplied,
        # and 'test2' is the privileged username.
        self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
        self.assertEqual(self.user2.has_perm('perm'), False)
        self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
        self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
        self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
    def test_get_all_permissions(self):
        self.assertEqual(self.user1.get_all_permissions(TestObj()), {'simple'})
        self.assertEqual(self.user2.get_all_permissions(TestObj()), {'simple', 'advanced'})
        self.assertEqual(self.user2.get_all_permissions(), set())
    def test_get_group_permissions(self):
        # Membership in 'test_group' yields the backend's 'group_perm'.
        group = Group.objects.create(name='test_group')
        self.user3.groups.add(group)
        self.assertEqual(self.user3.get_group_permissions(TestObj()), {'group_perm'})
@override_settings(
    AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'],
)
class AnonymousUserBackendTest(SimpleTestCase):
    """
    Tests for AnonymousUser delegating to backend.
    """
    def setUp(self):
        self.user1 = AnonymousUser()
    def test_has_perm(self):
        # Anonymous users only get the 'anon' perm, and only on TestObj.
        self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
    def test_has_perms(self):
        self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
        self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
    def test_has_module_perms(self):
        self.assertEqual(self.user1.has_module_perms("app1"), True)
        self.assertEqual(self.user1.has_module_perms("app2"), False)
    def test_get_all_permissions(self):
        self.assertEqual(self.user1.get_all_permissions(TestObj()), {'anon'})
@override_settings(AUTHENTICATION_BACKENDS=[])
class NoBackendsTest(TestCase):
    """
    Tests that an appropriate error is raised if no auth backends are provided.
    """
    def setUp(self):
        self.user = User.objects.create_user('test', 'test@example.com', 'test')
    def test_raises_exception(self):
        # With an empty backend list, permission checks have nothing to
        # delegate to and must fail loudly.
        self.assertRaises(ImproperlyConfigured, self.user.has_perm, ('perm', TestObj(),))
@override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'])
class InActiveUserBackendTest(TestCase):
    """
    Tests for an inactive user
    """
    def setUp(self):
        self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
        self.user1.is_active = False
        self.user1.save()
    def test_has_perm(self):
        # SimpleRowlevelBackend grants inactive users only the 'inactive'
        # perm, and only on a TestObj instance.
        self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user1.has_perm('inactive', TestObj()), True)
    def test_has_module_perms(self):
        # Authenticated-but-inactive users get no module perms at all.
        self.assertEqual(self.user1.has_module_perms("app1"), False)
        self.assertEqual(self.user1.has_module_perms("app2"), False)
class PermissionDeniedBackend(object):
    """
    Always raises PermissionDenied in `authenticate`, `has_perm` and `has_module_perms`.
    """
    # Capability flags advertised to Django's auth framework.
    supports_object_permissions = True
    supports_anonymous_user = True
    supports_inactive_user = True
    def authenticate(self, username=None, password=None):
        # Reject every credential pair outright.
        raise PermissionDenied
    def has_perm(self, user_obj, perm, obj=None):
        # Deny regardless of permission or object queried.
        raise PermissionDenied
    def has_module_perms(self, user_obj, app_label):
        # Deny module-level checks as well.
        raise PermissionDenied
class PermissionDeniedBackendTest(TestCase):
    """
    Tests that other backends are not checked once a backend raises PermissionDenied
    """
    backend = 'auth_tests.test_auth_backends.PermissionDeniedBackend'
    def setUp(self):
        self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
        self.user1.save()
    @modify_settings(AUTHENTICATION_BACKENDS={'prepend': backend})
    def test_permission_denied(self):
        "user is not authenticated after a backend raises permission denied #2550"
        self.assertEqual(authenticate(username='test', password='test'), None)
    @modify_settings(AUTHENTICATION_BACKENDS={'append': backend})
    def test_authenticates(self):
        # When the denying backend comes last, the ModelBackend wins first.
        self.assertEqual(authenticate(username='test', password='test'), self.user1)
    @modify_settings(AUTHENTICATION_BACKENDS={'prepend': backend})
    def test_has_perm_denied(self):
        # PermissionDenied from the first backend short-circuits to False even
        # though the user actually holds the permission.
        content_type = ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
        self.user1.user_permissions.add(perm)
        self.assertIs(self.user1.has_perm('auth.test'), False)
        self.assertIs(self.user1.has_module_perms('auth'), False)
    @modify_settings(AUTHENTICATION_BACKENDS={'append': backend})
    def test_has_perm(self):
        content_type = ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
        self.user1.user_permissions.add(perm)
        self.assertIs(self.user1.has_perm('auth.test'), True)
        self.assertIs(self.user1.has_module_perms('auth'), True)
class NewModelBackend(ModelBackend):
    # Plain ModelBackend under a different import path; used to simulate a
    # backend disappearing from AUTHENTICATION_BACKENDS between requests.
    pass
class ChangedBackendSettingsTest(TestCase):
    """
    Tests for changes in the settings.AUTHENTICATION_BACKENDS
    """
    backend = 'auth_tests.test_auth_backends.NewModelBackend'
    TEST_USERNAME = 'test_user'
    TEST_PASSWORD = 'test_password'
    TEST_EMAIL = 'test@example.com'
    def setUp(self):
        User.objects.create_user(self.TEST_USERNAME,
                                 self.TEST_EMAIL,
                                 self.TEST_PASSWORD)
    @override_settings(AUTHENTICATION_BACKENDS=[backend])
    def test_changed_backend_settings(self):
        """
        Tests that removing a backend configured in AUTHENTICATION_BACKENDS
        makes already logged-in users disconnect.
        """
        # Get a session for the test user
        self.assertTrue(self.client.login(
            username=self.TEST_USERNAME,
            password=self.TEST_PASSWORD)
        )
        # Prepare a request object
        request = HttpRequest()
        request.session = self.client.session
        # Remove NewModelBackend
        with self.settings(AUTHENTICATION_BACKENDS=[
                'django.contrib.auth.backends.ModelBackend']):
            # Get the user from the request
            user = get_user(request)
            # Assert that the user retrieval is successful and the user is
            # anonymous as the backend is not longer available.
            self.assertIsNotNone(user)
            self.assertTrue(user.is_anonymous())
class TypeErrorBackend(object):
    """Auth backend whose ``authenticate`` always blows up with TypeError.

    Used to verify that unexpected programming errors inside a backend
    propagate to the caller instead of being swallowed.
    """
    # Capability flags advertised to Django's auth framework.
    supports_object_permissions = True
    supports_anonymous_user = True
    supports_inactive_user = True

    def authenticate(self, username=None, password=None):
        # Simulate a coding bug inside a backend.
        raise TypeError
class TypeErrorBackendTest(TestCase):
    """
    Tests that a TypeError within a backend is propagated properly.
    Regression test for ticket #18171
    """
    backend = 'auth_tests.test_auth_backends.TypeErrorBackend'
    def setUp(self):
        self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
    @override_settings(AUTHENTICATION_BACKENDS=[backend])
    def test_type_error_raised(self):
        # The TypeError must bubble out of authenticate(), not be treated as
        # a mere "credentials didn't match" signal.
        self.assertRaises(TypeError, authenticate, username='test', password='test')
class ImproperlyConfiguredUserModelTest(TestCase):
    """
    Tests that an exception from within get_user_model is propagated and doesn't
    raise an UnboundLocalError.
    Regression test for ticket #21439
    """
    def setUp(self):
        self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
        self.client.login(
            username='test',
            password='test'
        )
    @override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
    def test_does_not_shadow_exception(self):
        # Prepare a request object
        request = HttpRequest()
        request.session = self.client.session
        # A bogus AUTH_USER_MODEL must surface as ImproperlyConfigured.
        self.assertRaises(ImproperlyConfigured, get_user, request)
class ImportedModelBackend(ModelBackend):
    # Re-exported under auth_tests.backend_alias (see ImportedBackendTests);
    # used to check which dotted path ends up recorded in the session.
    pass
class ImportedBackendTests(TestCase):
    """
    #23925 - The backend path added to the session should be the same
    as the one defined in AUTHENTICATION_BACKENDS setting.
    """
    backend = 'auth_tests.backend_alias.ImportedModelBackend'
    @override_settings(AUTHENTICATION_BACKENDS=[backend])
    def test_backend_path(self):
        username = 'username'
        password = 'password'
        User.objects.create_user(username, 'email', password)
        self.assertTrue(self.client.login(username=username, password=password))
        request = HttpRequest()
        request.session = self.client.session
        # The configured alias path, not the defining module's path, must be
        # what the session records.
        self.assertEqual(request.session[BACKEND_SESSION_KEY], self.backend)
|
caterinaurban/Lyra | refs/heads/master | src/lyra/unittests/numerical/interval/forward/indexing3/counting2.py | 1 |
# Lyra forward interval-analysis unittest. The "# FINAL:" comment below is the
# expected abstract state and is parsed by Lyra's test runner -- it is an
# oracle, not an ordinary comment; keep it in sync with the code above it.
D: Dict[str, List[int]] = {'a': [1], 'b': [1, 2], 'c': [1, 2, 3]}
# FINAL: D -> "a"@[1, 1], "b"@[1, 2], "c"@[1, 3], _@⊥; keys(D) -> 0@[-inf, inf], _@⊥; len(D) -> [3, 3]; values(D) -> 0@[1, 1], _@[2, 3]
|
tgsd96/gargnotes | refs/heads/master | venv/lib/python2.7/site-packages/django/http/utils.py | 134 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
    """
    Rewrite any Location header on the response to an absolute URI.

    RFC 2616, section 14.30 requires absolute URIs in Location headers.
    Code constructing responses is free to use relative paths; this hook
    resolves them against the current request just before sending.
    """
    has_location = 'Location' in response
    if has_location and request.get_host():
        location = response['Location']
        response['Location'] = request.build_absolute_uri(location)
    return response
def conditional_content_removal(request, response):
    """
    Strip response bodies where HTTP forbids them (RFC 2616, section 4.3).

    Empties the body for 1xx, 204 and 304 responses (also forcing
    Content-Length to '0') and for any reply to a HEAD request.
    """
    def _drop_body():
        # Streaming responses carry their payload in streaming_content
        # rather than .content.
        if response.streaming:
            response.streaming_content = []
        else:
            response.content = b''

    status = response.status_code
    if 100 <= status < 200 or status in (204, 304):
        _drop_body()
        response['Content-Length'] = '0'
    if request.method == 'HEAD':
        _drop_body()
    return response
|
erwilan/ansible | refs/heads/devel | contrib/inventory/nsot.py | 117 | #!/usr/bin/env python
'''
nsot
====
Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
Features
--------
* Define host groups in form of NSoT device attribute criteria
* All parameters defined by the spec as of 2015-09-05 are supported.
+ ``--list``: Returns JSON hash of host groups -> hosts and top-level
``_meta`` -> ``hostvars`` which correspond to all device attributes.
Group vars can be specified in the YAML configuration, noted below.
+ ``--host <hostname>``: Returns JSON hash where every item is a device
attribute.
* In addition to all attributes assigned to resource being returned, script
will also append ``site_id`` and ``id`` as facts to utilize.
Configuration
-------------
Since it'd be annoying and failure prone to guess where your configuration
file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
This file should adhere to the YAML spec. Each top-level variable must be a
desired Ansible group name hashed with a single 'query' item to define the
NSoT attribute query.
Queries follow the normal NSoT query syntax, `shown here`_
.. _shown here: https://github.com/dropbox/pynsot#set-queries
.. code:: yaml
routers:
query: 'deviceType=ROUTER'
vars:
a: b
c: d
juniper_fw:
query: 'deviceType=FIREWALL manufacturer=JUNIPER'
not_f10:
query: '-manufacturer=FORCE10'
The inventory will automatically use your ``.pynsotrc`` like normal pynsot from
cli would, so make sure that's configured appropriately.
.. note::
Attributes I'm showing above are influenced from ones that the Trigger
project likes. As is the spirit of NSoT, use whichever attributes work best
for your workflow.
If config file is blank or absent, the following default groups will be
created:
* ``routers``: deviceType=ROUTER
* ``switches``: deviceType=SWITCH
* ``firewalls``: deviceType=FIREWALL
These are likely not useful for everyone so please use the configuration. :)
.. note::
By default, resources will only be returned for what your default
site is set for in your ``~/.pynsotrc``.
If you want to specify, add an extra key under the group for ``site: n``.
Output Examples
---------------
Here are some examples shown from just calling the command directly::
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
{
"routers": {
"hosts": [
"test1.example.com"
],
"vars": {
"cool_level": "very",
"group": "routers"
}
},
"firewalls": {
"hosts": [
"test2.example.com"
],
"vars": {
"cool_level": "enough",
"group": "firewalls"
}
},
"_meta": {
"hostvars": {
"test2.example.com": {
"make": "SRX",
"site_id": 1,
"id": 108
},
"test1.example.com": {
"make": "MX80",
"site_id": 1,
"id": 107
}
}
},
"rtr_and_fw": {
"hosts": [
"test1.example.com",
"test2.example.com"
],
"vars": {}
}
}
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
{
"make": "MX80",
"site_id": 1,
"id": 107
}
'''
from __future__ import print_function
import sys
import os
import pkg_resources
import argparse
import json
import yaml
from textwrap import dedent
from pynsot.client import get_api_client
from pynsot.app import HttpServerError
from click.exceptions import UsageError
from six import string_types
def warning(*objs):
    """Echo *objs* to stderr, space-separated behind a "WARNING: " banner."""
    parts = ["WARNING: "] + [str(obj) for obj in objs]
    sys.stderr.write(" ".join(parts) + "\n")
class NSoTInventory(object):
    '''NSoT client wrapper that gathers Ansible inventory from NSoT devices.

    Group definitions come from the YAML file named by the
    ``NSOT_INVENTORY_CONFIG`` environment variable. When the variable is
    unset, or the file is missing or empty, a small default group set
    (routers/switches/firewalls) is used instead.
    '''
    def __init__(self):
        self.config = dict()
        config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
        if config_env:
            try:
                config_file = os.path.abspath(config_env)
                # BUGFIX: open() must live inside this try block.
                # os.path.abspath() never raises IOError, so previously a
                # missing config file crashed at the open() below instead of
                # falling back to the default config (and the IOError branch
                # left config_file unbound).
                with open(config_file) as f:
                    try:
                        self.config.update(yaml.safe_load(f))
                    except TypeError:  # If empty file, use default config
                        warning('Empty config file')
                        self._config_default()
            except IOError:  # If file non-existent, use default config
                self._config_default()
            except Exception as e:
                sys.exit('%s\n' % e)
        else:  # Use defaults if env var missing
            self._config_default()
        self.groups = self.config.keys()
        self.client = get_api_client()
        # _meta accumulates per-host attributes while groups are built and is
        # emitted verbatim under the '_meta' key of --list output.
        self._meta = {'hostvars': dict()}
    def _config_default(self):
        # Fallback group definitions used when no usable config is found.
        default_yaml = '''
        ---
        routers:
          query: deviceType=ROUTER
        switches:
          query: deviceType=SWITCH
        firewalls:
          query: deviceType=FIREWALL
        '''
        self.config = yaml.safe_load(dedent(default_yaml))
    def do_list(self):
        '''Direct callback for when ``--list`` is provided
        Relies on the configuration generated from init to run
        _inventory_group()
        :returns: JSON string of the full group->hosts inventory, including
            the '_meta' hostvars collected while iterating groups.
        '''
        inventory = dict()
        for group, contents in self.config.items():
            group_response = self._inventory_group(group, contents)
            inventory.update(group_response)
        inventory.update({'_meta': self._meta})
        return json.dumps(inventory)
    def do_host(self, host):
        '''Direct callback for ``--host <host>``: JSON hostvars for *host*.'''
        return json.dumps(self._hostvars(host))
    def _hostvars(self, host):
        '''Return dictionary of all device attributes
        Depending on number of devices in NSoT, could be rather slow since this
        has to request every device resource to filter through
        '''
        # NOTE: substring match against hostname, first hit wins; an unknown
        # host raises IndexError here.
        device = [i for i in self.client.devices.get()
                  if host in i['hostname']][0]
        attributes = device['attributes']
        attributes.update({'site_id': device['site_id'], 'id': device['id']})
        return attributes
    def _inventory_group(self, group, contents):
        '''Takes a group and returns inventory for it as dict
        :param group: Group name
        :type group: str
        :param contents: The contents of the group's YAML config
        :type contents: dict
        contents param should look like::
            {
                'query': 'xx',
                'vars':
                    'a': 'b'
            }
        Will return something like::
            { group: {
                hosts: [],
                vars: {},
            }
        '''
        query = contents.get('query')
        hostvars = contents.get('vars', dict())
        site = contents.get('site', dict())
        obj = {group: dict()}
        obj[group]['hosts'] = []
        obj[group]['vars'] = hostvars
        # BUGFIX: explicit isinstance check instead of assert guarded by a
        # bare except -- asserts vanish under ``python -O`` and the bare
        # except also swallowed unrelated exceptions (e.g. KeyboardInterrupt).
        if not isinstance(query, string_types):
            sys.exit('ERR: Group queries must be a single string\n'
                     ' Group: %s\n'
                     ' Query: %s\n' % (group, query)
                     )
        try:
            if site:
                site = self.client.sites(site)
                devices = site.devices.query.get(query=query)
            else:
                devices = self.client.devices.query.get(query=query)
        except HttpServerError as e:
            if '500' in str(e.response):
                _site = 'Correct site id?'
                _attr = 'Queried attributes actually exist?'
                questions = _site + '\n' + _attr
                sys.exit('ERR: 500 from server.\n%s' % questions)
            else:
                raise
        except UsageError:
            sys.exit('ERR: Could not connect to server. Running?')
        # Would do a list comprehension here, but would like to save code/time
        # and also acquire attributes in this step
        for host in devices:
            # Iterate through each device that matches query, assign hostname
            # to the group's hosts array and then use this single iteration as
            # a chance to update self._meta which will be used in the final
            # return
            hostname = host['hostname']
            obj[group]['hosts'].append(hostname)
            attributes = host['attributes']
            attributes.update({'site_id': host['site_id'], 'id': host['id']})
            self._meta['hostvars'].update({hostname: attributes})
        return obj
def parse_args():
    """Build the CLI parser and return validated arguments.

    Exactly one of ``--list`` or ``--host`` must be supplied; anything else
    exits with status 1.
    """
    description = __doc__.splitlines()[4]  # Just to avoid being redundant
    # conflict_handler='resolve' lets -h be reclaimed for --host below.
    parser = argparse.ArgumentParser(
        description=description,
        conflict_handler='resolve',
    )
    parser.add_argument(
        '--list', '-l',
        action='store_true',
        dest='list_',  # Avoiding syntax highlighting for list
        help='Print JSON object containing hosts to STDOUT',
    )
    parser.add_argument(
        '--host', '-h',
        action='store',
        help='Print JSON object containing hostvars for <host>',
    )
    args = parser.parse_args()
    wants_list = bool(args.list_)
    wants_host = bool(args.host)
    if not (wants_list or wants_host):  # Require at least one option
        parser.exit(status=1, message='No action requested')
    if wants_list and wants_host:  # Do not allow multiple options
        parser.exit(status=1, message='Too many actions requested')
    return args
def main():
    """Entry point: parse CLI arguments and dispatch to the inventory client."""
    args = parse_args()
    inventory = NSoTInventory()
    # parse_args() guarantees exactly one of the two flags was given.
    if args.list_:
        print(inventory.do_list())
    elif args.host:
        print(inventory.do_host(args.host))
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
eckucukoglu/arm-linux-gnueabihf | refs/heads/master | arm-linux-gnueabihf/libc/usr/lib/python2.7/unittest/test/test_setups.py | 153 | import sys
from cStringIO import StringIO
import unittest
def resultFactory(*_):
    """Return a fresh TestResult, ignoring whatever arguments the runner passes."""
    return unittest.TestResult()
class TestSetups(unittest.TestCase):
    """Tests for class- and module-level fixture handling in the default
    runner: setUpClass/tearDownClass and setUpModule/tearDownModule call
    counts, error and SkipTest propagation, and execution ordering across
    classes and modules (including via TestSuite.debug())."""
    def getRunner(self):
        # Quiet runner: plain TestResult objects, output to an in-memory
        # buffer so the tests themselves print nothing.
        return unittest.TextTestRunner(resultclass=resultFactory,
                                       stream=StringIO())
    def runTests(self, *cases):
        # Load all tests from the given case classes and run them through a
        # deliberately awkward suite structure.
        suite = unittest.TestSuite()
        for case in cases:
            tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
            suite.addTests(tests)
        runner = self.getRunner()
        # creating a nested suite exposes some potential bugs
        realSuite = unittest.TestSuite()
        realSuite.addTest(suite)
        # adding empty suites to the end exposes potential bugs
        suite.addTest(unittest.TestSuite())
        realSuite.addTest(unittest.TestSuite())
        return runner.run(realSuite)
    def test_setup_class(self):
        # setUpClass must run exactly once per class, before its tests.
        class Test(unittest.TestCase):
            setUpCalled = 0
            @classmethod
            def setUpClass(cls):
                Test.setUpCalled += 1
                unittest.TestCase.setUpClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.setUpCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_teardown_class(self):
        # tearDownClass must run exactly once per class, after its tests.
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_teardown_class_two_classes(self):
        # Each class gets its own single tearDownClass call.
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(Test2.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_setupclass(self):
        # A failing setUpClass records one error and skips the class's tests.
        class BrokenTest(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(BrokenTest)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'setUpClass (%s.BrokenTest)' % __name__)
    def test_error_in_teardown_class(self):
        # Errors in tearDownClass are reported but the tests still ran.
        class Test(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 2)
        self.assertEqual(Test.tornDown, 1)
        self.assertEqual(Test2.tornDown, 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'tearDownClass (%s.Test)' % __name__)
    def test_class_not_torndown_when_setup_fails(self):
        # If setUpClass fails, tearDownClass must not run at all.
        class Test(unittest.TestCase):
            tornDown = False
            @classmethod
            def setUpClass(cls):
                raise TypeError
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
                raise TypeError('foo')
            def test_one(self):
                pass
        self.runTests(Test)
        self.assertFalse(Test.tornDown)
    def test_class_not_setup_or_torndown_when_skipped(self):
        # A class-level skip bypasses both class fixtures entirely.
        class Test(unittest.TestCase):
            classSetUp = False
            tornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
            def test_one(self):
                pass
        Test = unittest.skip("hop")(Test)
        self.runTests(Test)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.tornDown)
    def test_setup_teardown_order_with_pathological_suite(self):
        # Tests from the same class/module are scattered across sub-suites;
        # fixtures must still fire once per class/module, in order.
        results = []
        class Module1(object):
            @staticmethod
            def setUpModule():
                results.append('Module1.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module1.tearDownModule')
        class Module2(object):
            @staticmethod
            def setUpModule():
                results.append('Module2.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module2.tearDownModule')
        class Test1(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 1')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 1')
            def testOne(self):
                results.append('Test1.testOne')
            def testTwo(self):
                results.append('Test1.testTwo')
        class Test2(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 2')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 2')
            def testOne(self):
                results.append('Test2.testOne')
            def testTwo(self):
                results.append('Test2.testTwo')
        class Test3(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 3')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 3')
            def testOne(self):
                results.append('Test3.testOne')
            def testTwo(self):
                results.append('Test3.testTwo')
        Test1.__module__ = Test2.__module__ = 'Module'
        Test3.__module__ = 'Module2'
        sys.modules['Module'] = Module1
        sys.modules['Module2'] = Module2
        first = unittest.TestSuite((Test1('testOne'),))
        second = unittest.TestSuite((Test1('testTwo'),))
        third = unittest.TestSuite((Test2('testOne'),))
        fourth = unittest.TestSuite((Test2('testTwo'),))
        fifth = unittest.TestSuite((Test3('testOne'),))
        sixth = unittest.TestSuite((Test3('testTwo'),))
        suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
        runner = self.getRunner()
        result = runner.run(suite)
        self.assertEqual(result.testsRun, 6)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(results,
                         ['Module1.setUpModule', 'setup 1',
                          'Test1.testOne', 'Test1.testTwo', 'teardown 1',
                          'setup 2', 'Test2.testOne', 'Test2.testTwo',
                          'teardown 2', 'Module1.tearDownModule',
                          'Module2.setUpModule', 'setup 3',
                          'Test3.testOne', 'Test3.testTwo',
                          'teardown 3', 'Module2.tearDownModule'])
    def test_setup_module(self):
        # setUpModule (looked up via sys.modules) runs once for the module.
        class Module(object):
            moduleSetup = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_setup_module(self):
        # A failing setUpModule skips all tests and all class fixtures in
        # that module, and tearDownModule must not run.
        class Module(object):
            moduleSetup = 0
            moduleTornDown = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
                raise TypeError('foo')
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(Module.moduleTornDown, 0)
        self.assertEqual(result.testsRun, 0)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'setUpModule (Module)')
    def test_testcase_with_missing_module(self):
        # A test class whose module is absent from sys.modules still runs.
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules.pop('Module', None)
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 2)
    def test_teardown_module(self):
        # tearDownModule runs once after all of the module's tests.
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_teardown_module(self):
        # An error in tearDownModule is reported once; tests and class
        # fixtures have already run normally.
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
                raise TypeError('foo')
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertTrue(Test.classSetUp)
        self.assertTrue(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'tearDownModule (Module)')
    def test_skiptest_in_setupclass(self):
        # SkipTest raised in setUpClass records a skip, not an error.
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
    def test_skiptest_in_setupmodule(self):
        # SkipTest raised in setUpModule records a skip, not an error.
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                raise unittest.SkipTest('foo')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpModule (Module)')
    def test_suite_debug_executes_setups_and_teardowns(self):
        # TestSuite.debug() must honour the same fixture ordering as run().
        ordering = []
        class Module(object):
            @staticmethod
            def setUpModule():
                ordering.append('setUpModule')
            @staticmethod
            def tearDownModule():
                ordering.append('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                ordering.append('setUpClass')
            @classmethod
            def tearDownClass(cls):
                ordering.append('tearDownClass')
            def test_something(self):
                ordering.append('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite.debug()
        expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
        self.assertEqual(ordering, expectedOrder)
    def test_suite_debug_propagates_exceptions(self):
        # Under debug(), fixture/test exceptions must escape to the caller
        # (phase selects which hook raises on each pass of the loop below).
        class Module(object):
            @staticmethod
            def setUpModule():
                if phase == 0:
                    raise Exception('setUpModule')
            @staticmethod
            def tearDownModule():
                if phase == 1:
                    raise Exception('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                if phase == 2:
                    raise Exception('setUpClass')
            @classmethod
            def tearDownClass(cls):
                if phase == 3:
                    raise Exception('tearDownClass')
            def test_something(self):
                if phase == 4:
                    raise Exception('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        _suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite = unittest.TestSuite()
        suite.addTest(_suite)
        messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
        for phase, msg in enumerate(messages):
            with self.assertRaisesRegexp(Exception, msg):
                suite.debug()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
talenhao/ServiceConnectEvolution | refs/heads/master | serconevo/netgraph/__init__.py | 1 | # -*- coding: UTF-8 -*-
"""
# ******************************************************
# * author: "Talen Hao(天飞)<talenhao@gmail.com>" *
# ******************************************************
"""
# todo ok 127.0.0.1 connect 127.0.0.1
# todo ok drop ip map
# todo ok process None:None raddr
import traceback
import re
# import pdb
# import time
# import user modles
from serconevo.agent import script_head
from serconevo.agent import db_close
from serconevo.agent import spend_time
from serconevo.agent import start_end_point
from serconevo import identify_line
from serconevo.agent import connection_table
from serconevo.agent import db_con
from serconevo.netgraph.drawgraph import netgraph_path
from serconevo.netgraph.drawgraph import load_draw
from serconevo.netgraph.drawgraph import pickle_to_file
# for log >>
import logging
import os
from serconevo.log4p import log4p
SCRIPT_NAME = os.path.basename(__file__)
# log end <<
# Module-wide logger; DEBUG level is hard-coded here.
pLogger = log4p.GetLogger(SCRIPT_NAME, logging.DEBUG).get_l()
# Accumulates (from_node, target_node) edge tuples collected by
# fetch_list_process(); de-duplicated and pickled by main().
fetch_list = []
def match_sort(project, string):
    """Extract an '=/seg1/.../segN' path (at most 5 segments) from *string*.

    When no such pattern is present, *string* is returned unchanged and a
    debug message mentioning *project* is logged.
    """
    pattern = re.compile(r'=(/[-\w]+){1,5}\b')
    match = pattern.search(string)
    pLogger.debug("re_findall: {!r}".format(match))
    if match is None:
        pLogger.debug("{} has no re compile!! It is : \n{}".format(project, string))
        return string
    matched_text = match.group(0)
    pLogger.debug("find_one is : {!r}".format(matched_text))
    return matched_text
def db_fetchall(sql_cmd, fetch=None):
    """Execute *sql_cmd* and fetch its result set.

    :param sql_cmd: SQL statement to execute.
    :param fetch: 'one' -> fetch a single row with the dict cursor;
                  'all' -> fetch every row with the server-side dict cursor;
                  any other value (including the default None) is an error:
                  it is logged and None is returned.
    :return: the fetched row(s), or None when *fetch* was not given.
    """
    pLogger.debug("SQL_CMD is ==> {!r}".format(sql_cmd))
    # pLogger.info("=====> DB operation command result: {!r}".format(db_con.cursor.rowcount))
    # pdb.set_trace()
    if fetch == 'one':
        db_con.dictcursor.execute(sql_cmd)
        result = db_con.dictcursor.fetchone()
    elif fetch == "all":
        db_con.ssdictcursor.execute(sql_cmd)
        result = db_con.ssdictcursor.fetchall()
    else:
        pLogger.error("fetch argument required.")
        result = None
    return result
def node_match(name, cwd=None, cmdline=None):
    """Map a process to its graph-node label.

    Infrastructure processes are filtered out entirely; java processes are
    labelled by their working directory; whitelisted names are used as-is;
    anything else is labelled by the first three fields of its command line.

    :param name: process name (p_name column).
    :param cwd: process working directory; only used for java processes.
    :param cmdline: string holding a Python list literal of command-line
        arguments, as stored by the collector.
    :return: the node label, or the string "drop" for ignored processes.
    """
    # Management/infrastructure daemons that would clutter the graph.
    drop_list = ['ssh', 'sshd', 'whois', 'salt-minion', 'salt-master', 'collect.py']
    pLogger.debug("param: {}, {}, {}".format(name, cwd, cmdline))
    # Process names kept verbatim as node labels.
    name_list = ['zabbix_server']
    if name in drop_list:
        return "drop"
    elif name == 'java':
        # Distinguish java processes by their working directory.
        return match_sort('java', "[" + name + "]_" + cwd)
    elif name in name_list:
        return name
    else:
        # cmdline is a stored list literal coming from the database.
        # Parse it with ast.literal_eval instead of eval() so that
        # malicious/corrupted DB content cannot execute arbitrary code.
        import ast
        # Keep only the first 3 command-line fields as the label.
        return ' '.join(ast.literal_eval(cmdline)[:3])
def match_nodes(connection_table, r_ip, r_port, server_uuid):
    """Look up the listener row that owns the (r_ip, r_port) endpoint.

    Loopback remotes are additionally constrained to the same host
    (server_uuid), because 127.0.0.1 is only meaningful machine-locally.
    Returns a single row dict, or None when no listener matches.
    """
    if r_ip == '127.0.0.1':
        match_sql_cmd = (
            "select L.l_ip, L.l_port, L.p_cmdline, L.p_cwd, L.p_name "
            "FROM {} L where L.l_ip = {!r} and L.l_port = {!r} and L.server_uuid = {!r} limit 1"
            .format(connection_table, r_ip, r_port, server_uuid))
    else:
        match_sql_cmd = (
            "select L.l_ip, L.l_port, L.p_cmdline, L.p_cwd, L.p_name "
            "FROM {} L where L.l_ip = {!r} and L.l_port = {!r} limit 1"
            .format(connection_table, r_ip, r_port))
    return db_fetchall(match_sql_cmd, fetch='one')
def connection_process(connection):
    """Turn one connection row into a (from_node, target_node) edge.

    Rows without a remote address are listening sockets and yield nothing;
    rows whose local or matched-remote process is in node_match()'s drop
    list are discarded as well.

    :param connection: dict-like row from the connection table.
    :return: (from_node, target_node) tuple of stripped labels, or None
        when the row was dropped.
    """
    c_r_ip = connection["r_ip"]
    c_r_port = connection["r_port"]
    # A missing remote address means this is a listen entry, not a
    # connection -- nothing to draw.  ('None' string and ' ' appear in
    # collected data as well as real None.)
    if c_r_ip == 'None' or c_r_ip is None or c_r_ip == ' ':
        return
    else:
        # c_l_ip = connection['l_ip']
        # c_l_port = connection["l_port"]
        c_p_name = connection['p_name']
        c_p_cwd = connection['p_cwd']
        c_p_cmdline = connection['p_cmdline']
        c_id = connection['id']
        # flag encodes the direction of the edge: 1 -> local process is
        # the source, 0 -> local process is the target (see below).
        flag = connection['flag']
        c_server_uuid = connection['server_uuid']
        pLogger.debug("\n{0}\nprocess id: {3} "
                      "connection is {1!r}, type: {2!r}, with flag {4!r}, type(flag)=> {5!r}"
                      .format(identify_line,
                              connection,
                              type(c_p_cmdline),
                              c_id,
                              flag,
                              type(flag)
                              )
                      )
        # Label for the local endpoint's process.
        c_result = node_match(c_p_name, cwd=c_p_cwd, cmdline=c_p_cmdline)
        # Try to resolve the remote endpoint to a known listener.
        match_node = match_nodes(connection_table, c_r_ip, c_r_port, c_server_uuid)
        if match_node:
            pLogger.debug("match_node is : {}".format(match_node))
            m_p_cmdline = match_node['p_cmdline']
            m_p_cwd = match_node['p_cwd']
            m_p_name = match_node['p_name']
            pLogger.debug("match_node: {}, {}, {}".format(m_p_name, m_p_cwd, m_p_cmdline))
            m_result = node_match(m_p_name, cwd=m_p_cwd, cmdline=m_p_cmdline)
        else:
            # Unknown remote (e.g. an external server): label it ip:port.
            convert_node = c_r_ip + ':' + c_r_port
            pLogger.debug("convert_node with port {!r}".format(convert_node))
            m_result = convert_node
        pLogger.debug("c_result=>{!r}, m_result=>{!r}".format(c_result, m_result))
        if c_result == "drop" or m_result == 'drop':
            pLogger.warn("process {} has connection {} are not needed, drop.".format(c_p_name, m_result))
            pLogger.debug("drop item is : {}".format(connection))
            return
        else:
            if flag == 1:
                from_node = c_result
                target_node = m_result
            elif flag == 0:
                from_node = m_result
                target_node = c_result
            else:
                pLogger.error("flag is needed!")
                return
            # finally from_node, target_node
            pLogger.debug("{}\nfrom_node :{!r} \ntarget_node: {!r}".format(identify_line, from_node, target_node))
            # time.sleep(1)
            return from_node.strip(), target_node.strip()
def fetch_list_process(result_tuple):
    """Collect a non-empty edge tuple into the module-level fetch_list."""
    pLogger.debug('result_tuple is {!r}'.format(result_tuple))
    if not result_tuple:
        return
    fetch_list.append(result_tuple)
def get_relation_list_from_db():
    """Fetch every row of the connection table.

    Possible row shapes:
      ip v,port v: connection match listen
      ip v,port x: listen program exit after collect connections
      ip x,port v: this explains nothing
      ip x,port x: this is a out server
    """
    sql = "SELECT * FROM {}".format(connection_table)
    return db_fetchall(sql, fetch='all')
@db_close
def process_ralation(connections):
    """Convert every connection row into an edge via connection_process().

    Collected edges end up in the module-level fetch_list.  (The function
    name's spelling is kept as-is for backward compatibility.)
    """
    pLogger.info("Run connection_process with PID {!r}".format(os.getpid()))
    for row_number, con in enumerate(connections, start=1):
        # Progress heartbeat every 1000 rows.
        if row_number % 1000 == 0:
            pLogger.info("Now process No. => {!r}".format(row_number))
        fetch_list_process(connection_process(con))
    pLogger.info("All processes done!")
@spend_time
@start_end_point(SCRIPT_NAME)
@script_head
def main():
    """Build the edge list from the DB, pickle it, then draw the graph."""
    try:
        connections = get_relation_list_from_db()
        process_ralation(connections)
        # De-duplicate edges before pickling/drawing.
        edges_list = list(set(fetch_list))
        pLogger.info("edges_list len {!r}".format(len(edges_list)))
        pickle_to_file(edges_list, netgraph_path)
    except Exception:
        # Best-effort: log the failure, skip drawing.
        traceback.print_exc()
    else:
        load_draw()
# Script entry point.
if __name__ == "__main__":
    main()
|
mattpap/sympy-polys | refs/heads/master | sympy/functions/elementary/tests/test_integers.py | 9 | from sympy import Symbol, floor, nan, oo, E, symbols, ceiling, pi, Rational, \
Real, I, sin, exp, log, factorial
from sympy.utilities.pytest import XFAIL
def test_floor():
    """floor(): exact values, symbolic behavior, and complex arguments."""
    x = Symbol('x')
    y = Symbol('y', real=True)
    k, n = symbols('kn', integer=True)

    # Special values and exact numbers.
    assert floor(nan) == nan
    assert floor(oo) == oo
    assert floor(-oo) == -oo
    assert floor(0) == 0
    assert floor(1) == 1
    assert floor(-1) == -1
    assert floor(E) == 2
    assert floor(-E) == -3
    assert floor(2*E) == 5
    assert floor(-2*E) == -6
    assert floor(pi) == 3
    assert floor(-pi) == -4
    assert floor(Rational(1, 2)) == 0
    assert floor(-Rational(1, 2)) == -1
    assert floor(Rational(7, 3)) == 2
    assert floor(-Rational(7, 3)) == -3
    assert floor(Real(17.0)) == 17
    assert floor(-Real(17.0)) == -17
    assert floor(Real(7.69)) == 7
    assert floor(-Real(7.69)) == -8

    # Imaginary and complex arguments: floor acts componentwise.
    assert floor(I) == I
    assert floor(-I) == -I
    assert floor(oo*I) == oo*I
    assert floor(-oo*I) == -oo*I
    assert floor(2*I) == 2*I
    assert floor(-2*I) == -2*I
    assert floor(I/2) == 0
    assert floor(-I/2) == -I
    assert floor(E + 17) == 19
    assert floor(pi + 2) == 5
    # Sums of distinct irrationals stay unevaluated.
    assert floor(E + pi) == floor(E + pi)
    assert floor(I + pi) == floor(I + pi)
    assert floor(floor(pi)) == 3
    assert floor(floor(y)) == floor(y)
    assert floor(floor(x)) == floor(floor(x))

    # Symbolic arguments: unevaluated unless integrality is known.
    assert floor(x) == floor(x)
    assert floor(2*x) == floor(2*x)
    assert floor(k*x) == floor(k*x)
    assert floor(k) == k
    assert floor(2*k) == 2*k
    assert floor(k*n) == k*n
    assert floor(k/2) == floor(k/2)
    assert floor(x + y) == floor(x + y)
    assert floor(x + 3) == floor(x + 3)
    assert floor(x + k) == floor(x + k)
    # Integer offsets split out of the floor of a real symbol.
    assert floor(y + 3) == floor(y) + 3
    assert floor(y + k) == floor(y) + k
    assert floor(3 + I*y + pi) == 6 + floor(y)*I
    assert floor(k + n) == k + n
    assert floor(x*I) == floor(x*I)
    assert floor(k*I) == k*I
    assert floor(Rational(23, 10) - E*I) == 2 - 3*I

    # Numeric evaluation of transcendental arguments.
    assert floor(sin(1)) == 0
    assert floor(sin(-1)) == -1
    assert floor(exp(2)) == 7
    # log(8)/log(2) is not simplified to 3, so floor stays symbolic here;
    # evalf(chop=True) is needed to land on the exact integer.
    assert floor(log(8)/log(2)) != 2
    assert int(floor(log(8)/log(2)).evalf(chop=True)) == 3
    assert floor(factorial(50)/exp(1)) == \
        11188719610782480504630258070757734324011354208865721592720336800
def test_ceiling():
    """ceiling(): mirror of test_floor for the upper rounding function."""
    x = Symbol('x')
    y = Symbol('y', real=True)
    k, n = symbols('kn', integer=True)

    # Special values and exact numbers.
    assert ceiling(nan) == nan
    assert ceiling(oo) == oo
    assert ceiling(-oo) == -oo
    assert ceiling(0) == 0
    assert ceiling(1) == 1
    assert ceiling(-1) == -1
    assert ceiling(E) == 3
    assert ceiling(-E) == -2
    assert ceiling(2*E) == 6
    assert ceiling(-2*E) == -5
    assert ceiling(pi) == 4
    assert ceiling(-pi) == -3
    assert ceiling(Rational(1, 2)) == 1
    assert ceiling(-Rational(1, 2)) == 0
    assert ceiling(Rational(7, 3)) == 3
    assert ceiling(-Rational(7, 3)) == -2
    assert ceiling(Real(17.0)) == 17
    assert ceiling(-Real(17.0)) == -17
    assert ceiling(Real(7.69)) == 8
    assert ceiling(-Real(7.69)) == -7

    # Imaginary and complex arguments: ceiling acts componentwise.
    assert ceiling(I) == I
    assert ceiling(-I) == -I
    assert ceiling(oo*I) == oo*I
    assert ceiling(-oo*I) == -oo*I
    assert ceiling(2*I) == 2*I
    assert ceiling(-2*I) == -2*I
    assert ceiling(I/2) == I
    assert ceiling(-I/2) == 0
    assert ceiling(E + 17) == 20
    assert ceiling(pi + 2) == 6
    # Sums of distinct irrationals stay unevaluated.
    assert ceiling(E + pi) == ceiling(E + pi)
    assert ceiling(I + pi) == ceiling(I + pi)
    assert ceiling(ceiling(pi)) == 4
    assert ceiling(ceiling(y)) == ceiling(y)
    assert ceiling(ceiling(x)) == ceiling(ceiling(x))

    # Symbolic arguments: unevaluated unless integrality is known.
    assert ceiling(x) == ceiling(x)
    assert ceiling(2*x) == ceiling(2*x)
    assert ceiling(k*x) == ceiling(k*x)
    assert ceiling(k) == k
    assert ceiling(2*k) == 2*k
    assert ceiling(k*n) == k*n
    assert ceiling(k/2) == ceiling(k/2)
    assert ceiling(x + y) == ceiling(x + y)
    assert ceiling(x + 3) == ceiling(x + 3)
    assert ceiling(x + k) == ceiling(x + k)
    # Integer offsets split out of the ceiling of a real symbol.
    assert ceiling(y + 3) == ceiling(y) + 3
    assert ceiling(y + k) == ceiling(y) + k
    assert ceiling(3 + pi + y*I) == 7 + ceiling(y)*I
    assert ceiling(k + n) == k + n
    assert ceiling(x*I) == ceiling(x*I)
    assert ceiling(k*I) == k*I
    assert ceiling(Rational(23, 10) - E*I) == 3 - 2*I

    # Numeric evaluation of transcendental arguments.
    assert ceiling(sin(1)) == 1
    assert ceiling(sin(-1)) == 0
    assert ceiling(exp(2)) == 8
    # -log(8)/log(2) is not simplified to -3, so ceiling stays symbolic;
    # evalf(chop=True) is needed to land on the exact integer.
    assert ceiling(-log(8)/log(2)) != -2
    assert int(ceiling(-log(8)/log(2)).evalf(chop=True)) == -3
    assert ceiling(factorial(50)/exp(1)) == \
        11188719610782480504630258070757734324011354208865721592720336801
@XFAIL
def test_issue_1050():
    """Known failure: floor() does not yet split mixed real/imaginary sums.

    NOTE: `y` was previously undefined at module scope, so this XFAIL was
    being satisfied by a NameError rather than by the actual floor()
    limitation being tracked; the symbol is now defined locally so the
    test exercises the intended behavior.
    """
    y = Symbol('y', real=True)
    assert floor(3 + pi*I + y*I) == 3 + floor(pi+y)*I
    assert floor(3*I + pi*I + y*I) == floor(3+pi+y)*I
    assert floor(3 + E + pi*I + y*I) == 5 + floor(pi+y)*I
|
hynnet/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/copy.py | 253 | """Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copy_reg import dispatch_table
class Error(Exception):
    """Raised when an object cannot be (shallow- or deep-)copied."""
    pass
error = Error   # backward compatibility

try:
    # Jython-only mapping type; absent on CPython.
    from org.python.core import PyStringMap
except ImportError:
    PyStringMap = None

__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    cls = type(x)

    # 1) Fast path: exact-type dispatch (immutables, list/dict/set, ...).
    copier = _copy_dispatch.get(cls)
    if copier:
        return copier(x)

    # 2) A __copy__ hook defined on the class.
    copier = getattr(cls, "__copy__", None)
    if copier:
        return copier(x)

    # 3) copy_reg-registered reduction function, then the pickle
    #    protocol (__reduce_ex__ / __reduce__) as a last resort.
    reductor = dispatch_table.get(cls)
    if reductor:
        rv = reductor(x)
    else:
        reductor = getattr(x, "__reduce_ex__", None)
        if reductor:
            rv = reductor(2)
        else:
            reductor = getattr(x, "__reduce__", None)
            if reductor:
                rv = reductor()
            else:
                raise Error("un(shallow)copyable object of type %s" % cls)

    # Rebuild from the reduction tuple; deep=0 selects shallow copying.
    return _reconstruct(x, rv, 0)
# Exact-type dispatch table for copy(); 'd' is a short-lived alias used
# only while the table is populated below (deleted at the end).
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are shared, not duplicated.
    return x
for t in (type(None), int, long, float, bool, str, tuple,
          frozenset, type, xrange, types.ClassType,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
# These types may be missing on some builds, hence getattr with default.
for name in ("ComplexType", "UnicodeType", "CodeType"):
    t = getattr(types, name, None)
    if t is not None:
        d[t] = _copy_immutable

def _copy_with_constructor(x):
    # list/dict/set are copied by re-running their constructor on x.
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

def _copy_inst(x):
    """Shallow-copy an old-style (Python 2) class instance."""
    if hasattr(x, '__copy__'):
        return x.__copy__()
    if hasattr(x, '__getinitargs__'):
        args = x.__getinitargs__()
        y = x.__class__(*args)
    else:
        # Create an instance without invoking __init__.
        y = _EmptyClass()
        y.__class__ = x.__class__
    if hasattr(x, '__getstate__'):
        state = x.__getstate__()
    else:
        state = x.__dict__
    if hasattr(y, '__setstate__'):
        y.__setstate__(state)
    else:
        y.__dict__.update(state)
    return y
d[types.InstanceType] = _copy_inst

# Drop the temporary alias; the table survives as _copy_dispatch.
del d
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    if memo is None:
        memo = {}

    # Already copied during this pass?  (_nil is a unique sentinel so a
    # legitimate None/False memo value is not mistaken for "missing".)
    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        return y

    cls = type(x)

    # 1) Exact-type dispatch (atomic types, list, tuple, dict, ...).
    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            # Classes themselves (instances of a metaclass) are atomic.
            y = _deepcopy_atomic(x, memo)
        else:
            # 2) Instance-level __deepcopy__ hook.
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                # 3) copy_reg dispatch table, then the pickle protocol.
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)

    memo[d] = y
    _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
# Exact-type dispatch table for deepcopy(); 'd' is a short-lived alias
# (deleted after _reconstruct below).
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic/immutable values are shared rather than copied.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[long] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
    d[complex] = _deepcopy_atomic
except NameError:
    pass
d[str] = _deepcopy_atomic
try:
    d[unicode] = _deepcopy_atomic
except NameError:
    pass
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[xrange] = _deepcopy_atomic
d[types.ClassType] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic

def _deepcopy_list(x, memo):
    y = []
    # Register the copy *before* recursing so self-referential lists
    # resolve to the copy instead of recursing forever.
    memo[id(x)] = y
    for a in x:
        y.append(deepcopy(a, memo))
    return y
d[list] = _deepcopy_list

def _deepcopy_tuple(x, memo):
    # Elements must be copied first: an immutable tuple cannot be put in
    # the memo before it is fully built.
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    d = id(x)   # NOTE: locally shadows the module-level dispatch alias
    try:
        # Copying an element may have put x itself into the memo.
        return memo[d]
    except KeyError:
        pass
    # If no element actually changed, reuse the original tuple.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    memo[d] = y
    return y
d[tuple] = _deepcopy_tuple

def _deepcopy_dict(x, memo):
    y = {}
    # Register before recursing so self-referential dicts work.
    memo[id(x)] = y
    for key, value in x.iteritems():
        y[deepcopy(key, memo)] = deepcopy(value, memo)
    return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict

def _deepcopy_method(x, memo): # Copy instance methods
    # Deep-copy only the bound instance; function and class are shared.
    return type(x)(x.im_func, deepcopy(x.im_self, memo), x.im_class)
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _deepcopy_inst(x, memo):
    """Deep-copy an old-style (Python 2) class instance."""
    if hasattr(x, '__deepcopy__'):
        return x.__deepcopy__(memo)
    if hasattr(x, '__getinitargs__'):
        args = x.__getinitargs__()
        args = deepcopy(args, memo)
        y = x.__class__(*args)
    else:
        # Create the instance without running __init__.
        y = _EmptyClass()
        y.__class__ = x.__class__
    # Register before copying state so cyclic references resolve.
    memo[id(x)] = y
    if hasattr(x, '__getstate__'):
        state = x.__getstate__()
    else:
        state = x.__dict__
    state = deepcopy(state, memo)
    if hasattr(y, '__setstate__'):
        y.__setstate__(state)
    else:
        y.__dict__.update(state)
    return y
d[types.InstanceType] = _deepcopy_inst
def _reconstruct(x, info, deep, memo=None):
    """Rebuild an object from a pickle-style reduction tuple *info*.

    *info* is either a string (x is treated as atomic and returned as-is)
    or a tuple (callable, args[, state[, listiter[, dictiter]]]).
    *deep* selects deep (1) vs shallow (0) copying of args/state/items.
    """
    if isinstance(info, str):
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]
    if n > 2:
        state = info[2]
    else:
        state = {}
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Register the copy before filling it in so self-references resolve.
    memo[id(x)] = y
    if state:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # state may be a (dict, slots-dict) pair for __slots__ classes.
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.iteritems():
                    setattr(y, key, value)
    # Replay sequence items (list-like) and mapping items (dict-like).
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y

# Module clean-up: the short-lived table alias and the types module
# reference are no longer needed.
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
    """Blank old-style class; instances get their real __class__ patched in."""
    pass
def _test():
    """Ad-hoc Python 2 smoke test: prints comparison results to stdout.

    Exercises copy()/deepcopy() on builtins, an old-style class with
    pickle hooks and a custom __deepcopy__, self-referential containers,
    and a dict subclass.
    """
    l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
         {'abc': 'ABC'}, (), [], {}]
    l1 = copy(l)
    print l1==l
    l1 = map(copy, l)
    print l1==l
    l1 = deepcopy(l)
    print l1==l
    class C:
        # Old-style class exercising __getstate__/__setstate__ and
        # __deepcopy__.
        def __init__(self, arg=None):
            self.a = 1
            self.arg = arg
            if __name__ == '__main__':
                import sys
                file = sys.argv[0]
            else:
                file = __file__
            # Open/close a real file so the instance has held a resource.
            self.fp = open(file)
            self.fp.close()
        def __getstate__(self):
            return {'a': self.a, 'arg': self.arg}
        def __setstate__(self, state):
            for key, value in state.iteritems():
                setattr(self, key, value)
        def __deepcopy__(self, memo=None):
            new = self.__class__(deepcopy(self.arg, memo))
            new.a = self.a
            return new
    c = C('argument sketch')
    l.append(c)
    l2 = copy(l)
    print l == l2
    print l
    print l2
    l2 = deepcopy(l)
    print l == l2
    print l
    print l2
    # Make the list self-referential, then copy it both ways.
    l.append({l[1]: l, 'xyz': l[2]})
    l3 = copy(l)
    import repr
    print map(repr.repr, l)
    print map(repr.repr, l1)
    print map(repr.repr, l2)
    print map(repr.repr, l3)
    l3 = deepcopy(l)
    import repr
    print map(repr.repr, l)
    print map(repr.repr, l1)
    print map(repr.repr, l2)
    print map(repr.repr, l3)
    class odict(dict):
        # dict subclass with an extra attribute, to check that subclass
        # state survives deepcopy.
        def __init__(self, d = {}):
            self.a = 99
            dict.__init__(self, d)
        def __setitem__(self, k, i):
            dict.__setitem__(self, k, i)
            self.a
    o = odict({"A" : "B"})
    x = deepcopy(o)
    print(o, x)

if __name__ == '__main__':
    _test()
|
rdebroiz/presto | refs/heads/master | pipeline.py | 1 | import logging
from pprint import pformat
import path
import networkx as nx
from settings import FAIL, ENDCBOLD, PRESTO_GRAPH_FILENAME, BOLD, ENDC
from node import Root
class PipelineError(Exception):
    """Base class for pipeline-construction errors."""
    pass

class PipelineCyclicError(PipelineError):
    """The dependency graph contains at least one cycle."""
    pass

class PipelineDependenceError(PipelineError):
    """A node declares a parent that does not exist in the pipeline."""
    pass
class Pipeline():
    """Directed acyclic dependency graph of pipeline nodes built from YAML.

    Construction: create a synthetic root node, build a Node per YAML
    document (recursing into '__FILE__' includes), add one edge per
    declared parent, reject cyclic graphs, prune redundant direct edges
    (_thin) and verify that every declared parent exists.

    Raises PipelineCyclicError or PipelineDependenceError on bad input.
    """

    def __init__(self, yaml_documents):
        # NOTE(review): these were previously *class* attributes, so all
        # Pipeline instances shared one graph/root/node-table and building
        # a second pipeline in the same process silently merged into the
        # first graph.  They are now created per instance.
        self._root = Root()
        self._graph = nx.DiGraph()
        # Node registry keyed by node name, seeded with the root.
        self._nodes = {self._root.name: self._root}
        self._graph.add_node(self._root.name)
        # build graph
        self._build_nodes_from_documents(yaml_documents)
        self._build_edges()
        if self._cycle_detection():
            raise PipelineCyclicError()  # TODO what would be relevant here?
        # refine graph
        self._thin()
        if self._check_nodes_parents():
            raise PipelineDependenceError()

    def show(self):
        """Print the graph and render it to PRESTO_GRAPH_FILENAME via graphviz."""
        agraph = nx.drawing.nx_agraph.to_agraph(self._graph)
        print(BOLD, agraph, ENDC)
        agraph.layout(prog='dot')
        agraph.draw(PRESTO_GRAPH_FILENAME)

    def _build_nodes_from_documents(self, documents):
        """Create Node objects from YAML documents, following '__FILE__' includes."""
        from node import Node
        from yaml_io import YamlIO
        from evaluator import Evaluator
        from data_model import DataModel
        evltr = Evaluator()
        for doc in documents:
            try:
                if '__FILE__' in doc:
                    # Include directive: load the referenced file (relative
                    # paths are resolved against the document path) and
                    # recurse into its documents.
                    filename = path.Path(evltr.evaluate(doc['__FILE__']))
                    if not filename.isabs():
                        filename = DataModel.document_path / filename
                    d = YamlIO.load_all_yaml(filename)
                    self._build_nodes_from_documents(d)
                else:
                    try:
                        node = Node(doc)
                    except BaseException:
                        logging.critical("Unable to build node from: \n%s",
                                         pformat(doc))
                        raise
                    self._graph.add_node(node.name)
                    self._nodes[node.name] = node
            except TypeError:
                # doc was not a mapping -- typically a trailing '---'.
                logging.critical("A '__FILE__' entry is probably missing in "
                                 "the given pipe.yaml file' \n"
                                 "(the file shouldn't end with '---')")
                raise

    def _build_edges(self):
        """Add one parent -> child edge for every declared dependency."""
        for node in self._nodes:
            for parent in self._nodes[node].parents:
                self._graph.add_edge(parent, node)

    def _cycle_detection(self):
        """Log every simple cycle; return True when at least one exists."""
        have_cycle = False
        for cycle in nx.simple_cycles(self._graph):
            have_cycle = True
            logging.error("Cycle found: "
                          "%s", pformat(cycle))
        return have_cycle

    def _thin(self):
        """
        Remove all edges between one node and one of his parents
        if this parent is already one of the ancestors of any other of his
        parents.
        example:
                  A
                  *
                 / \
                o   o
           B *o----* C
              /    /
             o    /
         D  *    /
             \  /
              oo
              *
              E
        becomes
           A  *
              |
              o
           C  *
             /|
            / |
           o  |
           B *|
            \ |
             o|
          D  *|
             \|
             oo
          E  *
        """
        for n in self._graph.nodes():
            for cur_p in self._graph.predecessors(n):
                # The direct edge cur_p -> n is redundant when cur_p is
                # already an ancestor of another parent of n.
                others = (p for p in self._graph.predecessors(n)
                          if p is not cur_p)
                if any(cur_p in nx.ancestors(self._graph, p) for p in others):
                    self._graph.remove_edge(cur_p, n)

    def _check_nodes_parents(self):
        """Log every parent that is not a known node; return the error count."""
        error = 0
        for node in self.nodes.values():
            for parent in node.parents:
                if parent not in self.nodes.keys():
                    msg = ("Cant find node: " +
                           FAIL + "'{}'".format(parent) + ENDCBOLD +
                           "\nDeclared in __DEPEND_ON__ for node: '{}'".format(node.name))
                    logging.error(msg)
                    error += 1
        return error

    def walk(self, node):
        """Yield every descendant of *node* in topological order."""
        # TODO there must have a better way to do it.
        # yield node
        for n in nx.topological_sort(self._graph,
                                     nx.descendants(self._graph, node.name)):
            yield self._nodes[n]

    @property
    def root(self):
        # Synthetic root node every pipeline starts from.
        return self._root

    @property
    def nodes(self):
        # Mapping of node name -> Node (includes the root).
        return self._nodes
|
unifycore/pcp-sdn | refs/heads/master | pcp_sdn_source/pcp_sdn/tests/test_pcpmessage.py | 1 | import unittest
from ..pcp import pcpmessage
#===============================================================================
class TestPcpMessage(unittest.TestCase):
    """Tests for pcpmessage.PcpMessage.parse() and .serialize().

    setUp builds parallel fixtures: field dictionaries (pcp_fields_*) and
    the corresponding wire-format byte strings (pcp_data_*), so each test
    can compare parsed fields or serialized bytes against expectations.
    """
    def setUp(self):
        # Source address the requests pretend to come from; it matches the
        # client IP embedded in pcp_data_request_map_common below.
        self.pcp_client_ip = "192.168.1.1"
        # Common header fields of a MAP request.
        self.pcp_fields_request_map_common = {
            'version': 2,
            'message_type': pcpmessage.PcpMessageTypes.REQUEST,
            'opcode': pcpmessage.PcpMessageOpcodes.MAP,
            'lifetime': 300,
            'pcp_client_ip': self.pcp_client_ip
        }
        # PEER/ANNOUNCE request headers share the MAP header except opcode.
        self.pcp_fields_request_peer_common = dict(self.pcp_fields_request_map_common)
        self.pcp_fields_request_peer_common['opcode'] = pcpmessage.PcpMessageOpcodes.PEER
        self.pcp_fields_request_announce_common = dict(self.pcp_fields_request_map_common)
        self.pcp_fields_request_announce_common['opcode'] = pcpmessage.PcpMessageOpcodes.ANNOUNCE
        # Common header fields of a MAP response.
        self.pcp_fields_response_map_common = {
            'version': 2,
            'message_type': pcpmessage.PcpMessageTypes.RESPONSE,
            'opcode': pcpmessage.PcpMessageOpcodes.MAP,
            'result_code': 1,
            'lifetime': 300,
            'epoch_time': 123461316
        }
        # MAP opcode payload fields.
        self.pcp_fields_map = {
            'mapping_nonce': "0102030464ff8e110a090204",
            'protocol': 0x11,
            'internal_port': 1250,
            'external_port': 5555,
            'external_ip': "200.0.0.1"
        }
        # PEER opcode payload: MAP payload plus the remote peer endpoint.
        self.pcp_fields_peer = {
            'mapping_nonce': "0102030464ff8e110a090204",
            'protocol': 0x11,
            'internal_port': 1250,
            'external_port': 5555,
            'external_ip': "200.0.0.1",
            'remote_peer_port': 4444,
            'remote_peer_ip': "210.0.0.100"
        }
        # Wire format of the common MAP request header; the last 16 bytes
        # are the client IP 192.168.1.1 as an IPv4-mapped IPv6 address
        # (::ffff:c0a8:0101).
        self.pcp_data_request_map_common = (
            '\x02\x01\x00\x00'
            '\x00\x00\x01,'
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xc0\xa8\x01\x01'
        )
        # Derive the PEER request header by patching the opcode bits of
        # byte 1 while keeping the message-type bit.
        data_list = list(self.pcp_data_request_map_common)
        message_type = ord(data_list[1]) >> 7
        data_list[1] = chr(message_type | pcpmessage.PcpMessageOpcodes.PEER)
        self.pcp_data_request_peer_common = ''.join(data_list)
        # Same derivation for the ANNOUNCE request header.
        data_list = list(self.pcp_data_request_map_common)
        message_type = ord(data_list[1]) >> 7
        data_list[1] = chr(message_type | pcpmessage.PcpMessageOpcodes.ANNOUNCE)
        self.pcp_data_request_announce_common = ''.join(data_list)
        # Wire format of the MAP payload (nonce, protocol, ports, ext. IP).
        self.pcp_data_map = (
            '\x01\x02\x03\x04d\xff\x8e\x11\n\t\x02\x04'
            '\x11\x00\x00\x00'
            '\x04\xe2\x15\xb3'
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xc8\x00\x00\x01'
        )
        # Wire format of the PEER payload (MAP payload + remote endpoint).
        self.pcp_data_peer = (
            '\x01\x02\x03\x04d\xff\x8e\x11\n\t\x02\x04'
            '\x11\x00\x00\x00'
            '\x04\xe2\x15\xb3'
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xc8\x00\x00\x01'
            '\x11\\\x00\x00'
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xd2\x00\x00d'
        )
        # Wire format of the common MAP response header.
        self.pcp_data_response_map_common = (
            '\x02\x81\x00\x01'
            '\x00\x00\x01,'
            '\x07[\xde\xc4'
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        )

    def _test_parse_pcp_opcode(self, data, fields):
        # Shared helper: parse *data* and check every expected field value.
        pcp_message = pcpmessage.PcpMessage.parse(data, self.pcp_client_ip)
        for field_name in fields.keys():
            self.assertEqual(
                pcp_message[field_name], fields[field_name],
                msg="{0}: {1} != {2}".format(field_name, pcp_message[field_name], fields[field_name]))

    def test_parse_pcp_request_map(self):
        fields = self.pcp_fields_request_map_common
        fields.update(self.pcp_fields_map)
        self._test_parse_pcp_opcode(
            self.pcp_data_request_map_common + self.pcp_data_map, fields)

    def test_parse_pcp_request_peer(self):
        fields = self.pcp_fields_request_peer_common
        fields.update(self.pcp_fields_peer)
        self._test_parse_pcp_opcode(
            self.pcp_data_request_peer_common + self.pcp_data_peer, fields)

    def test_parse_pcp_request_announce(self):
        self._test_parse_pcp_opcode(
            self.pcp_data_request_announce_common, self.pcp_fields_request_announce_common)

    def test_parse_pcp_message_data_length_less_than_minimum(self):
        # Too short even to read the version -> parse yields nothing.
        pcp_message = pcpmessage.PcpMessage.parse('\x00', self.pcp_client_ip)
        self.assertEqual(pcp_message, None)

    def test_parse_pcp_message_supported_version_data_length_less_than_minimum(self):
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_map_common[:10], self.pcp_client_ip)
        self.assertEqual(pcp_message, None)

    def test_parse_pcp_message_data_length_not_multiplier_of_four(self):
        # PCP messages must be a multiple of 4 octets.
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_announce_common + '\x00', self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)

    def test_parse_pcp_message_data_length_greater_than_maximum(self):
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_announce_common + '\x00' * 1100, self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)

    def test_parse_pcp_request_map_invalid_data_length(self):
        # Truncated MAP payload.
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_map_common + self.pcp_data_map[:10], self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)

    def test_parse_pcp_request_peer_invalid_data_length(self):
        # Truncated PEER payload (note: paired with the MAP header here --
        # still malformed either way since the payload is cut short).
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_map_common + self.pcp_data_peer[:20], self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)

    def test_parse_pcp_request_unsupported_version(self):
        # Patch the version byte to 1 (unsupported).
        self.pcp_data_request_announce_common = '\x01' + self.pcp_data_request_announce_common[1:]
        pcp_message = pcpmessage.PcpMessage.parse(self.pcp_data_request_announce_common, self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.UNSUPP_VERSION)

    def test_parse_pcp_request_unsupported_opcode(self):
        unsupported_opcode = '\x07'
        self.pcp_data_request_announce_common = (self.pcp_data_request_announce_common[0] +
            unsupported_opcode + self.pcp_data_request_announce_common[2:])
        pcp_message = pcpmessage.PcpMessage.parse(self.pcp_data_request_announce_common, self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.UNSUPP_OPCODE)

    def test_parse_pcp_request_ip_address_mismatch(self):
        # Source address differs from the client IP inside the message.
        pcp_message = pcpmessage.PcpMessage.parse(self.pcp_data_request_announce_common, "192.168.1.100")
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.ADDRESS_MISMATCH)

    def test_parse_pcp_request_map_malformed_request(self):
        # Non-zero lifetime, zero protocol, non-zero internal port
        protocol = '\x00'
        self.pcp_data_map = self.pcp_data_map[:12] + protocol + self.pcp_data_map[13:]
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_map_common + self.pcp_data_map, self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)

    def test_parse_pcp_request_map_unsupported_zero_internal_port(self):
        # Zero internal port with non-zero protocol is reported as
        # UNSUPP_PROTOCOL by the implementation under test.
        internal_port = '\x00\x00'
        self.pcp_data_map = self.pcp_data_map[:16] + internal_port + self.pcp_data_map[18:]
        pcp_message = pcpmessage.PcpMessage.parse(
            self.pcp_data_request_map_common + self.pcp_data_map, self.pcp_client_ip)
        self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.UNSUPP_PROTOCOL)

    def test_serialize_pcp_request_map(self):
        fields = self.pcp_fields_request_map_common
        fields.update(self.pcp_fields_map)
        pcp_message = pcpmessage.PcpMessage(**fields)
        expected_data = self.pcp_data_request_map_common + self.pcp_data_map
        self.assertEqual(pcp_message.serialize(), expected_data)

    def test_serialize_pcp_request_peer(self):
        fields = self.pcp_fields_request_peer_common
        fields.update(self.pcp_fields_peer)
        pcp_message = pcpmessage.PcpMessage(**fields)
        expected_data = self.pcp_data_request_peer_common + self.pcp_data_peer
        self.assertEqual(pcp_message.serialize(), expected_data)

    def test_serialize_pcp_request_announce(self):
        pcp_message = pcpmessage.PcpMessage(**self.pcp_fields_request_announce_common)
        expected_data = self.pcp_data_request_announce_common
        self.assertEqual(pcp_message.serialize(), expected_data)

    def test_serialize_pcp_response_map(self):
        fields = self.pcp_fields_response_map_common
        fields.update(self.pcp_fields_map)
        pcp_message = pcpmessage.PcpMessage(**fields)
        expected_data = self.pcp_data_response_map_common + self.pcp_data_map
        self.assertEqual(pcp_message.serialize(), expected_data)

    def test_serialize_pcp_response_unsuccessful_result_code_copy_pcp_client_ip_addr_part(self):
        # On a non-success result code the response echoes part of the
        # client's IPv4-mapped address in the header.
        pcp_message = pcpmessage.PcpMessage(**self.pcp_fields_response_map_common)
        pcp_message.update(self.pcp_fields_map)
        pcp_message['pcp_client_ip'] = "192.168.1.1"
        pcp_response_data = self.pcp_data_response_map_common[:len(self.pcp_data_response_map_common)-12]
        pcp_response_data += '\x00\x00\x00\x00\x00\x00\xff\xff\xc0\xa8\x01\x01'
        expected_data = pcp_response_data + self.pcp_data_map
        self.assertEqual(pcp_message.serialize(), expected_data)
|
patriciolobos/desa8 | refs/heads/master | openerp/addons/product/report/__init__.py | 452 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_pricelist
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
glaubitz/fs-uae-debian | refs/heads/master | launcher/OpenGL/WGL/EXT/pbuffer.py | 8 | '''OpenGL extension EXT.pbuffer
This module customises the behaviour of the
OpenGL.raw.WGL.EXT.pbuffer to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/pbuffer.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.EXT.pbuffer import *
from OpenGL.raw.WGL.EXT.pbuffer import _EXTENSION_NAME
def glInitPbufferEXT():
    """Return True when the WGL_EXT_pbuffer extension is available."""
    # Imported lazily, mirroring the autogenerated module's pattern.
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION |
robert-kisteleki/ripe-atlas-tools | refs/heads/master | tests/renderers/ssl_consistency.py | 2 | # Copyright (c) 2015 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
try:
from unittest import mock # Python 3.4+
except ImportError:
import mock
from ripe.atlas.cousteau import Probe as CProbe
from ripe.atlas.tools.helpers.rendering import SaganSet
from ripe.atlas.tools.commands.report import Command
from ripe.atlas.tools.renderers.ssl_consistency import Renderer
from ..base import capture_sys_output
class TestSSLConsistency(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.results = [
{u'rt': 737.834, u'msm_id': 1443369, u'from': u'210.6.135.152', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727294, u'fw': 4570, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQUF
BwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGk
AbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 1003, u'dst_port': u'443', u'dst_addr': u'86.59.30.40', u'ttc': 355.078, u'src_addr': u'192.168.1.182', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 496.488, u'msm_id': 1443369, u'from': u'218.254.74.122', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727295, u'fw': 4570, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 1004, u'dst_port': u'443', u'dst_addr': u'38.229.72.14', u'ttc': 239.37, u'src_addr': u'192.168.1.100', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'from': u'183.62.4.188', u'dst_name': u'torproject.org', u'err': u'connect: timeout', u'fw': 4600, u'timestamp': 1392727294, u'af': 4, u'dst_addr': u'216.234.179.13', u'prb_id': 1008, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'rt': 658.671, u'msm_id': 1443369, u'from': u'113.255.77.244', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727293, u'fw': 4570, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 1033, u'dst_port': u'443', u'dst_addr': u'82.195.75.101', u'ttc': 324.6, u'src_addr': u'192.168.0.183', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 518.082, u'msm_id': 1443369, u'from': u'203.218.168.183', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727295, u'fw': 4600, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQ
UFBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgA
GkAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 1038, u'dst_port': u'443', u'dst_addr': u'38.229.72.14', u'ttc': 244.924, u'src_addr': u'192.168.95.248', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 615.64, u'msm_id': 1443369, u'from': u'103.244.157.214', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727295, u'fw': 4600, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 1047, u'dst_port': u'443', u'dst_addr': u'93.95.227.222', u'ttc': 302.403, u'src_addr': u'192.168.6.137', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'from': u'116.237.191.131', u'dst_name': u'torproject.org', u'err': u'connect: timeout', u'fw': 4600, u'timestamp': 1392727292, u'af': 4, u'dst_addr': u'209.145.54.50', u'prb_id': 11572, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'from': u'116.247.110.182', u'dst_name': u'torproject.org', u'err': u'connect: timeout', u'fw': 4580, u'timestamp': 1392727294, u'af': 4, u'dst_addr': u'202.181.7.85', u'prb_id': 11700, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'rt': 673.665, u'msm_id': 1443369, u'from': u'103.6.131.59', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727294, u'fw': 4580, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQUFB
wEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA
bgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 12203, u'dst_port': u'443', u'dst_addr': u'86.59.30.40', u'ttc': 326.088, u'src_addr': u'103.6.131.59', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 768.923, u'msm_id': 1443369, u'from': u'103.15.180.250', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727296, u'fw': 4580, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 12208, u'dst_port': u'443', u'dst_addr': u'93.95.227.222', u'ttc': 381.856, u'src_addr': u'103.15.180.250', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'dnserr': u'non-recoverable failure in name resolution (1)', u'from': u'103.6.129.113', u'dst_name': u'torproject.org', u'fw': 4580, u'timestamp': 1392727293, u'prb_id': 12214, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'rt': 514.766, u'msm_id': 1443369, u'from': u'158.132.255.55', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727293, u'fw': 4580, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 13026, u'dst_port': u'443', u'dst_addr': u'93.95.227.222', u'ttc': 252.836, u'src_addr': u'192.168.1.185', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 868.668, u'msm_id': 1443369, u'from': u'116.231.230.67', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727294, u'fw': 4580, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 13377, u'dst_port': u'443', u'dst_addr': u'82.195.75.101', u'ttc': 431.422, u'src_addr': u'10.0.10.39', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 471.564, u'msm_id': 1443369, u'from': u'103.247.138.60', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727304, u'fw': 4600, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQU
FBwEBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAG
kAbgBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 1386, u'dst_port': u'443', u'dst_addr': u'38.229.72.16', u'ttc': 228.561, u'src_addr': u'103.247.138.60', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'rt': 1161.992, u'msm_id': 1443369, u'from': u'193.162.21.10', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727296, u'fw': 4600, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIID/TCCAuWgAwIBAgIkNhPSsip1AJR2DEGtGdtSpPBb3qgBcuJXh2Gtln9+2aoo\nacTlMA0GCSqGSIb3DQEBBQUAMGAxCzAJBgNVBAYTAkVVMQswCQYDVQQIEwJFVTEL\nMAkGA1UEBxMCRVUxDDAKBgNVBAoTA0RTVjEMMAoGA1UECxMDRFNWMRswGQYDVQQD\nExJhcGFjLnByb3h5LmRzdi5jb20wHhcNMTMwMTI5MDAwMDAwWhcNMTYwNTAzMTIw\nMDAwWjByMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNTWFzc2FjaHVzZXR0czEQMA4G\nA1UEBxMHV2FscG9sZTEeMBwGA1UEChMVVGhlIFRvciBQcm9qZWN0LCBJbmMuMRkw\nFwYDVQQDDBAqLnRvcnByb2plY3Qub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAoGZlXATizI+cktME+tx2rjLndU2i0ATLGmzuhlxqwQ6RiFasuMkp\nfJcQp64tETk7mXxRWFGWVHiUFjCoeSQtO9Y0gxUk4mLTHPuh5nE8V3BzaDQ6sO9R\nPBXhVlTnbUatAitvgKM5PZnAgT+hi3ZcByGERBys/eITyCI/Uh9HZFW7Q2mUBcjr\n3/u36xeQdKMSH8hyq06I7HpRRwZpmv3T67jBQC6vo62rBnfGQ3vTcSVOmDoUOjD3\nGF1P/Ko7ch4Ao2P9/+T8/tNooJOPBK68VL168PTYRgG9Vyeh4hU2ipP/dd9DqoR+\nt3UwNO6o8jQmNSxdB/rjMcUtS7enSy2P7wIDAQABo4GMMIGJMB0GA1UdDgQWBBSx\n87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREEJDAighAqLnRvcnByb2plY3Qub3Jn\ngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEFBQADggEB\nAHEvFwq+/VeBSa56orsVzq9Af34ZuTTST7zAUjfpDSYr/VjmHB4n0Uq80/cRWlm9\nDhRafM36IEk7tYpCN5tK4MYyMVg3FUESDsx37ZNDTpUiVSQPYjxpWQ7I/1kz7Sua\n6f/WQElnghZqa8Fj0s7zL/z6OxhDQMBjZvUzxtlsPhWjryo14COmQhsxW9b2uQA9\n14fPo4AcoFcmze/NAz4oMGyhsGKXwbkXFS5S2fW3ncSTxZBKJV+h8uslvB3owLvm\nhyqTwMk2iY8Z0jWDzR24jL76hPM3MWdFSSZzSkRLASEAcc/M/B50BcLfIFUSizNq\n4UoEZ4u6z2ZSChVJ3nHHaI8=\n-----END CERTIFICATE-----', u'-----BEGIN 
CERTIFICATE-----\nMIIF2jCCBMKgAwIBAgIKFA2gCwABAAZXojANBgkqhkiG9w0BAQUFADBIMRMwEQYK\nCZImiZPyLGQBGRYDQ09NMRMwEQYKCZImiZPyLGQBGRYDRFNWMRwwGgYDVQQDExNE\nU1YgRW50ZXJwcmlzZSBDQSAxMB4XDTEzMDMwNTA4MDIzMFoXDTE1MDMwNTA4MTIz\nMFowYDELMAkGA1UEBhMCRVUxCzAJBgNVBAgTAkVVMQswCQYDVQQHEwJFVTEMMAoG\nA1UEChMDRFNWMQwwCgYDVQQLEwNEU1YxGzAZBgNVBAMTEmFwYWMucHJveHkuZHN2\nLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM+mKjGl6lE+FEdk\nBOwxK4G0EdFOoFV1T43aCZHUgyVUJAz9Y3GiibGTcqrTqI6uMOnA6oxGGol28/JN\nr/ImPVThmOgcMuLJqvty9wbimHdpF4IYPZ2uHYApgJvhXoASQMfpsTxaCghHVreE\nvoa7p1G+crLp5PSZITyM4M2jcYucU7Q+V9ITBlEV1PaKB6jZIRDxuM+sBm9U6yVr\n/xVWBOLqTDw58ZmTtivqJwfn6G9OduKfjw1dK9xUCou8r9gBo+2iz9iQx+XuT27n\nYr2au4vHYJ0iwjX6d0QZVA4LS7fsyKlseY3mkPIA35GK0jaT35ym+KbQ2tjj2Uga\nYeB8mZ8CAwEAAaOCAqwwggKoMB0GA1UdDgQWBBQhlpFR3YPNERoEzLL7pAtqbCla\nnzAfBgNVHSMEGDAWgBTunBHm4QOkKHIfM0iu35w3p6lCcDCCAQsGA1UdHwSCAQIw\ngf8wgfyggfmggfaGgb5sZGFwOi8vL0NOPURTViUyMEVudGVycHJpc2UlMjBDQSUy\nMDEoMSksQ049STAwMTE1LENOPUNEUCxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNl\ncyxDTj1TZXJ2aWNlcyxDTj1Db25maWd1cmF0aW9uLERDPURTVixEQz1DT00/Y2Vy\ndGlmaWNhdGVSZXZvY2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3Ry\naWJ1dGlvblBvaW50hjNodHRwOi8vcGtpLmRzdi5jb20vRFNWJTIwRW50ZXJwcmlz\nZSUyMENBJTIwMSgxKS5jcmwwggEZBggrBgEFBQcBAQSCAQswggEHMIG0BggrBgEF\nBQcwAoaBp2xkYXA6Ly8vQ049RFNWJTIwRW50ZXJwcmlzZSUyMENBJTIwMSxDTj1B\nSUEsQ049UHVibGljJTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29u\nZmlndXJhdGlvbixEQz1EU1YsREM9Q09NP2NBQ2VydGlmaWNhdGU/YmFzZT9vYmpl\nY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5ME4GCCsGAQUFBzAChkJodHRw\nOi8vcGtpLmRzdi5jb20vSTAwMTE1LmRzdi5jb21fRFNWJTIwRW50ZXJwcmlzZSUy\nMENBJTIwMSgxKS5jcnQwGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwDwYDVR0T\nAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEFBQADggEBAJ+1\nbzf71F62JifL7uE08684cmT9sn5nmJLJjk5QEB6Fr+iCD9vafqG2XEjWbensyLhH\nPY/auJUSF7cdzu+l6jGFYvyjxM3AldQDlstzuhwcl6b0UgqOwfFTxkRzTCBobAUq\nkBgV60vXdJ2f29jPYk9qe0LUr8/01Yd8/f7JHkkjolmeBtGomXfNBhhxv7hW8/xN\neXtUTAE1yrbdiJhV5rGCTtWcAHDLzFo/sL4WbQPK5VqMzhUvJpf+zL2nyQ548kzk\nRL
lSVBX+Jghb0Z9feRG+dCG5jr49farmtxI/J1BbCQHOinbtwufoW6skJUv9dzx4\nnzi9veZZknp8KIDEW6A=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 2844, u'dst_port': u'443', u'dst_addr': u'38.229.72.16', u'ttc': 3.042, u'src_addr': u'145.218.88.254', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'from': u'210.22.91.254', u'dst_name': u'torproject.org', u'err': u'connect: timeout', u'fw': 4570, u'timestamp': 1392727297, u'af': 4, u'dst_addr': u'4.36.66.178', u'prb_id': 3386, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'from': u'180.154.141.170', u'dst_name': u'torproject.org', u'err': u'connect: timeout', u'fw': 4600, u'timestamp': 1392727296, u'af': 4, u'dst_addr': u'209.145.54.50', u'prb_id': 3909, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'from': u'180.154.141.170', u'dst_name': u'torproject.org', u'err': u'connect: timeout', u'fw': 4600, u'timestamp': 1392727295, u'af': 4, u'dst_addr': u'216.234.179.13', u'prb_id': 3914, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369},
{u'rt': 420.604, u'msm_id': 1443369, u'from': u'27.50.33.14', u'dst_name': u'torproject.org', u'af': 4, u'timestamp': 1392727295, u'fw': 4600, u'cert': [u'-----BEGIN CERTIFICATE-----\nMIIGujCCBaKgAwIBAgIQBt6X5R3DncJkjaxy3UEB/DANBgkqhkiG9w0BAQsFADBm\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSUwIwYDVQQDExxEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBDQS0zMB4XDTEzMDEyOTAwMDAwMFoXDTE2MDUwMzEyMDAwMFowcjELMAkGA1UE\nBhMCVVMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxEDAOBgNVBAcTB1dhbHBvbGUx\nHjAcBgNVBAoTFVRoZSBUb3IgUHJvamVjdCwgSW5jLjEZMBcGA1UEAwwQKi50b3Jw\ncm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN1oOe8B\n0kT0l6MXsIOWiBiXqLfGCk8nIeX+GXc0TNez14HBWPOzLMmA6Kfj3h9kJ0hLCzlS\nGui3xsT1ca5ZXONP/2beDkIoxwF+7/MCS8gOu4Cyua0CjR0ce6YWemKYVKxoqJvY\nH/S2UnzMHaBI/bhJ+QK5kMYg/JXoMx9IMIJnjl9clFt3TE34UR5/NZTsytXAtCjI\n5qMSpzKRE31RREGv1kxwTqJq/g5UFJWzZEwISDEhTeFTVOru0qjbEAqaip4hQH9D\nITjDOFw7Upgdab4TN4gLwDaZuo+Qcz+CQR6vCSlP2KziQAH9nlU+qT81eYVv+NOf\njogvdu/Atl/q+z0CAwEAAaOCA1YwggNSMB8GA1UdIwQYMBaAFFDqc4nbKfsQj57l\nASDU3nmZSIP3MB0GA1UdDgQWBBSx87Iq0fmAeNURYjYpnSG8riduZjArBgNVHREE\nJDAighAqLnRvcnByb2plY3Qub3Jngg50b3Jwcm9qZWN0Lm9yZzAOBgNVHQ8BAf8E\nBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGEGA1UdHwRaMFgw\nKqAooCaGJGh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9jYTMtZzE4LmNybDAqoCig\nJoYkaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL2NhMy1nMTguY3JsMIIBxAYDVR0g\nBIIBuzCCAbcwggGzBglghkgBhv1sAQEwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8v\nd3d3LmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYB\nBQUHAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMA\nZQByAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEA\nYwBjAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIA\ndAAgAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcA\nIABQAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwA\naQBtAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkA\nbgBjAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUA\nZgBlAHIAZQBuAGMAZQAuMHsGCCsGAQUFBw
EBBG8wbTAkBggrBgEFBQcwAYYYaHR0\ncDovL29jc3AuZGlnaWNlcnQuY29tMEUGCCsGAQUFBzAChjlodHRwOi8vY2FjZXJ0\ncy5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNlQ0EtMy5jcnQwDAYD\nVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAFfAsIxhBxzSVi5a9FpEp9JGc\n0wL5/4BVFv0lKYjHkRVoBdvN3gnAfGt2YXrAJZb7OCVwW3KFdSaTwm8T10eCVSXX\nASTrp6DWs6mHxw9HGIkVF9YESq6x5/ZGHDTovuRMCeHuIwn+nBL21z1WDqwozwcQ\nAxNXeRXJvXO4bOj301+26as9cOWjonGzkW9uc3WTWp89+YOpRo6RQ59Yc3UJlxjW\nHZR3Oqp/GM1jo2NPHnFeMpnFtVj+uuQBtNj7D7jiWhGtNxFIePizOBs8k+ao9lWO\nE2UHK5iM17YISRhBPNwi4YL+nf+jo5untE6WgvFYhEH2pwmCSKrIYBdGatbxfw==\n-----END CERTIFICATE-----', u'-----BEGIN CERTIFICATE-----\nMIIGWDCCBUCgAwIBAgIQCl8RTQNbF5EX0u/UA4w/OzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA4MDQwMjEyMDAwMFoXDTIyMDQwMzAwMDAwMFowZjEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTElMCMGA1UEAxMcRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nQ0EtMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9hCikQH17+NDdR\nCPge+yLtYb4LDXBMUGMmdRW5QYiXtvCgFbsIYOBC6AUpEIc2iihlqO8xB3RtNpcv\nKEZmBMcqeSZ6mdWOw21PoF6tvD2Rwll7XjZswFPPAAgyPhBkWBATaccM7pxCUQD5\nBUTuJM56H+2MEb0SqPMV9Bx6MWkBG6fmXcCabH4JnudSREoQOiPkm7YDr6ictFuf\n1EutkozOtREqqjcYjbTCuNhcBoz4/yO9NV7UfD5+gw6RlgWYw7If48hl66l7XaAs\nzPw82W3tzPpLQ4zJ1LilYRyyQLYoEt+5+F/+07LJ7z20Hkt8HEyZNp496+ynaF4d\n32duXvsCAwEAAaOCAvowggL2MA4GA1UdDwEB/wQEAwIBhjCCAcYGA1UdIASCAb0w\nggG5MIIBtQYLYIZIAYb9bAEDAAIwggGkMDoGCCsGAQUFBwIBFi5odHRwOi8vd3d3\nLmRpZ2ljZXJ0LmNvbS9zc2wtY3BzLXJlcG9zaXRvcnkuaHRtMIIBZAYIKwYBBQUH\nAgIwggFWHoIBUgBBAG4AeQAgAHUAcwBlACAAbwBmACAAdABoAGkAcwAgAEMAZQBy\nAHQAaQBmAGkAYwBhAHQAZQAgAGMAbwBuAHMAdABpAHQAdQB0AGUAcwAgAGEAYwBj\nAGUAcAB0AGEAbgBjAGUAIABvAGYAIAB0AGgAZQAgAEQAaQBnAGkAQwBlAHIAdAAg\nAEMAUAAvAEMAUABTACAAYQBuAGQAIAB0AGgAZQAgAFIAZQBsAHkAaQBuAGcAIABQ\nAGEAcgB0AHkAIABBAGcAcgBlAGUAbQBlAG4AdAAgAHcAaABpAGMAaAAgAGwAaQBt\nAGkAdAAgAGwAaQBhAGIAaQBsAGkAdAB5ACAAYQBuAGQAIABhAHIAZQAgAGkAb
gBj\nAG8AcgBwAG8AcgBhAHQAZQBkACAAaABlAHIAZQBpAG4AIABiAHkAIAByAGUAZgBl\nAHIAZQBuAGMAZQAuMBIGA1UdEwEB/wQIMAYBAf8CAQAwNAYIKwYBBQUHAQEEKDAm\nMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wgY8GA1UdHwSB\nhzCBhDBAoD6gPIY6aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0SGln\naEFzc3VyYW5jZUVWUm9vdENBLmNybDBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNl\ncnQuY29tL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDAfBgNVHSME\nGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzAdBgNVHQ4EFgQUUOpzidsp+xCPnuUB\nINTeeZlIg/cwDQYJKoZIhvcNAQEFBQADggEBAB7ipUiebNtTOA/vphoqrOIDQ+2a\nvD6OdRvw/S4iWawTwGHi5/rpmc2HCXVUKL9GYNy+USyS8xuRfDEIcOI3ucFbqL2j\nCwD7GhX9A61YasXHJJlIR0YxHpLvtF9ONMeQvzHB+LGEhtCcAarfilYGzjrpDq6X\ndF3XcZpCdF/ejUN83ulV7WkAywXgemFhM9EZTfkI7qA5xSU1tyvED7Ld8aW3DiTE\nJiiNeXf1L/BXunwH1OH8zVowV36GEEfdMR/X/KLCvzB8XSSq6PmuX2p0ws5rs0bY\nIb4p1I5eFdZCSucyb6Sxa1GDWL4/bcf72gMhy2oWGU4K8K2Eyl2Us1p292E=\n-----END CERTIFICATE-----'], u'method': u'SSL', u'prb_id': 400, u'dst_port': u'443', u'dst_addr': u'38.229.72.14', u'ttc': 203.67, u'src_addr': u'27.50.33.14', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'ver': u'3.0'},
{u'dnserr': u'non-recoverable failure in name resolution (1)', u'from': u'211.68.70.39', u'dst_name': u'torproject.org', u'fw': 4600, u'timestamp': 1392727294, u'prb_id': 483, u'dst_port': u'443', u'group_id': 1443369, u'type': u'sslcert', u'msm_name': u'SSLCert', u'msm_id': 1443369}
]
cls.probes = {
1003: CProbe(id=1003, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
1004: CProbe(id=1004, meta_data={
"country_code": "DE", "asn_v4": 3333, "asn_v6": "4444"}),
1008: CProbe(id=1008, meta_data={
"country_code": "DE", "asn_v4": 3332, "asn_v6": "4444"}),
1033: CProbe(id=1033, meta_data={
"country_code": "NL", "asn_v4": 3333, "asn_v6": "4444"}),
1038: CProbe(id=1038, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
1047: CProbe(id=1047, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
11672: CProbe(id=11572, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
11700: CProbe(id=11700, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
12203: CProbe(id=12203, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
12208: CProbe(id=12208, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
12214: CProbe(id=12214, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
13026: CProbe(id=13026, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
13377: CProbe(id=13377, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
1386: CProbe(id=1386, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
2844: CProbe(id=2844, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
3386: CProbe(id=3386, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
3909: CProbe(id=3909, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
3914: CProbe(id=3914, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
400: CProbe(id=400, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
483: CProbe(id=483, meta_data={
"country_code": "GR", "asn_v4": 3333, "asn_v6": "4444"}),
}
def test_additional(self):
"""Tests whole functionality of additional unit."""
expected_output = (
"Certificate:\n"
" Issuer: C=US, O=DigiCert Inc, CN=DigiCert High Assurance CA-3\n"
" Subject: C=US, O=The Tor Project, Inc., CN=*.torproject.org\n"
" SHA256 Fingerprint=36:13:D2:B2:2A:75:00:94:76:0C:41:AD:19:DB:52:A4:F0:5B:DE:A8:01:72:E2:57:87:61:AD:96:7F:7E:D9:AA\n\n"
" Seen by 11 probes\n\n"
"Certificate:\n"
" Issuer: C=US, O=DigiCert Inc, CN=DigiCert High Assurance EV Root CA\n"
" Subject: C=US, O=DigiCert Inc, CN=DigiCert High Assurance CA-3\n"
" SHA256 Fingerprint=21:EB:37:AB:4C:F6:EF:89:65:EC:17:66:40:9C:A7:6B:8B:2E:03:F2:D1:A3:88:DF:73:42:08:E8:6D:EE:E6:79\n\n"
" Seen by 11 probes\n\n"
"Certificate:\n"
" Issuer: C=None, O=None, CN=DSV Enterprise CA 1\n"
" Subject: C=EU, O=DSV, CN=apac.proxy.dsv.com\n"
" SHA256 Fingerprint=1A:B8:9E:ED:1B:DD:A0:E2:EA:67:89:C1:C5:4B:20:1C:49:9D:74:27:B0:5D:11:F2:9A:5F:C1:0D:F9:18:48:DA\n\n"
" Seen by 1 probe\n\n"
" Below the threshold (80%)\n"
" Probes that saw it: \n"
" ID: 2844, country code: GR, ASN (v4/v6): 3333/4444\n"
"Certificate:\n"
" Issuer: C=EU, O=DSV, CN=apac.proxy.dsv.com\n"
" Subject: C=US, O=The Tor Project, Inc., CN=*.torproject.org\n"
" SHA256 Fingerprint=07:52:BE:65:72:BF:02:D4:C9:E2:93:09:A8:E0:BE:3A:EA:D4:30:41:B8:49:FA:C5:F2:12:33:07:37:57:EE:C7\n\n"
" Seen by 1 probe\n\n"
" Below the threshold (80%)\n"
" Probes that saw it: \n"
" ID: 2844, country code: GR, ASN (v4/v6): 3333/4444\n"
)
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
self.assertEquals(
set(Renderer().additional(SaganSet(self.results)).split("\n")),
set(expected_output.split("\n"))
)
def test_gather_unique_certs(self):
"""Test gathering of the unique certs in sagans set"""
expected_certs = {
'1A:B8:9E:ED:1B:DD:A0:E2:EA:67:89:C1:C5:4B:20:1C:49:9D:74:27:B0:5D:11:F2:9A:5F:C1:0D:F9:18:48:DA': {'cnt': 1, 'probes': [self.probes[2844]]},
'36:13:D2:B2:2A:75:00:94:76:0C:41:AD:19:DB:52:A4:F0:5B:DE:A8:01:72:E2:57:87:61:AD:96:7F:7E:D9:AA': {
'cnt': 11, 'probes': [self.probes[1003], self.probes[1004], self.probes[1033], self.probes[1038], self.probes[1047], self.probes[12203], self.probes[12208], self.probes[13026], self.probes[13377], self.probes[1386], self.probes[400]]
},
'21:EB:37:AB:4C:F6:EF:89:65:EC:17:66:40:9C:A7:6B:8B:2E:03:F2:D1:A3:88:DF:73:42:08:E8:6D:EE:E6:79': {
'cnt': 11, 'probes': [self.probes[1003], self.probes[1004], self.probes[1033], self.probes[1038], self.probes[1047], self.probes[12203], self.probes[12208], self.probes[13026], self.probes[13377], self.probes[1386], self.probes[400]]
},
'07:52:BE:65:72:BF:02:D4:C9:E2:93:09:A8:E0:BE:3A:EA:D4:30:41:B8:49:FA:C5:F2:12:33:07:37:57:EE:C7': {'cnt': 1, 'probes': [self.probes[2844]]}
}
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
sagans = SaganSet(self.results)
renderer = Renderer()
renderer.gather_unique_certs(sagans)
# remove cert key to make easier to test
for k in renderer.uniqcerts:
renderer.uniqcerts[k].pop("cert")
self.assertEquals(renderer.uniqcerts, expected_certs)
def test_bucketize_result_cert(self):
"""Tests bucketize of a single sagan result"""
expected_certs = {
'36:13:D2:B2:2A:75:00:94:76:0C:41:AD:19:DB:52:A4:F0:5B:DE:A8:01:72:E2:57:87:61:AD:96:7F:7E:D9:AA': {'cnt': 1, 'probes': [self.probes[1003]]},
'21:EB:37:AB:4C:F6:EF:89:65:EC:17:66:40:9C:A7:6B:8B:2E:03:F2:D1:A3:88:DF:73:42:08:E8:6D:EE:E6:79': {'cnt': 1, 'probes': [self.probes[1003]]}
}
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
sagans = SaganSet(self.results)
renderer = Renderer()
renderer.bucketize_result_cert(list(sagans)[0])
# remove cert key to make easier to test
for k in renderer.uniqcerts:
renderer.uniqcerts[k].pop("cert")
self.assertEquals(renderer.uniqcerts, expected_certs)
def test_get_nprobes_ofpopular_cert(self):
"""Tests fetching the number of probes for the most popular cert"""
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
sagans = SaganSet(self.results)
renderer = Renderer()
renderer.gather_unique_certs(sagans)
self.assertEquals(renderer.get_nprobes_ofpopular_cert(), 11)
def test_get_nprobes_ofpopular_cert_empty(self):
"""Tests that getting the number of probes for popular certs does not
throw an error when there are no valid certificates."""
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
sagans = SaganSet([])
renderer = Renderer()
renderer.gather_unique_certs(sagans)
self.assertEquals(renderer.get_nprobes_ofpopular_cert(), 0)
def test_render_certificate(self):
"""Tests rendering of single certificate."""
expected_output = (
"Certificate:\n"
" Issuer: C=US, O=DigiCert Inc, CN=DigiCert High Assurance CA-3\n"
" Subject: C=US, O=The Tor Project, Inc., CN=*.torproject.org\n"
" SHA256 Fingerprint=36:13:D2:B2:2A:75:00:94:76:0C:41:AD:19:DB:52:A4:F0:5B:DE:A8:01:72:E2:57:87:61:AD:96:7F:7E:D9:AA\n\n"
" Seen by 11 probes\n"
)
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
sagans = SaganSet(self.results)
renderer = Renderer()
renderer.gather_unique_certs(sagans)
self.assertEquals(
renderer.render_certificate("36:13:D2:B2:2A:75:00:94:76:0C:41:AD:19:DB:52:A4:F0:5B:DE:A8:01:72:E2:57:87:61:AD:96:7F:7E:D9:AA"),
expected_output
)
def test_render_below_thershold(self):
"""Tests rendering string for certs that are seen for below threshold probes."""
expected_output = [
" Below the threshold (80%)",
" Probes that saw it: ",
" ID: 2844, country code: GR, ASN (v4/v6): 3333/4444"
]
path = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(path) as mock_get_many:
mock_get_many.return_value = self.probes.values()
sagans = SaganSet(self.results)
renderer = Renderer()
renderer.gather_unique_certs(sagans)
self.assertEquals(
renderer.render_below_threshold("07:52:BE:65:72:BF:02:D4:C9:E2:93:09:A8:E0:BE:3A:EA:D4:30:41:B8:49:FA:C5:F2:12:33:07:37:57:EE:C7"),
expected_output
)
def test_report_with_ssl_consistency_renderer(self):
"""Tests the report with the ssl renderer."""
results = self.results[:2]
expected_output = (
"Certificate:\n"
" Issuer: C=US, O=DigiCert Inc, CN=DigiCert High Assurance CA-3\n"
" Subject: C=US, O=The Tor Project, Inc., CN=*.torproject.org\n"
" SHA256 Fingerprint=36:13:D2:B2:2A:75:00:94:76:0C:41:AD:19:DB:52:A4:F0:5B:DE:A8:01:72:E2:57:87:61:AD:96:7F:7E:D9:AA\n\n"
" Seen by 2 probes\n\n"
"Certificate:\n"
" Issuer: C=US, O=DigiCert Inc, CN=DigiCert High Assurance EV Root CA\n"
" Subject: C=US, O=DigiCert Inc, CN=DigiCert High Assurance CA-3\n"
" SHA256 Fingerprint=21:EB:37:AB:4C:F6:EF:89:65:EC:17:66:40:9C:A7:6B:8B:2E:03:F2:D1:A3:88:DF:73:42:08:E8:6D:EE:E6:79\n\n"
" Seen by 2 probes\n\n"
)
with capture_sys_output() as (stdout, stderr):
path = 'ripe.atlas.cousteau.AtlasRequest.get'
with mock.patch(path) as mock_get:
mock_get.side_effect = [
(True, results)
]
mpath = 'ripe.atlas.tools.helpers.rendering.Probe.get_many'
with mock.patch(mpath) as mock_get_many:
mock_get_many.return_value = [self.probes[1003], self.probes[1004]]
cmd = Command()
cmd.init_args(["1", "--renderer", "ssl_consistency"])
cmd.run()
expected_set = set(expected_output.split("\n"))
returned_set = set(stdout.getvalue().split("\n"))
self.assertEquals(returned_set, expected_set)
|
kadamski/func | refs/heads/master | func/overlord/base_command.py | 2 | #!/usr/bin/python
"""
Copyright 2008, Red Hat, Inc
Adrian Likins <alikins@redhat.com>
also see AUTHORS
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import command
import client
DEFAULT_PORT = 51234
DEFAULT_MAPLOC = "/var/lib/func/map"
class BaseCommand(command.Command):
    """Wrapper class for commands with some convenience functions, namely
    getOverlord() for getting an Overlord client API handle.

    NOTE(review): this is Python 2 code -- ``async`` is used as a plain
    identifier below, which is a syntax error on Python 3.7+."""
    # Defaults for the Overlord connection; subclasses/option parsing
    # may override these before getOverlord() is called.
    interactive = False
    verbose=0
    port=DEFAULT_PORT
    async=False
    forks=1
    delegate=False
    mapfile=DEFAULT_MAPLOC
    def getOverlord(self):
        # Build the client from the instance's current settings and cache it
        # on self.overlord_obj. Assumes self.server_spec and self.config are
        # set by the surrounding command machinery -- TODO confirm.
        self.overlord_obj = client.Overlord(self.server_spec,
                                            port=self.port,
                                            interactive=self.interactive,
                                            verbose=self.verbose,
                                            config=self.config,
                                            async=self.async,
                                            nforks=self.forks,
                                            delegate=self.delegate,
                                            mapfile=self.mapfile)
|
fernandog/Medusa | refs/heads/optimized | ext/github/RateLimit.py | 1 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.Rate
class RateLimit(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents RateLimits. The reference can be found here http://developer.github.com/v3/rate_limit
    """

    def __repr__(self):
        rate_repr = {"rate": self._rate.value}
        return self.get__repr__(rate_repr)

    @property
    def rate(self):
        """
        :type: class:`github.Rate.Rate`
        """
        return self._rate.value

    def _initAttributes(self):
        # Until _useAttributes runs, the rate is marked as not yet fetched.
        self._rate = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        if "rate" in attributes:  # pragma no branch
            raw_rate = attributes["rate"]
            self._rate = self._makeClassAttribute(github.Rate.Rate, raw_rate)
|
tommyip/zulip | refs/heads/master | zilencer/forms.py | 70 | from django import forms
class EnterpriseToSForm(forms.Form):
    """Form capturing acceptance of the enterprise terms of service."""
    # Declaration order is significant: it determines rendering order.
    full_name = forms.CharField(max_length=100)  # signer's full name
    company = forms.CharField(max_length=100)    # signer's company name
    terms = forms.BooleanField(required=True)    # must be checked to submit
|
TravelModellingGroup/TMGToolbox | refs/heads/dev-1.8 | TMGToolbox/src/XTMF_internal/return_boardings_and_WAW.py | 1 | #---LICENSE----------------------
'''
Copyright 2015 Travel Modelling Group, Department of Civil Engineering, University of Toronto
This file is part of the TMG Toolbox.
The TMG Toolbox is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The TMG Toolbox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the TMG Toolbox. If not, see <http://www.gnu.org/licenses/>.
'''
#---METADATA---------------------
'''
Return Boardings and WAW
Authors: pkucirek
Latest revision by: pkucirek
Returns a 'serialized' (e.g. string repr) of transit line boardings to XTMF,
plus WAW numbers.
See XTMF for documentation.
'''
#---VERSION HISTORY
'''
0.0.1 Created on 2014-02-05 by pkucirek
0.1.0 Upgraded to work with get_attribute_values (partial read)
0.1.1 Updated to allow for multi-threaded matrix calcs in 4.2.1+
'''
import inro.modeller as _m
import traceback as _traceback
from contextlib import contextmanager
from contextlib import nested
from json import loads
from multiprocessing import cpu_count
_MODELLER = _m.Modeller() #Instantiate Modeller once.
_util = _MODELLER.module('tmg.common.utilities')
_tmgTPB = _MODELLER.module('tmg.common.TMG_tool_page_builder')
strategyAnalysisTool = _MODELLER.tool('inro.emme.transit_assignment.extended.strategy_based_analysis')
matrixCalculator = _MODELLER.tool('inro.emme.matrix_calculation.matrix_calculator')
EMME_VERSION = _util.getEmmeVersion(tuple)
##########################################################################################################
class ReturnBoardingsAndWAW(_m.Tool()):
version = '0.1.1'
tool_run_msg = ""
number_of_tasks = 1 # For progress reporting, enter the integer number of tasks here
# Tool Input Parameters
# Only those parameters necessary for Modeller and/or XTMF to dock with
# need to be placed here. Internal parameters (such as lists and dicts)
# get initialized during construction (__init__)
xtmf_ScenarioNumber = _m.Attribute(int) # parameter used by XTMF only
xtmf_LineAggregationFile = _m.Attribute(str)
xtmf_ExportWAW = _m.Attribute(bool)
NumberOfProcessors = _m.Attribute(int)
def __init__(self):
#---Init internal variables
self.TRACKER = _util.ProgressTracker(self.number_of_tasks) #init the ProgressTracker
self.NumberOfProcessors = cpu_count()
def page(self):
pb = _m.ToolPageBuilder(self, title="Return Boardings",
description="Cannot be called from Modeller.",
runnable=False,
branding_text="XTMF")
return pb.render()
##########################################################################################################
def __call__(self, xtmf_ScenarioNumber, xtmf_LineAggregationFile, xtmf_ExportWAW):
_m.logbook_write("Extracting boarding results")
#---1 Set up scenario
scenario = _m.Modeller().emmebank.scenario(xtmf_ScenarioNumber)
if (scenario is None):
raise Exception("Scenario %s was not found!" %xtmf_ScenarioNumber)
if not scenario.has_transit_results:
raise Exception("Scenario %s does not have transit assignment results" %xtmf_ScenarioNumber)
self.xtmf_LineAggregationFile = xtmf_LineAggregationFile
self.xtmf_ExportWAW = xtmf_ExportWAW
try:
return self._Execute(scenario)
except Exception as e:
msg = str(e) + "\n" + _traceback.format_exc()
raise Exception(msg)
##########################################################################################################
def _Execute(self, scenario):
lineAggregation = self._LoadLineAggregationFile()
lineBoardings = self._GetLineResults(scenario)
#netSet = set([key for key in lineBoardings.iterkeys()])
#self._CheckAggregationFile(netSet, lineAggregation)
self.TRACKER.completeTask()
results = {}
self.TRACKER.startProcess(len(lineBoardings))
for lineId, lineCount in lineBoardings.iteritems():
if not lineId in lineAggregation:
self.TRACKER.completeSubtask()
continue #Skip unmapped lines
lineGroupId = lineAggregation[lineId]
if lineGroupId in results:
results[lineGroupId] += lineCount
else:
results[lineGroupId] = lineCount
self.TRACKER.completeSubtask()
print "Loaded transit line boardings"
if self.xtmf_ExportWAW:
results['Walk-all-way'] = self._GetWalkAllWayMatrix(scenario)
print "Loaded transit walk-all-way numbers"
return str(results)
def _LoadLineAggregationFile(self):
mapping = {}
with open(self.xtmf_LineAggregationFile) as reader:
reader.readline()
for line in reader:
cells = line.strip().split(',')
key = cells[0].strip()
val = cells[1].strip()
mapping[key] = val
return mapping
def _GetLineResults(self, scenario):
results = _util.fastLoadSummedSegmentAttributes(scenario, ['transit_boardings'])
retVal = {}
for lineId, attributes in results.iteritems():
id = str(lineId)
retVal[id] = attributes['transit_boardings']
return retVal
def _CheckAggregationFile(self, netSet, lineAggregation):
aggSet = set([key for key in lineAggregation.iterkeys()])
linesInNetworkButNotMapped = [id for id in (netSet - aggSet)]
linesMappedButNotInNetwork = [id for id in (aggSet - netSet)]
if len(linesMappedButNotInNetwork) > 0:
msg = "%s lines have been found in the network without a line grouping: " %len(linesInNetworkButNotMapped)
msg += ",".join(linesInNetworkButNotMapped[:10])
if len(linesInNetworkButNotMapped) > 10:
msg += "...(%s more)" %(len(linesInNetworkButNotMapped) - 10)
print msg
if len(linesMappedButNotInNetwork) > 0:
msg = "%s lines have been found in the aggregation file but do not exist in the network: " %len(linesMappedButNotInNetwork)
msg += ",".join(linesMappedButNotInNetwork[:10])
if len(linesMappedButNotInNetwork) > 10:
msg += "...(%s more)" %(len(linesMappedButNotInNetwork) - 10)
print msg
def _GetWalkAllWayMatrix(self, scenario):
with _util.tempMatrixMANAGER() as wawMatrix:
self._RunStrategyAnalysis(wawMatrix.id, scenario)
return self._SumWalkAllWayMatrix(wawMatrix.id, scenario)
def _RunStrategyAnalysis(self, wawMatrixId, scenario):
spec = {
"trip_components": {
"boarding": None,
"in_vehicle": "length",
"aux_transit": None,
"alighting": None
},
"sub_path_combination_operator": "+",
"sub_strategy_combination_operator": ".min.",
"selected_demand_and_transit_volumes": {
"sub_strategies_to_retain": "FROM_COMBINATION_OPERATOR",
"selection_threshold": {
"lower": 0,
"upper": 0
}
},
"analyzed_demand": None,
"constraint": None,
"results": {
"strategy_values": None,
"selected_demand": wawMatrixId,
"transit_volumes": None,
"aux_transit_volumes": None,
"total_boardings": None,
"total_alightings": None
},
"type": "EXTENDED_TRANSIT_STRATEGY_ANALYSIS"
}
strategyAnalysisTool(spec, scenario= scenario)
def _SumWalkAllWayMatrix(self, wawMatrixId, scenario):
spec = {
"expression": wawMatrixId,
"result": None,
"constraint": {
"by_value": None,
"by_zone": None
},
"aggregation": {
"origins": "+",
"destinations": "+"
},
"type": "MATRIX_CALCULATION"
}
if EMME_VERSION >= (4,2,1):
return matrixCalculator(spec, scenario= scenario, num_processors=self.NumberOfProcessors)['result']
else:
return matrixCalculator(spec, scenario= scenario)['result']
##########################################################################################################
@_m.method(return_type=_m.TupleType)
def percent_completed(self):
return self.TRACKER.getProgress()
@_m.method(return_type=unicode)
def tool_run_msg_status(self):
return self.tool_run_msg
|
VimVincent/three.js | refs/heads/master | utils/exporters/blender/addons/io_three/exporter/scene.py | 124 | import os
from .. import constants, logger
from . import (
base_classes,
texture,
material,
geometry,
object as object_,
utilities,
io,
api
)
class Scene(base_classes.BaseScene):
    """Class that handles the construction of a Three scene"""
    _defaults = {
        constants.METADATA: constants.DEFAULT_METADATA.copy(),
        constants.GEOMETRIES: [],
        constants.MATERIALS: [],
        constants.IMAGES: [],
        constants.TEXTURES: []
    }

    def __init__(self, filepath, options=None):
        """
        :param filepath: output path for the scene file
        :param options: export options mapping (optional)
        """
        logger.debug("Scene().__init__(%s, %s)", filepath, options)
        base_classes.BaseScene.__init__(self, filepath, options or {})
        source_file = api.scene_name()
        if source_file:
            self[constants.METADATA][constants.SOURCE_FILE] = source_file

    @property
    def valid_types(self):
        """
        :return: list of valid node types
        """
        valid_types = [api.constants.MESH]
        if self.options.get(constants.HIERARCHY, False):
            valid_types.append(api.constants.EMPTY)
        if self.options.get(constants.CAMERAS):
            logger.info("Adding cameras to valid object types")
            valid_types.append(api.constants.CAMERA)
        if self.options.get(constants.LIGHTS):
            logger.info("Adding lights to valid object types")
            valid_types.append(api.constants.LAMP)
        return valid_types

    def geometry(self, value):
        """Find a geometry node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().geometry(%s)", value)
        return _find_node(value, self[constants.GEOMETRIES])

    def image(self, value):
        """Find a image node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        # BUG FIX: the log format string was "Scene().image%s)" -- missing
        # the opening parenthesis.
        logger.debug("Scene().image(%s)", value)
        return _find_node(value, self[constants.IMAGES])

    def material(self, value):
        """Find a material node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().material(%s)", value)
        return _find_node(value, self[constants.MATERIALS])

    def parse(self):
        """Execute the parsing of the scene"""
        logger.debug("Scene().parse()")
        if self.options.get(constants.MAPS):
            self._parse_textures()
        if self.options.get(constants.MATERIALS):
            self._parse_materials()
        self._parse_geometries()
        self._parse_objects()

    def texture(self, value):
        """Find a texture node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str

        """
        logger.debug("Scene().texture(%s)", value)
        return _find_node(value, self[constants.TEXTURES])

    def write(self):
        """Write the parsed scene to disk."""
        logger.debug("Scene().write()")
        data = {}

        embed_anim = self.options.get(constants.EMBED_ANIMATION, True)
        embed = self.options.get(constants.EMBED_GEOMETRY, True)

        compression = self.options.get(constants.COMPRESSION)
        extension = constants.EXTENSIONS.get(
            compression,
            constants.EXTENSIONS[constants.JSON])

        export_dir = os.path.dirname(self.filepath)
        for key, value in self.items():
            if key == constants.GEOMETRIES:
                geometries = []
                for geom in value:

                    if not embed_anim:
                        geom.write_animation(export_dir)

                    geom_data = geom.copy()

                    # Embedded geometry is inlined into the scene file;
                    # otherwise each geometry is written to its own file
                    # and referenced by URL.
                    if embed:
                        geometries.append(geom_data)
                        continue

                    geo_type = geom_data[constants.TYPE].lower()
                    if geo_type == constants.GEOMETRY.lower():
                        geom_data.pop(constants.DATA)
                    elif geo_type == constants.BUFFER_GEOMETRY.lower():
                        geom_data.pop(constants.ATTRIBUTES)
                        geom_data.pop(constants.METADATA)

                    url = 'geometry.%s%s' % (geom.node, extension)
                    geometry_file = os.path.join(export_dir, url)

                    geom.write(filepath=geometry_file)
                    geom_data[constants.URL] = os.path.basename(url)

                    geometries.append(geom_data)

                data[key] = geometries
            elif isinstance(value, list):
                data[key] = []
                for each in value:
                    data[key].append(each.copy())
            elif isinstance(value, dict):
                data[key] = value.copy()

        io.dump(self.filepath, data, options=self.options)

        if self.options.get(constants.COPY_TEXTURES):
            texture_folder = self.options.get(constants.TEXTURE_FOLDER)
            for geo in self[constants.GEOMETRIES]:
                logger.info("Copying textures from %s", geo.node)
                geo.copy_textures(texture_folder)

    def _parse_geometries(self):
        """Locate all geometry nodes and parse them"""
        logger.debug("Scene()._parse_geometries()")

        # this is an important step. please refer to the doc string
        # on the function for more information
        api.object.prep_meshes(self.options)
        geometries = []

        # now iterate over all the extracted mesh nodes and parse each one
        for mesh in api.object.extracted_meshes():
            logger.info("Parsing geometry %s", mesh)
            geo = geometry.Geometry(mesh, self)
            geo.parse()
            geometries.append(geo)

        logger.info("Added %d geometry nodes", len(geometries))
        self[constants.GEOMETRIES] = geometries

    def _parse_materials(self):
        """Locate all non-orphaned materials and parse them"""
        logger.debug("Scene()._parse_materials()")
        materials = []

        for material_name in api.material.used_materials():
            logger.info("Parsing material %s", material_name)
            materials.append(material.Material(material_name, parent=self))

        logger.info("Added %d material nodes", len(materials))
        self[constants.MATERIALS] = materials

    def _parse_objects(self):
        """Locate all valid objects in the scene and parse them"""
        logger.debug("Scene()._parse_objects()")
        try:
            scene_name = self[constants.METADATA][constants.SOURCE_FILE]
        except KeyError:
            scene_name = constants.SCENE

        # The scene itself is the root object node; its uuid is derived
        # from the source file name.
        self[constants.OBJECT] = object_.Object(None, parent=self)
        self[constants.OBJECT][constants.TYPE] = constants.SCENE.title()
        self[constants.UUID] = utilities.id_from_name(scene_name)

        objects = []
        if self.options.get(constants.HIERARCHY, False):
            nodes = api.object.assemblies(self.valid_types, self.options)
        else:
            nodes = api.object.nodes(self.valid_types, self.options)

        for node in nodes:
            logger.info("Parsing object %s", node)
            obj = object_.Object(node, parent=self[constants.OBJECT])
            objects.append(obj)

        logger.info("Added %d object nodes", len(objects))
        self[constants.OBJECT][constants.CHILDREN] = objects

    def _parse_textures(self):
        """Locate all non-orphaned textures and parse them"""
        logger.debug("Scene()._parse_textures()")
        textures = []

        for texture_name in api.texture.textures():
            logger.info("Parsing texture %s", texture_name)
            tex_inst = texture.Texture(texture_name, self)
            textures.append(tex_inst)

        logger.info("Added %d texture nodes", len(textures))
        self[constants.TEXTURES] = textures
def _find_node(value, manifest):
    """Find a node that matches either a name
    or uuid value.

    :param value: name or uuid
    :param manifest: manifest of nodes to search
    :type value: str
    :type manifest: list

    """
    match = None
    for candidate in manifest:
        matches_uuid = candidate.get(constants.UUID) == value
        matches_name = candidate.node == value
        if matches_uuid or matches_name:
            match = candidate
            break
    if match is None:
        logger.debug("No matching node for %s", value)
    return match
|
ArcherSys/ArcherSys | refs/heads/master | Lib/site-packages/tornado/test/options_test.py | 81 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import os
import sys
from tornado.options import OptionParser, Error
from tornado.util import basestring_type
from tornado.test.util import unittest
try:
from cStringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
try:
from unittest import mock # python 3.3
except ImportError:
try:
import mock # third-party mock package
except ImportError:
mock = None
class OptionsTest(unittest.TestCase):
    """Unit tests for tornado.options.OptionParser."""

    def test_parse_command_line(self):
        """A defined option is overridden by a --flag on the command line."""
        options = OptionParser()
        options.define("port", default=80)
        options.parse_command_line(["main.py", "--port=443"])
        self.assertEqual(options.port, 443)

    def test_parse_config_file(self):
        """Options are read (including unicode values) from a config file."""
        options = OptionParser()
        options.define("port", default=80)
        options.define("username", default='foo')
        options.parse_config_file(os.path.join(os.path.dirname(__file__),
                                               "options_test.cfg"))
        self.assertEqual(options.port, 443)
        self.assertEqual(options.username, "李康")

    def test_parse_callbacks(self):
        """Parse callbacks run only on final parses, and can run again."""
        options = OptionParser()
        self.called = False

        def callback():
            self.called = True
        options.add_parse_callback(callback)

        # non-final parse doesn't run callbacks
        options.parse_command_line(["main.py"], final=False)
        self.assertFalse(self.called)

        # final parse does
        options.parse_command_line(["main.py"])
        self.assertTrue(self.called)

        # callbacks can be run more than once on the same options
        # object if there are multiple final parses
        self.called = False
        options.parse_command_line(["main.py"])
        self.assertTrue(self.called)

    def test_help(self):
        """--help prints usage to stderr and exits."""
        options = OptionParser()
        try:
            orig_stderr = sys.stderr
            sys.stderr = StringIO()
            with self.assertRaises(SystemExit):
                options.parse_command_line(["main.py", "--help"])
            usage = sys.stderr.getvalue()
        finally:
            sys.stderr = orig_stderr
        self.assertIn("Usage:", usage)

    def test_subcommand(self):
        """Leftover args can be re-parsed by a second, independent parser."""
        base_options = OptionParser()
        base_options.define("verbose", default=False)
        sub_options = OptionParser()
        sub_options.define("foo", type=str)
        rest = base_options.parse_command_line(
            ["main.py", "--verbose", "subcommand", "--foo=bar"])
        self.assertEqual(rest, ["subcommand", "--foo=bar"])
        self.assertTrue(base_options.verbose)
        rest2 = sub_options.parse_command_line(rest)
        self.assertEqual(rest2, [])
        self.assertEqual(sub_options.foo, "bar")

        # the two option sets are distinct
        try:
            orig_stderr = sys.stderr
            sys.stderr = StringIO()
            with self.assertRaises(Error):
                sub_options.parse_command_line(["subcommand", "--verbose"])
        finally:
            sys.stderr = orig_stderr

    def test_setattr(self):
        """Options can be assigned directly as attributes."""
        options = OptionParser()
        options.define('foo', default=1, type=int)
        options.foo = 2
        self.assertEqual(options.foo, 2)

    def test_setattr_type_check(self):
        # setattr requires that options be the right type and doesn't
        # parse from string formats.
        options = OptionParser()
        options.define('foo', default=1, type=int)
        with self.assertRaises(Error):
            options.foo = '2'

    def test_setattr_with_callback(self):
        """Direct attribute assignment fires the option's callback."""
        values = []
        options = OptionParser()
        options.define('foo', default=1, type=int, callback=values.append)
        options.foo = 2
        self.assertEqual(values, [2])

    def _sample_options(self):
        """Helper: a parser with two simple options 'a' and 'b'."""
        options = OptionParser()
        options.define('a', default=1)
        options.define('b', default=2)
        return options

    def test_iter(self):
        """Iterating an OptionParser yields its option names."""
        options = self._sample_options()
        # OptionParsers always define 'help'.
        self.assertEqual(set(['a', 'b', 'help']), set(iter(options)))

    def test_getitem(self):
        """Options support dict-style item access."""
        options = self._sample_options()
        self.assertEqual(1, options['a'])

    def test_items(self):
        """items() yields (name, value) pairs for all options."""
        options = self._sample_options()
        # OptionParsers always define 'help'.
        expected = [('a', 1), ('b', 2), ('help', options.help)]
        actual = sorted(options.items())
        self.assertEqual(expected, actual)

    def test_as_dict(self):
        """as_dict() returns a plain name->value mapping."""
        options = self._sample_options()
        expected = {'a': 1, 'b': 2, 'help': options.help}
        self.assertEqual(expected, options.as_dict())

    def test_group_dict(self):
        """group_dict() returns only the options defined in that group."""
        options = OptionParser()
        options.define('a', default=1)
        options.define('b', group='b_group', default=2)

        frame = sys._getframe(0)
        this_file = frame.f_code.co_filename
        self.assertEqual(set(['b_group', '', this_file]), options.groups())

        b_group_dict = options.group_dict('b_group')
        self.assertEqual({'b': 2}, b_group_dict)

        self.assertEqual({}, options.group_dict('nonexistent'))

    @unittest.skipIf(mock is None, 'mock package not present')
    def test_mock_patch(self):
        # ensure that our setattr hooks don't interfere with mock.patch
        options = OptionParser()
        options.define('foo', default=1)
        options.parse_command_line(['main.py', '--foo=2'])
        self.assertEqual(options.foo, 2)

        with mock.patch.object(options.mockable(), 'foo', 3):
            self.assertEqual(options.foo, 3)
        self.assertEqual(options.foo, 2)

        # Try nested patches mixed with explicit sets
        with mock.patch.object(options.mockable(), 'foo', 4):
            self.assertEqual(options.foo, 4)
            options.foo = 5
            self.assertEqual(options.foo, 5)
            with mock.patch.object(options.mockable(), 'foo', 6):
                self.assertEqual(options.foo, 6)
            self.assertEqual(options.foo, 5)
        self.assertEqual(options.foo, 2)

    def test_types(self):
        """Each supported option type parses its command-line string form."""
        options = OptionParser()
        options.define('str', type=str)
        options.define('basestring', type=basestring_type)
        options.define('int', type=int)
        options.define('float', type=float)
        options.define('datetime', type=datetime.datetime)
        options.define('timedelta', type=datetime.timedelta)
        options.parse_command_line(['main.py',
                                    '--str=asdf',
                                    '--basestring=qwer',
                                    '--int=42',
                                    '--float=1.5',
                                    '--datetime=2013-04-28 05:16',
                                    '--timedelta=45s'])
        self.assertEqual(options.str, 'asdf')
        self.assertEqual(options.basestring, 'qwer')
        self.assertEqual(options.int, 42)
        self.assertEqual(options.float, 1.5)
        self.assertEqual(options.datetime,
                         datetime.datetime(2013, 4, 28, 5, 16))
        self.assertEqual(options.timedelta, datetime.timedelta(seconds=45))

    def test_multiple_string(self):
        """multiple=True splits a comma-separated string option."""
        options = OptionParser()
        options.define('foo', type=str, multiple=True)
        options.parse_command_line(['main.py', '--foo=a,b,c'])
        self.assertEqual(options.foo, ['a', 'b', 'c'])

    def test_multiple_int(self):
        """multiple=True int options support values and inclusive ranges."""
        options = OptionParser()
        options.define('foo', type=int, multiple=True)
        options.parse_command_line(['main.py', '--foo=1,3,5:7'])
        self.assertEqual(options.foo, [1, 3, 5, 6, 7])

    def test_error_redefine(self):
        """Defining the same option twice raises Error."""
        options = OptionParser()
        options.define('foo')
        with self.assertRaises(Error) as cm:
            options.define('foo')
        # assertRegexpMatches is the Python 2.7 spelling (assertRegex in 3.x)
        self.assertRegexpMatches(str(cm.exception),
                                 'Option.*foo.*already defined')

    def test_dash_underscore_cli(self):
        # Dashes and underscores should be interchangeable.
        for defined_name in ['foo-bar', 'foo_bar']:
            for flag in ['--foo-bar=a', '--foo_bar=a']:
                options = OptionParser()
                options.define(defined_name)
                options.parse_command_line(['main.py', flag])
                # Attr-style access always uses underscores.
                self.assertEqual(options.foo_bar, 'a')
                # Dict-style access allows both.
                self.assertEqual(options['foo-bar'], 'a')
                self.assertEqual(options['foo_bar'], 'a')

    def test_dash_underscore_file(self):
        # No matter how an option was defined, it can be set with underscores
        # in a config file.
        for defined_name in ['foo-bar', 'foo_bar']:
            options = OptionParser()
            options.define(defined_name)
            options.parse_config_file(os.path.join(os.path.dirname(__file__),
                                                   "options_test.cfg"))
            self.assertEqual(options.foo_bar, 'a')

    def test_dash_underscore_introspection(self):
        # Original names are preserved in introspection APIs.
        options = OptionParser()
        options.define('with-dash', group='g')
        options.define('with_underscore', group='g')
        all_options = ['help', 'with-dash', 'with_underscore']
        self.assertEqual(sorted(options), all_options)
        self.assertEqual(sorted(k for (k, v) in options.items()), all_options)
        self.assertEqual(sorted(options.as_dict().keys()), all_options)
        self.assertEqual(sorted(options.group_dict('g')),
                         ['with-dash', 'with_underscore'])
        # --help shows CLI-style names with dashes.
        buf = StringIO()
        options.print_help(buf)
        self.assertIn('--with-dash', buf.getvalue())
        self.assertIn('--with-underscore', buf.getvalue())
|
lento/cortex | refs/heads/master | test/IECore/FileSequenceVectorParameter.py | 12 | ##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import os
import shutil
class TestFileSequenceVectorParameter( unittest.TestCase ) :

	def mkSequence( self, sequence ) :
		"""Create an empty file on disk for every frame of ``sequence``."""
		directory = "test/sequences/parameterTest"
		for f in sequence.fileNames() :
			# Portable replacement for ``os.system( "touch ..." )``:
			# works without a shell and raises instead of failing silently.
			open( os.path.join( directory, f ), "a" ).close()

	def test( self ) :
		"""Exercise MustExist/MustNotExist/DontCare validation and the
		FileSequence value accessors."""
		s1 = IECore.FileSequence( "a.#.tif", IECore.FrameRange( 1, 10 ) )
		self.mkSequence( s1 )

		s2 = IECore.FileSequence( "b.#.tif", IECore.FrameRange( 5, 20 ) )
		self.mkSequence( s2 )

		p = IECore.FileSequenceVectorParameter( name = "n", description = "d", check = IECore.FileSequenceVectorParameter.CheckType.MustExist )

		# should raise because it's not a valid sequence string
		t = IECore.StringVectorData()
		t.append( "hello" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		# should raise because it doesn't exist
		t = IECore.StringVectorData()
		t.append( "hello.###.tif" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		t = IECore.StringVectorData()
		t.append( "test/sequences/parameterTest/a.#.tif" )
		p.setValidatedValue( t )

		p = IECore.FileSequenceVectorParameter( name = "n", description = "d", check = IECore.FileSequenceVectorParameter.CheckType.MustNotExist )

		# should raise because it's not a valid sequence string
		t = IECore.StringVectorData()
		t.append( "hello" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		# should be fine because it's a valid string and no sequence like that exists
		t = IECore.StringVectorData()
		t.append( "hello.###.tif" )
		p.setValidatedValue( t )

		# should raise because the sequence exists
		t = IECore.StringVectorData()
		t.append( "test/sequences/parameterTest/a.#.tif" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		p = IECore.FileSequenceVectorParameter( name = "n", description = "d", check = IECore.FileSequenceVectorParameter.CheckType.DontCare )

		# should raise because it's not a valid sequence string
		t = IECore.StringVectorData()
		t.append( "hello" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		t = IECore.StringVectorData()
		t.append( "hello.###.tif" )
		p.setValidatedValue( t )

		t = IECore.StringVectorData()
		t.append( "test/sequences/parameterTest/a.#.tif" )
		t.append( "test/sequences/parameterTest/b.#.tif" )
		p.setValidatedValue( t )

		fs = p.getFileSequenceValues()
		self.assertEqual( len(fs), 2 )
		self.assertEqual( fs[0], IECore.ls( "test/sequences/parameterTest/a.#.tif" ) )
		self.assertEqual( fs[1], IECore.ls( "test/sequences/parameterTest/b.#.tif" ) )

		fs = p.getFileSequenceValues( t )
		self.assertEqual( len(fs), 2 )
		self.assertEqual( fs[0], IECore.ls( "test/sequences/parameterTest/a.#.tif" ) )
		self.assertEqual( fs[1], IECore.ls( "test/sequences/parameterTest/b.#.tif" ) )

		p.setFileSequenceValues( [ IECore.FileSequence( "a.###.tif", IECore.FrameRange( 1, 10 ) ) ] )

		t = IECore.StringVectorData()
		t.append( "a.###.tif" )
		self.assertEqual( p.getValue(), t )

	def testEmptyString( self ) :
		"""allowEmptyList must override the MustExist check."""
		p = IECore.FileSequenceVectorParameter( name = "n", description = "d", check = IECore.FileSequenceVectorParameter.CheckType.MustExist, allowEmptyList=True )

		# should be fine, as we allow the empty list, and that should override the file existence checks
		p.setValidatedValue( IECore.StringVectorData( ) )
		self.assertEqual( p.valueValid( IECore.StringVectorData( ) )[0], True )

	def testNotAStringVector( self ) :
		"""Values that are not StringVectorData must be rejected."""
		p = IECore.FileSequenceVectorParameter( name = "n", description = "d" )
		self.assertRaises( RuntimeError, p.setValidatedValue, IECore.IntData( 1 ) )

	def testExtensions( self ) :
		"""The extensions list must restrict which sequences validate."""
		p = IECore.FileSequenceVectorParameter( name = "n", description = "d", check = IECore.FileSequenceVectorParameter.CheckType.DontCare, extensions="tif exr jpg" )
		self.assertEqual( p.extensions, [ "tif", "exr", "jpg" ] )

		# assertTrue replaces the deprecated assert_ (removed in Python 3.12).
		t = IECore.StringVectorData()
		t.append( "a.#.tif" )
		self.assertTrue( p.valueValid( t )[0] )

		t = IECore.StringVectorData()
		t.append( "a.#.gif" )
		self.assertTrue( not p.valueValid( t )[0] )

		t = IECore.StringVectorData()
		t.append( "dsds.#" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		t = IECore.StringVectorData()
		t.append( "dsds.###.gif" )
		self.assertRaises( RuntimeError, p.setValidatedValue, t )

		t = IECore.StringVectorData()
		t.append( "dsds.###.tif" )
		p.setValidatedValue( t )

	def setUp( self ):
		directory = "test/sequences/parameterTest"
		# Portable replacement for ``os.system( "rm -rf ..." )`` and
		# ``os.system( "mkdir -p ..." )``: no shell dependency, and
		# failures raise instead of being ignored.
		if os.path.exists( directory ) :
			shutil.rmtree( directory )
		os.makedirs( directory )

	def tearDown( self ):
		directory = "test/sequences"
		if os.path.exists( directory ) :
			shutil.rmtree( directory )
# Run the test suite when executed as a script.
if __name__ == "__main__":
	unittest.main()
|
liberorbis/libernext | refs/heads/master | env/lib/python2.7/site-packages/celery/events/__init__.py | 8 | # -*- coding: utf-8 -*-
"""
celery.events
~~~~~~~~~~~~~
Events is a stream of messages sent for certain actions occurring
in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT`
is enabled), used for monitoring purposes.
"""
from __future__ import absolute_import
import os
import time
import threading
import warnings
from collections import deque
from contextlib import contextmanager
from copy import copy
from operator import itemgetter
from kombu import Exchange, Queue, Producer
from kombu.connection import maybe_channel
from kombu.mixins import ConsumerMixin
from kombu.utils import cached_property
from celery.app import app_or_default
from celery.utils import anon_nodename, uuid
from celery.utils.functional import dictfilter
from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms
__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver']
event_exchange = Exchange('celeryev', type='topic')
_TZGETTER = itemgetter('utcoffset', 'timestamp')
W_YAJL = """
anyjson is currently using the yajl library.
This json implementation is broken, it severely truncates floats
so timestamps will not work.
Please uninstall yajl or force anyjson to use a different library.
"""
CLIENT_CLOCK_SKEW = -1
def get_exchange(conn):
    """Return a copy of the event exchange suited to *conn*'s transport.

    Redis transports get a fanout exchange instead of a topic exchange
    (quick hack for Issue #436).
    """
    exchange = copy(event_exchange)
    if conn.transport.driver_type == 'redis':
        exchange.type = 'fanout'
    return exchange
def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
    """Create an event.

    An event is a dictionary, the only required field is ``type``.
    A ``timestamp`` field will be set to the current time if not provided.
    """
    if _fields:
        body = __dict__(_fields, **fields)
    else:
        body = fields
    if 'timestamp' in body:
        body['type'] = type
    else:
        body.update(timestamp=__now__(), type=type)
    return body
def group_from(type):
    """Get the group part of an event type name.

    E.g.::

        >>> group_from('task-sent')
        'task'

        >>> group_from('custom-my-event')
        'custom'

    """
    group, _, _ = type.partition('-')
    return group
class EventDispatcher(object):
    """Dispatches event messages.

    :param connection: Connection to the broker.

    :keyword hostname: Hostname to identify ourselves as,
        by default uses the hostname returned by
        :func:`~celery.utils.anon_nodename`.

    :keyword groups: List of groups to send events for.  :meth:`send` will
        ignore send requests to groups not in this list.
        If this is :const:`None`, all events will be sent. Example groups
        include ``"task"`` and ``"worker"``.

    :keyword enabled: Set to :const:`False` to not actually publish any events,
        making :meth:`send` a noop operation.

    :keyword channel: Can be used instead of `connection` to specify
        an exact channel to use when sending events.

    :keyword buffer_while_offline: If enabled events will be buffered
        while the connection is down. :meth:`flush` must be called
        as soon as the connection is re-established.

    You need to :meth:`close` this after use.

    """
    # Transports that cannot support event publishing.
    DISABLED_TRANSPORTS = set(['sql'])

    app = None

    # set of callbacks to be called when :meth:`enabled`.
    on_enabled = None

    # set of callbacks to be called when :meth:`disabled`.
    on_disabled = None

    def __init__(self, connection=None, hostname=None, enabled=True,
                 channel=None, buffer_while_offline=True, app=None,
                 serializer=None, groups=None):
        self.app = app_or_default(app or self.app)
        self.connection = connection
        self.channel = channel
        self.hostname = hostname or anon_nodename()
        self.buffer_while_offline = buffer_while_offline
        self.mutex = threading.Lock()
        self.producer = None
        self._outbound_buffer = deque()
        self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
        self.on_enabled = set()
        self.on_disabled = set()
        self.groups = set(groups or [])
        self.tzoffset = [-time.timezone, -time.altzone]
        self.clock = self.app.clock
        if not connection and channel:
            # Derive the connection from the channel when only the
            # channel was given.
            self.connection = channel.connection.client
        self.enabled = enabled
        conninfo = self.connection or self.app.connection()
        self.exchange = get_exchange(conninfo)
        if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
            self.enabled = False
        if self.enabled:
            self.enable()
        self.headers = {'hostname': self.hostname}
        self.pid = os.getpid()
        self.warn_if_yajl()

    def warn_if_yajl(self):
        # yajl severely truncates floats, breaking event timestamps
        # (see W_YAJL above).
        import anyjson
        if anyjson.implementation.name == 'yajl':
            warnings.warn(UserWarning(W_YAJL))

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def enable(self):
        """Create the producer and start publishing events."""
        self.producer = Producer(self.channel or self.connection,
                                 exchange=self.exchange,
                                 serializer=self.serializer)
        self.enabled = True
        for callback in self.on_enabled:
            callback()

    def disable(self):
        """Stop publishing events and release the producer."""
        if self.enabled:
            self.enabled = False
            self.close()
            for callback in self.on_disabled:
                callback()

    def publish(self, type, fields, producer, retry=False,
                retry_policy=None, blind=False, utcoffset=utcoffset,
                Event=Event):
        """Publish event using a custom :class:`~kombu.Producer`
        instance.

        :param type: Event type name, with group separated by dash (`-`).
        :param fields: Dictionary of event fields, must be json serializable.
        :param producer: :class:`~kombu.Producer` instance to use,
            only the ``publish`` method will be called.
        :keyword retry: Retry in the event of connection failure.
        :keyword retry_policy: Dict of custom retry policy, see
            :meth:`~kombu.Connection.ensure`.
        :keyword blind: Don't set logical clock value (also do not forward
            the internal logical clock).
        :keyword Event: Event type used to create event,
            defaults to :func:`Event`.
        :keyword utcoffset: Function returning the current utcoffset in hours.

        """
        with self.mutex:
            # Forward the logical clock unless this is a blind event.
            clock = None if blind else self.clock.forward()
            event = Event(type, hostname=self.hostname, utcoffset=utcoffset(),
                          pid=self.pid, clock=clock, **fields)
            exchange = self.exchange
            producer.publish(
                event,
                routing_key=type.replace('-', '.'),
                exchange=exchange.name,
                retry=retry,
                retry_policy=retry_policy,
                declare=[exchange],
                serializer=self.serializer,
                headers=self.headers,
            )

    def send(self, type, blind=False, **fields):
        """Send event.

        :param type: Event type name, with group separated by dash (`-`).
        :keyword blind: Don't set logical clock value (also do not forward
            the internal logical clock).
        :keyword \*\*fields: Event fields, must be json serializable.

        """
        if self.enabled:
            groups = self.groups
            if groups and group_from(type) not in groups:
                return
            try:
                # BUG FIX: ``blind`` was previously passed positionally,
                # which bound it to ``publish``'s ``retry`` parameter —
                # silently enabling retry instead of blind publishing.
                self.publish(type, fields, self.producer, blind=blind)
            except Exception as exc:
                if not self.buffer_while_offline:
                    raise
                # Buffer the event so flush() can resend it when the
                # connection comes back.
                self._outbound_buffer.append((type, fields, exc))

    def flush(self):
        """Flushes the outbound buffer."""
        while self._outbound_buffer:
            try:
                type, fields, _ = self._outbound_buffer.popleft()
            except IndexError:
                return
            self.send(type, **fields)

    def extend_buffer(self, other):
        """Copies the outbound buffer of another instance."""
        self._outbound_buffer.extend(other._outbound_buffer)

    def close(self):
        """Close the event dispatcher."""
        # Release the mutex if held; best effort since close() may be
        # called from any state.
        self.mutex.locked() and self.mutex.release()
        self.producer = None

    def _get_publisher(self):
        return self.producer

    def _set_publisher(self, producer):
        self.producer = producer
    publisher = property(_get_publisher, _set_publisher)  # XXX compat
class EventReceiver(ConsumerMixin):
    """Capture events.

    :param connection: Connection to the broker.
    :keyword handlers: Event handlers.

    :attr:`handlers` is a dict of event types and their handlers,
    the special handler `"*"` captures all events that doesn't have a
    handler.

    """
    app = None

    def __init__(self, channel, handlers=None, routing_key='#',
                 node_id=None, app=None, queue_prefix='celeryev',
                 accept=None):
        self.app = app_or_default(app or self.app)
        self.channel = maybe_channel(channel)
        self.handlers = {} if handlers is None else handlers
        self.routing_key = routing_key
        self.node_id = node_id or uuid()
        self.queue_prefix = queue_prefix
        self.exchange = get_exchange(self.connection or self.app.connection())
        # Per-receiver auto-deleted queue so every monitor gets its own
        # copy of the event stream.
        self.queue = Queue('.'.join([self.queue_prefix, self.node_id]),
                           exchange=self.exchange,
                           routing_key=self.routing_key,
                           auto_delete=True,
                           durable=False,
                           queue_arguments=self._get_queue_arguments())
        self.clock = self.app.clock
        self.adjust_clock = self.clock.adjust
        self.forward_clock = self.clock.forward
        if accept is None:
            accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json'])
        self.accept = accept

    def _get_queue_arguments(self):
        # TTL/expiry settings are optional; dictfilter drops None entries.
        conf = self.app.conf
        return dictfilter({
            'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL),
            'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES),
        })

    def process(self, type, event):
        """Process the received event by dispatching it to the appropriate
        handler."""
        handler = self.handlers.get(type) or self.handlers.get('*')
        handler and handler(event)

    def get_consumers(self, Consumer, channel):
        # ConsumerMixin hook: single consumer on our private queue.
        return [Consumer(queues=[self.queue],
                         callbacks=[self._receive], no_ack=True,
                         accept=self.accept)]

    def on_consume_ready(self, connection, channel, consumers,
                         wakeup=True, **kwargs):
        if wakeup:
            self.wakeup_workers(channel=channel)

    def itercapture(self, limit=None, timeout=None, wakeup=True):
        return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)

    def capture(self, limit=None, timeout=None, wakeup=True):
        """Open up a consumer capturing events.

        This has to run in the main process, and it will never
        stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.

        """
        return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))

    def wakeup_workers(self, channel=None):
        # Broadcast a heartbeat request so monitors see fresh worker
        # state immediately.
        self.app.control.broadcast('heartbeat',
                                   connection=self.connection,
                                   channel=channel)

    def event_from_message(self, body, localize=True,
                           now=time.time, tzfields=_TZGETTER,
                           adjust_timestamp=adjust_timestamp,
                           CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
        """Deserialize an incoming event, adjusting clocks and timestamps.

        Returns a ``(type, body)`` tuple ready for :meth:`process`.
        """
        type = body['type']
        if type == 'task-sent':
            # clients never sync so cannot use their clock value
            _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
            self.adjust_clock(_c)
        else:
            try:
                clock = body['clock']
            except KeyError:
                # Event carried no clock: just tick ours forward.
                body['clock'] = self.forward_clock()
            else:
                self.adjust_clock(clock)

        if localize:
            try:
                offset, timestamp = tzfields(body)
            except KeyError:
                pass
            else:
                # Convert the sender's local timestamp to this host's zone.
                body['timestamp'] = adjust_timestamp(timestamp, offset)
        body['local_received'] = now()
        return type, body

    def _receive(self, body, message):
        self.process(*self.event_from_message(body))

    @property
    def connection(self):
        return self.channel.connection.client if self.channel else None
class Events(object):
    """App namespace giving access to app-bound event classes."""

    def __init__(self, app=None):
        self.app = app

    @cached_property
    def Receiver(self):
        # Subclass bound to this app so ``app.events.Receiver(...)`` works
        # without passing the app explicitly.
        return self.app.subclass_with_self(EventReceiver,
                                           reverse='events.Receiver')

    @cached_property
    def Dispatcher(self):
        return self.app.subclass_with_self(EventDispatcher,
                                           reverse='events.Dispatcher')

    @cached_property
    def State(self):
        # Referenced by name to avoid a circular import at module load.
        return self.app.subclass_with_self('celery.events.state:State',
                                           reverse='events.State')

    @contextmanager
    def default_dispatcher(self, hostname=None, enabled=True,
                           buffer_while_offline=False):
        """Context manager yielding a dispatcher backed by the producer pool."""
        with self.app.amqp.producer_pool.acquire(block=True) as prod:
            with self.Dispatcher(prod.connection, hostname, enabled,
                                 prod.channel, buffer_while_offline) as d:
                yield d
|
hexid/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/test/actions/gyptest-errors.py | 147 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies behavior for different action configuration errors:
exit status of 1, and the expected error message must be in stderr.
"""
import TestGyp

test = TestGyp.TestGyp()

# gyp must exit with status 1 and print the documented error about the
# missing 'action_name' field.
test.run_gyp('action_missing_name.gyp', chdir='src', status=1, stderr=None)
expect = [
  "Anonymous action in target broken_actions2. An action must have an 'action_name' field.",
]
test.must_contain_all_lines(test.stderr(), expect)

test.pass_test()
|
hohe/scikit-rf | refs/heads/develop | doc/sphinxext/comment_eater.py | 6 | from cStringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize
from compiler_unparse import unparse
class Comment(object):
    """A contiguous block of comment lines."""

    # Distinguishes Comment blocks from NonComment blocks.
    is_comment = True

    def __init__(self, start_lineno, end_lineno, text):
        # First line number in the block (1-indexed).
        self.start_lineno = start_lineno
        # Last line number, inclusive.
        self.end_lineno = end_lineno
        # Comment text including '#' markers, without leading spaces.
        self.text = text

    def add(self, string, start, end, line):
        """Extend the block with one more comment line."""
        if start[0] < self.start_lineno:
            self.start_lineno = start[0]
        if end[0] > self.end_lineno:
            self.end_lineno = end[0]
        self.text += string

    def __repr__(self):
        return '%s(%r, %r, %r)' % (type(self).__name__, self.start_lineno,
                                   self.end_lineno, self.text)
class NonComment(object):
    """A block of code lines (anything that is not a comment block)."""

    # Distinguishes NonComment blocks from Comment blocks.
    is_comment = False

    def __init__(self, start_lineno, end_lineno):
        self.start_lineno = start_lineno
        self.end_lineno = end_lineno

    def add(self, string, start, end, line):
        """Extend the block; whitespace-only tokens are ignored."""
        if not string.strip():
            # Entirely whitespace: do not grow the block.
            return
        if start[0] < self.start_lineno:
            self.start_lineno = start[0]
        if end[0] > self.end_lineno:
            self.end_lineno = end[0]

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.start_lineno,
                               self.end_lineno)
class CommentBlocker(object):
    """ Pull out contiguous comment blocks.
    """

    def __init__(self):
        # Start with a dummy.
        self.current_block = NonComment(0, 0)
        # All of the blocks seen so far.
        self.blocks = []
        # The index mapping lines of code to their associated comment blocks.
        self.index = {}

    def process_file(self, file):
        """ Process a file object.
        """
        # NOTE(review): ``file.next`` is the Python 2 iterator protocol;
        # this module targets Python 2 (see cStringIO/compiler imports).
        for token in tokenize.generate_tokens(file.next):
            self.process_token(*token)
        self.make_index()

    def process_token(self, kind, string, start, end, line):
        """ Process a single token.

        Acts as a tiny state machine: transitions between Comment and
        NonComment blocks whenever the token kind changes category.
        """
        if self.current_block.is_comment:
            if kind == tokenize.COMMENT:
                self.current_block.add(string, start, end, line)
            else:
                self.new_noncomment(start[0], end[0])
        else:
            if kind == tokenize.COMMENT:
                self.new_comment(string, start, end, line)
            else:
                self.current_block.add(string, start, end, line)

    def new_noncomment(self, start_lineno, end_lineno):
        """ We are transitioning from a noncomment to a comment.
        """
        block = NonComment(start_lineno, end_lineno)
        self.blocks.append(block)
        self.current_block = block

    def new_comment(self, string, start, end, line):
        """ Possibly add a new comment.

        Only adds a new comment if this comment is the only thing on the line.
        Otherwise, it extends the noncomment block.
        """
        prefix = line[:start[1]]
        if prefix.strip():
            # Oops! Trailing comment, not a comment block.
            self.current_block.add(string, start, end, line)
        else:
            # A comment block.
            block = Comment(start[0], end[0], string)
            self.blocks.append(block)
            self.current_block = block

    def make_index(self):
        """ Make the index mapping lines of actual code to their associated
        prefix comments.
        """
        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
            if not block.is_comment:
                # Map the first code line of the block to the comment
                # block immediately preceding it.
                self.index[block.start_lineno] = prev

    def search_for_comment(self, lineno, default=None):
        """ Find the comment block just before the given line number.

        Returns None (or the specified default) if there is no such block.
        """
        if not self.index:
            self.make_index()
        block = self.index.get(lineno, None)
        text = getattr(block, 'text', default)
        return text
def strip_comment_marker(text):
    """Strip # markers at the front of a block of comment text."""
    without_markers = '\n'.join(
        line.lstrip('#') for line in text.splitlines())
    return textwrap.dedent(without_markers)
def get_class_traits(klass):
    """ Yield all of the documentation for trait definitions on a class object.

    Yields ``(name, rhs, doc)`` tuples: the assigned name, the unparsed
    right-hand side expression, and the preceding comment block (stripped
    of '#' markers).

    NOTE(review): relies on the ``compiler`` module, which only exists in
    Python 2 — this function cannot run on Python 3 as written.
    """
    # FIXME: gracefully handle errors here or in the caller?
    source = inspect.getsource(klass)
    cb = CommentBlocker()
    cb.process_file(StringIO(source))
    mod_ast = compiler.parse(source)
    class_ast = mod_ast.node.nodes[0]
    for node in class_ast.code.nodes:
        # FIXME: handle other kinds of assignments?
        if isinstance(node, compiler.ast.Assign):
            name = node.nodes[0].name
            rhs = unparse(node.expr).strip()
            # Attach the comment block that directly precedes the assignment.
            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
            yield name, rhs, doc
|
msmbuilder/msmbuilder | refs/heads/master | msmbuilder/preprocessing/base.py | 6 | # Author: Carlos Xavier Hernandez <cxh@stanford.edu>
# Contributors:
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
from __future__ import print_function, division, absolute_import
import numpy as np
import collections
from ..base import BaseEstimator
from ..utils import check_iter_of_sequences
class MultiSequencePreprocessingMixin(BaseEstimator):
    # The API for the scikit-learn preprocessing object is, in fit(), that
    # they take a single 2D array of shape (n_data_points, n_features).
    #
    # For reducing a collection of timeseries, we need to preserve
    # the structure of which data_point came from which sequence. If
    # we concatenate the sequences together, we lose that information.
    #
    # This mixin is basically a little "adaptor" that changes fit()
    # so that it accepts a list of sequences. Its implementation
    # concatenates the sequences, calls the superclass fit(), and
    # then splits the labels_ back into the sequenced form.
    #
    # This code is copied and modified from cluster.MultiSequenceClusterMixin
    #
    # NOTE: an earlier revision defined ``fit`` twice in this class body;
    # the first definition (which called the superclass fit() on the
    # concatenation directly) was dead code, silently shadowed by the
    # later definition.  The duplicate has been removed.

    def _concat(self, sequences):
        """Concatenate sequences into one array, remembering the lengths
        so that _split() can invert the operation."""
        self.__lengths = [len(s) for s in sequences]
        # Indexing will fail on generic iterators, so materialize first.
        # ``collections.Sequence`` was removed in Python 3.10; use the
        # ``collections.abc`` location when it exists (Python 3.3+).
        abc = getattr(collections, 'abc', collections)
        if not isinstance(sequences, abc.Sequence):
            sequences = list(sequences)
        if len(sequences) > 0 and isinstance(sequences[0], np.ndarray):
            concat = np.concatenate(sequences)
        else:
            # if the input sequences are not numpy arrays, we need to guess
            # how to concatenate them. this operation below works for mdtraj
            # trajectories (which is the use case that I want to be sure to
            # support), but in general the python container protocol doesn't
            # give us a generic way to make sure we merged sequences
            concat = sequences[0].join(sequences[1:])

        assert sum(self.__lengths) == len(concat)
        return concat

    def _split(self, concat):
        """Invert _concat(): slice ``concat`` back into pieces matching the
        remembered sequence lengths."""
        return [concat[cl - l: cl] for (cl, l) in
                zip(np.cumsum(self.__lengths), self.__lengths)]

    def transform(self, sequences):
        """Apply preprocessing to sequences

        Parameters
        ----------
        sequences: list of array-like, each of shape (n_samples_i, n_features)
            Sequence data to transform, where n_samples_i in the number of samples
            in sequence i and n_features is the number of features.

        Returns
        -------
        sequence_new : list of array-like, each of shape (n_samples_i, n_components)
        """
        check_iter_of_sequences(sequences)
        transforms = []
        for X in sequences:
            transforms.append(self.partial_transform(X))
        return transforms

    def fit_transform(self, sequences, y=None):
        """Fit the model and apply preprocessing

        Parameters
        ----------
        sequences: list of array-like, each of shape (n_samples_i, n_features)
            Training data, where n_samples_i in the number of samples
            in sequence i and n_features is the number of features.
        y : None
            Ignored

        Returns
        -------
        sequence_new : list of array-like, each of shape (n_samples_i, n_components)
        """
        self.fit(sequences)
        transforms = self.transform(sequences)

        return transforms

    def partial_transform(self, sequence):
        """Apply preprocessing to single sequence

        Parameters
        ----------
        sequence: array like, shape (n_samples, n_features)
            A single sequence to transform

        Returns
        -------
        out : array like, shape (n_samples, n_features)
        """
        s = super(MultiSequencePreprocessingMixin, self)
        return s.transform(sequence)

    def partial_fit(self, sequence, y=None):
        """Fit Preprocessing to X.

        Parameters
        ----------
        sequence : array-like, [sequence_length, n_features]
            A multivariate timeseries.
        y : None
            Ignored

        Returns
        -------
        self
        """
        s = super(MultiSequencePreprocessingMixin, self)
        # Some wrapped preprocessors have no fit() at all; in that case
        # fitting is a no-op and we simply return self.
        if hasattr(s, 'fit'):
            return s.fit(sequence)
        return self

    def fit(self, X, y=None):
        """Fit Preprocessing to X.

        Parameters
        ----------
        X : list of array-like, each of shape [sequence_length, n_features]
            A list of multivariate timeseries.
        y : None
            Ignored

        Returns
        -------
        self
        """
        # Concatenate all sequences and fit the wrapped preprocessor once.
        return self.partial_fit(np.concatenate(X, axis=0))
class MultiSequenceOnlinePreprocessingMixin(MultiSequencePreprocessingMixin):
    # Variant for preprocessors that support online learning: fits by
    # streaming one sequence at a time through partial_fit() instead of
    # concatenating everything up front.

    def fit(self, sequences, y=None):
        """Fit Preprocessing to X.

        Parameters
        ----------
        sequences : list of array-like, each of shape [sequence_length, n_features]
            A list of multivariate timeseries. Each sequence may have
            a different length, but they all must have the same number
            of features.
        y : None
            Ignored

        Returns
        -------
        self
        """
        check_iter_of_sequences(sequences)
        for sequence in sequences:
            # NOTE: super() is deliberately anchored at
            # MultiSequencePreprocessingMixin (not this class) so the call
            # resolves to the wrapped preprocessor's partial_fit, matching
            # the parent class's convention.
            s = super(MultiSequencePreprocessingMixin, self)
            s.partial_fit(sequence)
        return self
|
tempbottle/kbengine | refs/heads/master | kbe/src/lib/python/Lib/ctypes/test/test_frombuffer.py | 70 | from ctypes import *
import array
import gc
import unittest
class X(Structure):
    """Structure used to verify that from_buffer/from_buffer_copy do NOT
    invoke __init__."""
    _fields_ = [("c_int", c_int)]
    # Flag flipped by __init__; stays False for from_buffer instances.
    init_called = False

    def __init__(self):
        # BUG FIX: this previously set ``self._init_called`` (note the
        # leading underscore), so the tests' assertFalse(y.init_called)
        # checks passed vacuously and never actually verified that
        # __init__ was skipped.
        self.init_called = True
class Test(unittest.TestCase):
    """Tests for ctypes ``from_buffer`` and ``from_buffer_copy``."""

    def test_fom_buffer(self):
        # from_buffer shares memory with the source: array mutations must
        # be visible through the ctypes view.
        a = array.array("i", range(16))
        x = (c_int * 16).from_buffer(a)

        y = X.from_buffer(a)
        self.assertEqual(y.c_int, a[0])
        # from_buffer must not call __init__.
        self.assertFalse(y.init_called)

        self.assertEqual(x[:], a.tolist())

        a[0], a[-1] = 200, -200
        self.assertEqual(x[:], a.tolist())

        # The ctypes object keeps the source buffer alive via _objects.
        self.assertIn(a, x._objects.values())

        self.assertRaises(ValueError,
                          c_int.from_buffer, a, -1)

        # The shared memory stays valid even after the array is deleted,
        # because x still references it.
        expected = x[:]
        del a; gc.collect(); gc.collect(); gc.collect()
        self.assertEqual(x[:], expected)

        # from_buffer requires a writable buffer; str is rejected.
        self.assertRaises(TypeError,
                          (c_char * 16).from_buffer, "a" * 16)

    def test_fom_buffer_with_offset(self):
        a = array.array("i", range(16))
        x = (c_int * 15).from_buffer(a, sizeof(c_int))

        self.assertEqual(x[:], a.tolist()[1:])
        # Requests that would run past the end of the buffer must fail.
        self.assertRaises(ValueError, lambda: (c_int * 16).from_buffer(a, sizeof(c_int)))
        self.assertRaises(ValueError, lambda: (c_int * 1).from_buffer(a, 16 * sizeof(c_int)))

    def test_from_buffer_copy(self):
        # from_buffer_copy takes a snapshot: later array mutations must
        # NOT be visible.
        a = array.array("i", range(16))
        x = (c_int * 16).from_buffer_copy(a)

        y = X.from_buffer_copy(a)
        self.assertEqual(y.c_int, a[0])
        # from_buffer_copy must not call __init__ either.
        self.assertFalse(y.init_called)

        self.assertEqual(x[:], list(range(16)))

        a[0], a[-1] = 200, -200
        self.assertEqual(x[:], list(range(16)))

        # No reference to the source is kept for a copy.
        self.assertEqual(x._objects, None)

        self.assertRaises(ValueError,
                          c_int.from_buffer, a, -1)

        del a; gc.collect(); gc.collect(); gc.collect()
        self.assertEqual(x[:], list(range(16)))

        # Unlike from_buffer, a read-only source is fine for a copy.
        x = (c_char * 16).from_buffer_copy(b"a" * 16)
        self.assertEqual(x[:], b"a" * 16)

    def test_fom_buffer_copy_with_offset(self):
        a = array.array("i", range(16))
        x = (c_int * 15).from_buffer_copy(a, sizeof(c_int))

        self.assertEqual(x[:], a.tolist()[1:])
        self.assertRaises(ValueError,
                          (c_int * 16).from_buffer_copy, a, sizeof(c_int))
        self.assertRaises(ValueError,
                          (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int))
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
zulip/zulip | refs/heads/master | zerver/tests/test_home.py | 1 | import calendar
import urllib
from datetime import timedelta
from typing import Any
from unittest.mock import patch
import orjson
import pytz
from django.conf import settings
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from corporate.models import Customer, CustomerPlan
from zerver.lib.actions import change_user_is_active, do_change_plan_type, do_create_user
from zerver.lib.compatibility import LAST_SERVER_UPGRADE_TIME, is_outdated_server
from zerver.lib.home import (
get_billing_info,
get_furthest_read_time,
promote_sponsoring_zulip_in_realm,
)
from zerver.lib.soft_deactivation import do_soft_deactivate_users
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_user_messages, override_settings, queries_captured
from zerver.lib.users import compute_show_invites_and_add_streams
from zerver.models import (
DefaultStream,
Realm,
UserActivity,
UserProfile,
flush_per_request_caches,
get_realm,
get_stream,
get_system_bot,
get_user,
)
from zerver.worker.queue_processors import UserActivityWorker
logger_string = "zulip.soft_deactivation"
class HomeTest(ZulipTestCase):
# Keep this list sorted!!!
expected_page_params_keys = [
"alert_words",
"apps_page_url",
"available_notification_sounds",
"avatar_source",
"avatar_url",
"avatar_url_medium",
"bot_types",
"can_create_streams",
"can_invite_others_to_realm",
"can_subscribe_other_users",
"color_scheme",
"corporate_enabled",
"cross_realm_bots",
"custom_profile_field_types",
"custom_profile_fields",
"default_language",
"default_view",
"delivery_email",
"demote_inactive_streams",
"dense_mode",
"desktop_icon_count_display",
"development_environment",
"email",
"emojiset",
"emojiset_choices",
"enable_desktop_notifications",
"enable_digest_emails",
"enable_login_emails",
"enable_marketing_emails",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"enable_online_push_notifications",
"enable_sounds",
"enable_stream_audible_notifications",
"enable_stream_desktop_notifications",
"enable_stream_email_notifications",
"enable_stream_push_notifications",
"enter_sends",
"event_queue_longpoll_timeout_seconds",
"first_in_realm",
"fluid_layout_width",
"full_name",
"furthest_read_time",
"giphy_api_key",
"giphy_rating_options",
"has_zoom_token",
"high_contrast_mode",
"hotspots",
"insecure_desktop_app",
"is_admin",
"is_billing_admin",
"is_guest",
"is_moderator",
"is_owner",
"is_spectator",
"jitsi_server_url",
"language_list",
"last_event_id",
"left_side_userlist",
"login_page",
"max_avatar_file_size_mib",
"max_file_upload_size_mib",
"max_icon_file_size_mib",
"max_logo_file_size_mib",
"max_message_id",
"max_message_length",
"max_stream_description_length",
"max_stream_name_length",
"max_topic_length",
"message_content_in_email_notifications",
"muted_topics",
"muted_users",
"narrow",
"narrow_stream",
"needs_tutorial",
"never_subscribed",
"no_event_queue",
"notification_sound",
"password_min_guesses",
"password_min_length",
"pm_content_in_desktop_notifications",
"presence_enabled",
"presences",
"promote_sponsoring_zulip",
"prompt_for_invites",
"queue_id",
"realm_add_emoji_by_admins_only",
"realm_allow_edit_history",
"realm_allow_message_deleting",
"realm_allow_message_editing",
"realm_authentication_methods",
"realm_available_video_chat_providers",
"realm_avatar_changes_disabled",
"realm_bot_creation_policy",
"realm_bot_domain",
"realm_bots",
"realm_community_topic_editing_limit_seconds",
"realm_create_stream_policy",
"realm_default_code_block_language",
"realm_default_external_accounts",
"realm_default_language",
"realm_default_stream_groups",
"realm_default_streams",
"realm_default_twenty_four_hour_time",
"realm_description",
"realm_digest_emails_enabled",
"realm_digest_weekday",
"realm_disallow_disposable_email_addresses",
"realm_domains",
"realm_edit_topic_policy",
"realm_email_address_visibility",
"realm_email_auth_enabled",
"realm_email_changes_disabled",
"realm_emails_restricted_to_domains",
"realm_embedded_bots",
"realm_emoji",
"realm_filters",
"realm_giphy_rating",
"realm_icon_source",
"realm_icon_url",
"realm_incoming_webhook_bots",
"realm_inline_image_preview",
"realm_inline_url_embed_preview",
"realm_invite_required",
"realm_invite_to_realm_policy",
"realm_invite_to_stream_policy",
"realm_is_zephyr_mirror_realm",
"realm_linkifiers",
"realm_logo_source",
"realm_logo_url",
"realm_mandatory_topics",
"realm_message_content_allowed_in_email_notifications",
"realm_message_content_delete_limit_seconds",
"realm_message_content_edit_limit_seconds",
"realm_message_retention_days",
"realm_move_messages_between_streams_policy",
"realm_name",
"realm_name_changes_disabled",
"realm_name_in_notifications",
"realm_night_logo_source",
"realm_night_logo_url",
"realm_non_active_users",
"realm_notifications_stream_id",
"realm_password_auth_enabled",
"realm_plan_type",
"realm_playgrounds",
"realm_presence_disabled",
"realm_private_message_policy",
"realm_push_notifications_enabled",
"realm_send_welcome_emails",
"realm_signup_notifications_stream_id",
"realm_upload_quota_mib",
"realm_uri",
"realm_user_group_edit_policy",
"realm_user_groups",
"realm_users",
"realm_video_chat_provider",
"realm_waiting_period_threshold",
"realm_wildcard_mention_policy",
"recent_private_conversations",
"request_language",
"save_stacktraces",
"search_pills_enabled",
"server_avatar_changes_disabled",
"server_generation",
"server_inline_image_preview",
"server_inline_url_embed_preview",
"server_name_changes_disabled",
"server_needs_upgrade",
"server_timestamp",
"settings_send_digest_emails",
"show_billing",
"show_invites",
"show_plans",
"show_webathena",
"starred_message_counts",
"starred_messages",
"stop_words",
"subscriptions",
"test_suite",
"timezone",
"translate_emoticons",
"translation_data",
"twenty_four_hour_time",
"two_fa_enabled",
"two_fa_enabled_user",
"unread_msgs",
"unsubscribed",
"upgrade_text_for_wide_organization_logo",
"user_id",
"user_status",
"warn_no_email",
"webpack_public_path",
"wildcard_mentions_notify",
"zulip_feature_level",
"zulip_merge_base",
"zulip_plan_is_not_limited",
"zulip_version",
]
def test_home(self) -> None:
    """Smoke-test the logged-in home page.

    Checks the Cache-Control headers, a handful of expected HTML
    fragments, the database-query and cache-write counts, the exact
    sorted set of page_params keys, and the record shape of realm_bots.
    """
    # Keep this list sorted!!!
    html_bits = [
        "start the conversation",
        "Keyboard shortcuts",
        "Loading...",
        "Filter streams",
        # Verify that the app styles get included
        "app-stubentry.js",
        "data-params",
    ]
    self.login("hamlet")
    # Create bot for realm_bots testing. Must be done before fetching home_page.
    bot_info = {
        "full_name": "The Bot of Hamlet",
        "short_name": "hambot",
    }
    self.client_post("/json/bots", bot_info)
    # Verify succeeds once logged-in
    flush_per_request_caches()
    with queries_captured() as queries:
        with patch("zerver.lib.cache.cache_set") as cache_mock:
            result = self._get_home_page(stream="Denmark")
            self.check_rendered_logged_in_app(result)
    self.assertEqual(
        set(result["Cache-Control"].split(", ")), {"must-revalidate", "no-store", "no-cache"}
    )
    # Guard against accidental regressions in query count / cache writes.
    self.assert_length(queries, 40)
    self.assert_length(cache_mock.call_args_list, 5)
    html = result.content.decode("utf-8")
    for html_bit in html_bits:
        if html_bit not in html:
            raise AssertionError(f"{html_bit} not in result")
    page_params = self._get_page_params(result)
    actual_keys = sorted(str(k) for k in page_params.keys())
    self.assertEqual(actual_keys, self.expected_page_params_keys)
    # TODO: Inspect the page_params data further.
    # print(orjson.dumps(page_params, option=orjson.OPT_INDENT_2).decode())
    realm_bots_expected_keys = [
        "api_key",
        "avatar_url",
        "bot_type",
        "default_all_public_streams",
        "default_events_register_stream",
        "default_sending_stream",
        "email",
        "full_name",
        "is_active",
        "owner_id",
        "services",
        "user_id",
    ]
    realm_bots_actual_keys = sorted(str(key) for key in page_params["realm_bots"][0].keys())
    self.assertEqual(realm_bots_actual_keys, realm_bots_expected_keys)
def test_logged_out_home(self) -> None:
    """A logged-out request to "/" renders, and its page_params keys are
    exactly the logged-in key list minus the session-specific keys."""
    response = self.client_get("/")
    self.assertEqual(response.status_code, 200)
    page_params = self._get_page_params(response)
    actual_keys = sorted(str(key) for key in page_params.keys())
    # These keys only appear for an authenticated session.
    logged_in_only_keys = frozenset(["last_event_id", "narrow", "narrow_stream"])
    expected_keys = [
        key for key in self.expected_page_params_keys if key not in logged_in_only_keys
    ]
    self.assertEqual(actual_keys, expected_keys)
def test_home_under_2fa_without_otp_device(self) -> None:
    """With 2FA enabled server-wide but no OTP device configured for the
    user, the home page still loads normally."""
    with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
        self.login("iago")
        response = self._get_home_page()
        # No OTP device is configured, so the app renders directly.
        self.check_rendered_logged_in_app(response)
def test_home_under_2fa_with_otp_device(self) -> None:
    """With 2FA enabled and an OTP device configured, the home page
    redirects until the second factor is supplied."""
    with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
        user_profile = self.example_user("iago")
        self.create_default_device(user_profile)
        self.login_user(user_profile)
        # Password login alone is not enough once an OTP device exists:
        # the 2FA login step has not been performed yet.
        response = self._get_home_page()
        self.assertEqual(response.status_code, 302)
        self.login_2fa(user_profile)
        # After completing the 2FA step, the app renders.
        response = self._get_home_page()
        self.check_rendered_logged_in_app(response)
def test_num_queries_for_realm_admin(self) -> None:
    # Verify number of queries for Realm admin isn't much higher than for normal users.
    self.login("iago")
    flush_per_request_caches()
    with queries_captured() as queries:
        with patch("zerver.lib.cache.cache_set") as cache_mock:
            result = self._get_home_page()
            self.check_rendered_logged_in_app(result)
            self.assert_length(cache_mock.call_args_list, 6)
        # Compare with 40 queries for the regular-user test_home above.
        self.assert_length(queries, 37)
def test_num_queries_with_streams(self) -> None:
    # The home-page query count must stay flat even with many
    # subscribed default streams (i.e. no per-stream queries).
    main_user = self.example_user("hamlet")
    other_user = self.example_user("cordelia")
    realm_id = main_user.realm_id
    self.login_user(main_user)
    # Try to make page-load do extra work for various subscribed
    # streams.
    for i in range(10):
        stream_name = "test_stream_" + str(i)
        stream = self.make_stream(stream_name)
        DefaultStream.objects.create(
            realm_id=realm_id,
            stream_id=stream.id,
        )
        for user in [main_user, other_user]:
            self.subscribe(user, stream_name)
    # Simulate hitting the page the first time to avoid some noise
    # related to initial logins.
    self._get_home_page()
    # Then for the second page load, measure the number of queries.
    flush_per_request_caches()
    with queries_captured() as queries2:
        result = self._get_home_page()
    self.assert_length(queries2, 35)
    # Do a sanity check that our new streams were in the payload.
    html = result.content.decode("utf-8")
    self.assertIn("test_stream_7", html)
def _get_home_page(self, **kwargs: Any) -> HttpResponse:
    """Fetch "/" with the event system stubbed out (a fake queue id is
    returned and no user events are delivered)."""
    with patch("zerver.lib.events.request_event_queue", return_value=42), patch(
        "zerver.lib.events.get_user_events", return_value=[]
    ):
        return self.client_get("/", dict(**kwargs))
def assertInHomePage(self, string: str) -> bool:
    """Assert that `string` occurs in the rendered home page HTML."""
    html = self._get_home_page().content.decode("utf-8")
    return self.assertIn(string, html)
def assertNotInHomePage(self, string: str) -> bool:
    """Assert that `string` does not occur in the rendered home page HTML."""
    html = self._get_home_page().content.decode("utf-8")
    return self.assertNotIn(string, html)
def _sanity_check(self, result: HttpResponse) -> None:
    """
    Use this for tests that are geared toward specific edge cases, but
    which still want the home page to load properly.
    """
    page_html = result.content.decode("utf-8")
    if "start a conversation" not in page_html:
        raise AssertionError("Home page probably did not load.")
def test_terms_of_service(self) -> None:
    """A user whose accepted ToS version is missing or older than the
    server's is prompted to accept the new Terms of Service."""
    user = self.example_user("hamlet")
    self.login_user(user)
    for stale_version in [None, "1.1", "2.0.3.4"]:
        user.tos_version = stale_version
        user.save()
        with self.settings(TERMS_OF_SERVICE="whatever"), self.settings(TOS_VERSION="99.99"):
            response = self.client_get("/", dict(stream="Denmark"))
        html = response.content.decode("utf-8")
        self.assertIn("Accept the new Terms of Service", html)
def test_banned_desktop_app_versions(self) -> None:
    """Requests from a known-bad desktop app version get a warning page."""
    user = self.example_user("hamlet")
    self.login_user(user)
    response = self.client_get("/", HTTP_USER_AGENT="ZulipElectron/2.3.82")
    self.assertIn(
        "You are using old version of the Zulip desktop",
        response.content.decode("utf-8"),
    )
def test_unsupported_browser(self) -> None:
    """Internet Explorer user agents are shown the unsupported-browser page."""
    user = self.example_user("hamlet")
    self.login_user(user)
    # currently we don't support IE, so some of IE's user agents are added.
    ie_user_agents = (
        "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2)",
        "Mozilla/5.0 (Windows NT 10.0; Trident/7.0; rv:11.0) like Gecko",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
    )
    for agent in ie_user_agents:
        response = self.client_get("/", HTTP_USER_AGENT=agent)
        self.assertIn(
            "Internet Explorer is not supported by Zulip.",
            response.content.decode("utf-8"),
        )
def test_terms_of_service_first_time_template(self) -> None:
    """A user with no previously accepted ToS version is shown the
    configured first-time ToS template."""
    user = self.example_user("hamlet")
    self.login_user(user)
    user.tos_version = None
    user.save()
    with self.settings(FIRST_TIME_TOS_TEMPLATE="hello.html"), self.settings(
        TOS_VERSION="99.99"
    ):
        response = self.client_post("/accounts/accept_terms/")
        self.assertEqual(response.status_code, 200)
        self.assert_in_response("I agree to the", response)
        self.assert_in_response("Chat for distributed teams", response)
def test_accept_terms_of_service(self) -> None:
    """Posting without acceptance re-renders the form; posting with
    acceptance redirects to the home page."""
    self.login("hamlet")
    response = self.client_post("/accounts/accept_terms/")
    self.assertEqual(response.status_code, 200)
    self.assert_in_response("I agree to the", response)
    response = self.client_post("/accounts/accept_terms/", {"terms": True})
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response["Location"], "/")
def test_bad_narrow(self) -> None:
    """An invalid narrow is ignored with a warning, and the page still loads."""
    self.login("hamlet")
    with self.assertLogs(level="WARNING") as logs:
        response = self._get_home_page(stream="Invalid Stream")
    self.assertEqual(logs.output, ["WARNING:root:Invalid narrow requested, ignoring"])
    self._sanity_check(response)
def test_topic_narrow(self) -> None:
    """Narrowing to a stream/topic loads the page, mentions the topic,
    and keeps the strict no-caching headers."""
    self.login("hamlet")
    response = self._get_home_page(stream="Denmark", topic="lunch")
    self._sanity_check(response)
    self.assertIn("lunch", response.content.decode("utf-8"))
    cache_control = set(response["Cache-Control"].split(", "))
    self.assertEqual(cache_control, {"must-revalidate", "no-store", "no-cache"})
def test_notifications_stream(self) -> None:
    """realm_notifications_stream_id in page_params reflects the realm's
    configured notifications stream."""
    realm = get_realm("zulip")
    denmark_id = get_stream("Denmark", realm).id
    realm.notifications_stream_id = denmark_id
    realm.save()
    self.login("hamlet")
    page_params = self._get_page_params(self._get_home_page())
    self.assertEqual(page_params["realm_notifications_stream_id"], denmark_id)
def create_bot(self, owner: UserProfile, bot_email: str, bot_name: str) -> UserProfile:
    """Create and return a default bot owned by `owner`."""
    return do_create_user(
        email=bot_email,
        password="123",
        realm=owner.realm,
        full_name=bot_name,
        bot_type=UserProfile.DEFAULT_BOT,
        bot_owner=owner,
        acting_user=None,
    )
def create_non_active_user(self, realm: Realm, email: str, name: str) -> UserProfile:
    """Create a user and then mark it as deactivated.

    We only flip the is_active flag: a full-stack deactivation would be
    needlessly expensive, and the flag alone gives us a valid test.
    """
    new_user = do_create_user(
        email=email, password="123", realm=realm, full_name=name, acting_user=None
    )
    change_user_is_active(new_user, False)
    return new_user
def test_signup_notifications_stream(self) -> None:
    """realm_signup_notifications_stream_id in page_params reflects the
    realm's configured signup-notifications stream."""
    realm = get_realm("zulip")
    denmark = get_stream("Denmark", realm)
    realm.signup_notifications_stream = denmark
    realm.save()
    self.login("hamlet")
    page_params = self._get_page_params(self._get_home_page())
    self.assertEqual(page_params["realm_signup_notifications_stream_id"], denmark.id)
def test_people(self) -> None:
    """Verify how users are bucketed into realm_users,
    realm_non_active_users, realm_bots, and cross_realm_bots in
    page_params, including the exact record shape of cross-realm bots.
    """
    hamlet = self.example_user("hamlet")
    realm = get_realm("zulip")
    self.login_user(hamlet)
    bots = {}
    for i in range(3):
        bots[i] = self.create_bot(
            owner=hamlet,
            bot_email=f"bot-{i}@zulip.com",
            bot_name=f"Bot {i}",
        )
    for i in range(3):
        defunct_user = self.create_non_active_user(
            realm=realm,
            email=f"defunct-{i}@zulip.com",
            name=f"Defunct User {i}",
        )
    result = self._get_home_page()
    page_params = self._get_page_params(result)
    """
    We send three lists of users. The first two below are disjoint
    lists of users, and the records we send for them have identical
    structure.
    The realm_bots bucket is somewhat redundant, since all bots will
    be in one of the first two buckets. They do include fields, however,
    that normal users don't care about, such as default_sending_stream.
    """
    buckets = [
        "realm_users",
        "realm_non_active_users",
        "realm_bots",
    ]
    for field in buckets:
        users = page_params[field]
        self.assertTrue(len(users) >= 3, field)
        for rec in users:
            self.assertEqual(rec["user_id"], get_user(rec["email"], realm).id)
            if field == "realm_bots":
                self.assertNotIn("is_bot", rec)
                self.assertIn("is_active", rec)
                self.assertIn("owner_id", rec)
            else:
                self.assertIn("is_bot", rec)
                self.assertNotIn("is_active", rec)
    active_ids = {p["user_id"] for p in page_params["realm_users"]}
    non_active_ids = {p["user_id"] for p in page_params["realm_non_active_users"]}
    bot_ids = {p["user_id"] for p in page_params["realm_bots"]}
    self.assertIn(hamlet.id, active_ids)
    # `defunct_user` is the loop variable leaked from the creation loop
    # above, i.e. the last non-active user created.
    self.assertIn(defunct_user.id, non_active_ids)
    # Bots can show up in multiple buckets.
    self.assertIn(bots[2].id, bot_ids)
    self.assertIn(bots[2].id, active_ids)
    # Make sure nobody got mis-bucketed.
    self.assertNotIn(hamlet.id, non_active_ids)
    self.assertNotIn(defunct_user.id, active_ids)
    cross_bots = page_params["cross_realm_bots"]
    self.assert_length(cross_bots, 3)
    cross_bots.sort(key=lambda d: d["email"])
    for cross_bot in cross_bots:
        # These are either nondeterministic or boring
        del cross_bot["timezone"]
        del cross_bot["avatar_url"]
        del cross_bot["date_joined"]
    notification_bot = self.notification_bot()
    email_gateway_bot = get_system_bot(settings.EMAIL_GATEWAY_BOT)
    welcome_bot = get_system_bot(settings.WELCOME_BOT)
    by_email = lambda d: d["email"]
    self.assertEqual(
        sorted(cross_bots, key=by_email),
        sorted(
            [
                dict(
                    avatar_version=email_gateway_bot.avatar_version,
                    bot_owner_id=None,
                    bot_type=1,
                    email=email_gateway_bot.email,
                    user_id=email_gateway_bot.id,
                    full_name=email_gateway_bot.full_name,
                    is_active=True,
                    is_bot=True,
                    is_admin=False,
                    is_owner=False,
                    is_billing_admin=False,
                    role=email_gateway_bot.role,
                    is_cross_realm_bot=True,
                    is_guest=False,
                ),
                dict(
                    avatar_version=notification_bot.avatar_version,
                    bot_owner_id=None,
                    bot_type=1,
                    email=notification_bot.email,
                    user_id=notification_bot.id,
                    full_name=notification_bot.full_name,
                    is_active=True,
                    is_bot=True,
                    is_admin=False,
                    is_owner=False,
                    is_billing_admin=False,
                    role=notification_bot.role,
                    is_cross_realm_bot=True,
                    is_guest=False,
                ),
                dict(
                    avatar_version=welcome_bot.avatar_version,
                    bot_owner_id=None,
                    bot_type=1,
                    email=welcome_bot.email,
                    user_id=welcome_bot.id,
                    full_name=welcome_bot.full_name,
                    is_active=True,
                    is_bot=True,
                    is_admin=False,
                    is_owner=False,
                    is_billing_admin=False,
                    role=welcome_bot.role,
                    is_cross_realm_bot=True,
                    is_guest=False,
                ),
            ],
            key=by_email,
        ),
    )
def test_new_stream(self) -> None:
    """Narrowing to a freshly created (empty) stream populates
    narrow_stream/narrow and leaves max_message_id at -1."""
    user_profile = self.example_user("hamlet")
    stream_name = "New stream"
    self.subscribe(user_profile, stream_name)
    self.login_user(user_profile)
    page_params = self._get_page_params(self._get_home_page(stream=stream_name))
    self.assertEqual(page_params["narrow_stream"], stream_name)
    self.assertEqual(page_params["narrow"], [dict(operator="stream", operand=stream_name)])
    self.assertEqual(page_params["max_message_id"], -1)
def test_invites_by_admins_only(self) -> None:
    """Under an admins-only invite policy, only admins see the
    "Invite more users" link on the home page."""
    user_profile = self.example_user("hamlet")
    realm = user_profile.realm
    realm.invite_to_realm_policy = Realm.POLICY_ADMINS_ONLY
    realm.save()
    self.login_user(user_profile)
    self.assertFalse(user_profile.is_realm_admin)
    self.assertNotInHomePage("Invite more users")
    # Promote the user to admin; the link should now appear.
    user_profile.role = UserProfile.ROLE_REALM_ADMINISTRATOR
    user_profile.save()
    self.assertInHomePage("Invite more users")
def test_show_invites_for_guest_users(self) -> None:
    """Guests never see the invite link, even when the policy lets all
    members invite."""
    guest = self.example_user("polonius")
    realm = guest.realm
    realm.invite_to_realm_policy = Realm.POLICY_MEMBERS_ONLY
    realm.save()
    self.login_user(guest)
    self.assertFalse(guest.is_realm_admin)
    self.assertEqual(get_realm("zulip").invite_to_realm_policy, Realm.POLICY_MEMBERS_ONLY)
    self.assertNotInHomePage("Invite more users")
def test_get_billing_info(self) -> None:
    """Walk get_billing_info() through role / CustomerPlan / plan_type
    combinations; each step mutates state, so order matters."""
    user = self.example_user("desdemona")
    user.role = UserProfile.ROLE_REALM_OWNER
    user.save(update_fields=["role"])
    # realm owner, but no CustomerPlan and realm plan_type SELF_HOSTED -> neither billing link or plans
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # realm owner, with inactive CustomerPlan and realm plan_type SELF_HOSTED -> show only billing link
    customer = Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_id")
    CustomerPlan.objects.create(
        customer=customer,
        billing_cycle_anchor=timezone_now(),
        billing_schedule=CustomerPlan.ANNUAL,
        next_invoice_date=timezone_now(),
        tier=CustomerPlan.STANDARD,
        status=CustomerPlan.ENDED,
    )
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertTrue(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # realm owner, with inactive CustomerPlan and realm plan_type LIMITED -> show billing link and plans
    do_change_plan_type(user.realm, Realm.LIMITED, acting_user=None)
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertTrue(billing_info.show_billing)
    self.assertTrue(billing_info.show_plans)
    # Always false without CORPORATE_ENABLED
    with self.settings(CORPORATE_ENABLED=False):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # Always false without a UserProfile
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(None)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # realm admin, with CustomerPlan and realm plan_type LIMITED -> show only billing plans
    user.role = UserProfile.ROLE_REALM_ADMINISTRATOR
    user.save(update_fields=["role"])
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertTrue(billing_info.show_plans)
    # billing admin, with CustomerPlan and realm plan_type STANDARD -> show only billing link
    user.role = UserProfile.ROLE_MEMBER
    user.is_billing_admin = True
    do_change_plan_type(user.realm, Realm.STANDARD, acting_user=None)
    user.save(update_fields=["role", "is_billing_admin"])
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertTrue(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # member, with CustomerPlan and realm plan_type STANDARD -> neither billing link or plans
    user.is_billing_admin = False
    user.save(update_fields=["is_billing_admin"])
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # guest, with CustomerPlan and realm plan_type SELF_HOSTED -> neither billing link or plans
    user.role = UserProfile.ROLE_GUEST
    user.save(update_fields=["role"])
    do_change_plan_type(user.realm, Realm.SELF_HOSTED, acting_user=None)
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # billing admin, but no CustomerPlan and realm plan_type SELF_HOSTED -> neither billing link or plans
    user.role = UserProfile.ROLE_MEMBER
    user.is_billing_admin = True
    user.save(update_fields=["role", "is_billing_admin"])
    CustomerPlan.objects.all().delete()
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # billing admin, with sponsorship pending and realm plan_type SELF_HOSTED -> show only billing link
    customer.sponsorship_pending = True
    customer.save(update_fields=["sponsorship_pending"])
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertTrue(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
    # billing admin, no customer object and realm plan_type SELF_HOSTED -> neither billing link or plans
    customer.delete()
    with self.settings(CORPORATE_ENABLED=True):
        billing_info = get_billing_info(user)
    self.assertFalse(billing_info.show_billing)
    self.assertFalse(billing_info.show_plans)
def test_promote_sponsoring_zulip_in_realm(self) -> None:
    """The sponsorship promotion shows only for STANDARD_FREE realms and
    only when PROMOTE_SPONSORING_ZULIP is enabled."""
    realm = get_realm("zulip")
    do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=None)
    self.assertEqual(promote_sponsoring_zulip_in_realm(realm), True)
    # The global setting can disable the promotion entirely.
    with self.settings(PROMOTE_SPONSORING_ZULIP=False):
        self.assertEqual(promote_sponsoring_zulip_in_realm(realm), False)
    do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=None)
    self.assertEqual(promote_sponsoring_zulip_in_realm(realm), True)
    # Neither limited nor paid plans show the promotion.
    do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
    self.assertEqual(promote_sponsoring_zulip_in_realm(realm), False)
    do_change_plan_type(realm, Realm.STANDARD, acting_user=None)
    self.assertEqual(promote_sponsoring_zulip_in_realm(realm), False)
def test_desktop_home(self) -> None:
    """/desktop_home permanently redirects to /desktop_home/, which then
    redirects to the home page."""
    self.login("hamlet")
    response = self.client_get("/desktop_home")
    self.assertEqual(response.status_code, 301)
    self.assertTrue(response["Location"].endswith("/desktop_home/"))
    response = self.client_get("/desktop_home/")
    self.assertEqual(response.status_code, 302)
    self.assertEqual(urllib.parse.urlparse(response["Location"]).path, "/")
@override_settings(SERVER_UPGRADE_NAG_DEADLINE_DAYS=365)
def test_is_outdated_server(self) -> None:
    """is_outdated_server() should nag nobody within the deadline,
    everybody well past it, and only admins in between."""
    # Check when server_upgrade_nag_deadline > last_server_upgrade_time
    hamlet = self.example_user("hamlet")
    iago = self.example_user("iago")
    now = LAST_SERVER_UPGRADE_TIME.replace(tzinfo=pytz.utc)
    # Well within the 365-day deadline: nobody is nagged.
    with patch("zerver.lib.compatibility.timezone_now", return_value=now + timedelta(days=10)):
        self.assertEqual(is_outdated_server(iago), False)
        self.assertEqual(is_outdated_server(hamlet), False)
        self.assertEqual(is_outdated_server(None), False)
    # Well past the deadline: everybody (admin, member, anonymous) is nagged.
    with patch("zerver.lib.compatibility.timezone_now", return_value=now + timedelta(days=397)):
        self.assertEqual(is_outdated_server(iago), True)
        self.assertEqual(is_outdated_server(hamlet), True)
        self.assertEqual(is_outdated_server(None), True)
    # Just past the deadline: only the admin (iago) is nagged.
    with patch("zerver.lib.compatibility.timezone_now", return_value=now + timedelta(days=380)):
        self.assertEqual(is_outdated_server(iago), True)
        self.assertEqual(is_outdated_server(hamlet), False)
        self.assertEqual(is_outdated_server(None), False)
def test_furthest_read_time(self) -> None:
    """get_furthest_read_time() should return the user's most recent
    update_message_flags activity time, across clients."""
    msg_id = self.send_test_message("hello!", sender_name="iago")
    hamlet = self.example_user("hamlet")
    self.login_user(hamlet)
    self.client_post(
        "/json/messages/flags",
        {"messages": orjson.dumps([msg_id]).decode(), "op": "add", "flag": "read"},
    )
    # Manually process the UserActivity
    now = timezone_now()
    activity_time = calendar.timegm(now.timetuple())
    user_activity_event = {
        "user_profile_id": hamlet.id,
        "client_id": 1,
        "query": "update_message_flags",
        "time": activity_time,
    }
    # A second, older event from a different client.
    yesterday = now - timedelta(days=1)
    activity_time_2 = calendar.timegm(yesterday.timetuple())
    user_activity_event_2 = {
        "user_profile_id": hamlet.id,
        "client_id": 2,
        "query": "update_message_flags",
        "time": activity_time_2,
    }
    UserActivityWorker().consume_batch([user_activity_event, user_activity_event_2])
    # verify furthest_read_time is last activity time, irrespective of client
    furthest_read_time = get_furthest_read_time(hamlet)
    self.assertGreaterEqual(furthest_read_time, activity_time)
    # Check when user has no activity
    UserActivity.objects.filter(user_profile=hamlet).delete()
    furthest_read_time = get_furthest_read_time(hamlet)
    self.assertIsNone(furthest_read_time)
    # Check no user profile handling
    furthest_read_time = get_furthest_read_time(None)
    self.assertIsNotNone(furthest_read_time)
def test_subdomain_homepage(self) -> None:
    """With the root-domain landing page enabled, "/" on the root domain
    serves the marketing page, while a realm subdomain serves the app."""
    self.login("hamlet")
    with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
        with patch("zerver.views.home.get_subdomain", return_value=""):
            response = self._get_home_page()
        self.assertEqual(response.status_code, 200)
        self.assert_in_response("Chat for distributed teams", response)
        with patch("zerver.views.home.get_subdomain", return_value="subdomain"):
            response = self._get_home_page()
        self._sanity_check(response)
def send_test_message(
    self,
    content: str,
    sender_name: str = "iago",
    stream_name: str = "Denmark",
    topic_name: str = "foo",
) -> int:
    """Send a stream message and return its message id."""
    sender_profile = self.example_user(sender_name)
    return self.send_stream_message(
        sender_profile, stream_name, content=content, topic_name=topic_name
    )
def soft_activate_and_get_unread_count(
    self, stream: str = "Denmark", topic: str = "foo"
) -> int:
    """Load a narrowed home page (which soft-reactivates the user) and
    return the unread-message count from page_params."""
    response = self._get_home_page(stream=stream, topic=topic)
    return self._get_page_params(response)["unread_msgs"]["count"]
def test_unread_count_user_soft_deactivation(self) -> None:
    # In this test we make sure if a soft deactivated user had unread
    # messages before deactivation they remain same way after activation.
    long_term_idle_user = self.example_user("hamlet")
    self.login_user(long_term_idle_user)
    message = "Test message 1"
    self.send_test_message(message)
    with queries_captured() as queries:
        self.assertEqual(self.soft_activate_and_get_unread_count(), 1)
    # Baseline query count for a page load that needs no backfill.
    query_count = len(queries)
    user_msg_list = get_user_messages(long_term_idle_user)
    self.assertEqual(user_msg_list[-1].content, message)
    self.logout()
    with self.assertLogs(logger_string, level="INFO") as info_log:
        do_soft_deactivate_users([long_term_idle_user])
    self.assertEqual(
        info_log.output,
        [
            f"INFO:{logger_string}:Soft deactivated user {long_term_idle_user.id}",
            f"INFO:{logger_string}:Soft-deactivated batch of 1 users; 0 remain to process",
        ],
    )
    self.login_user(long_term_idle_user)
    message = "Test message 2"
    self.send_test_message(message)
    idle_user_msg_list = get_user_messages(long_term_idle_user)
    # While soft-deactivated, no UserMessage row was written for the new
    # message, so it is not yet in the user's message list.
    self.assertNotEqual(idle_user_msg_list[-1].content, message)
    with queries_captured() as queries:
        self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
    # Test here for query count to be at least 5 greater than previous count
    # This will assure indirectly that add_missing_messages() was called.
    self.assertGreaterEqual(len(queries) - query_count, 5)
    idle_user_msg_list = get_user_messages(long_term_idle_user)
    self.assertEqual(idle_user_msg_list[-1].content, message)
def test_multiple_user_soft_deactivations(self) -> None:
    """Soft-deactivate the same user twice and verify that each login
    backfills messages (query count jumps) exactly once."""
    long_term_idle_user = self.example_user("hamlet")
    # We are sending this message to ensure that long_term_idle_user has
    # at least one UserMessage row.
    self.send_test_message("Testing", sender_name="hamlet")
    with self.assertLogs(logger_string, level="INFO") as info_log:
        do_soft_deactivate_users([long_term_idle_user])
    self.assertEqual(
        info_log.output,
        [
            f"INFO:{logger_string}:Soft deactivated user {long_term_idle_user.id}",
            f"INFO:{logger_string}:Soft-deactivated batch of 1 users; 0 remain to process",
        ],
    )
    message = "Test message 1"
    self.send_test_message(message)
    self.login_user(long_term_idle_user)
    with queries_captured() as queries:
        self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
    # This page load included the add_missing_messages() backfill.
    query_count = len(queries)
    long_term_idle_user.refresh_from_db()
    self.assertFalse(long_term_idle_user.long_term_idle)
    idle_user_msg_list = get_user_messages(long_term_idle_user)
    self.assertEqual(idle_user_msg_list[-1].content, message)
    message = "Test message 2"
    self.send_test_message(message)
    with queries_captured() as queries:
        self.assertEqual(self.soft_activate_and_get_unread_count(), 3)
    # Test here for query count to be at least 5 less than previous count.
    # This will assure add_missing_messages() isn't repeatedly called.
    self.assertGreaterEqual(query_count - len(queries), 5)
    idle_user_msg_list = get_user_messages(long_term_idle_user)
    self.assertEqual(idle_user_msg_list[-1].content, message)
    self.logout()
    # Second deactivation round: same expectations as the first.
    with self.assertLogs(logger_string, level="INFO") as info_log:
        do_soft_deactivate_users([long_term_idle_user])
    self.assertEqual(
        info_log.output,
        [
            f"INFO:{logger_string}:Soft deactivated user {long_term_idle_user.id}",
            f"INFO:{logger_string}:Soft-deactivated batch of 1 users; 0 remain to process",
        ],
    )
    message = "Test message 3"
    self.send_test_message(message)
    self.login_user(long_term_idle_user)
    with queries_captured() as queries:
        self.assertEqual(self.soft_activate_and_get_unread_count(), 4)
    query_count = len(queries)
    long_term_idle_user.refresh_from_db()
    self.assertFalse(long_term_idle_user.long_term_idle)
    idle_user_msg_list = get_user_messages(long_term_idle_user)
    self.assertEqual(idle_user_msg_list[-1].content, message)
    message = "Test message 4"
    self.send_test_message(message)
    with queries_captured() as queries:
        self.assertEqual(self.soft_activate_and_get_unread_count(), 5)
    self.assertGreaterEqual(query_count - len(queries), 5)
    idle_user_msg_list = get_user_messages(long_term_idle_user)
    self.assertEqual(idle_user_msg_list[-1].content, message)
    self.logout()
def test_url_language(self) -> None:
    """A locale in the URL path (e.g. /de/) does not override the user's
    own default_language in page_params."""
    user = self.example_user("hamlet")
    user.default_language = "es"
    user.save()
    self.login_user(user)
    self.check_rendered_logged_in_app(self._get_home_page())
    with patch("zerver.lib.events.request_event_queue", return_value=42), patch(
        "zerver.lib.events.get_user_events", return_value=[]
    ):
        response = self.client_get("/de/")
    page_params = self._get_page_params(response)
    self.assertEqual(page_params["default_language"], "es")
    # TODO: Verify that the actual language we're using in the
    # translation data is German.
def test_translation_data(self) -> None:
    """page_params reports the user's configured default_language."""
    user = self.example_user("hamlet")
    user.default_language = "es"
    user.save()
    self.login_user(user)
    response = self._get_home_page()
    self.check_rendered_logged_in_app(response)
    self.assertEqual(self._get_page_params(response)["default_language"], "es")
def test_compute_show_invites_and_add_streams_admin(self) -> None:
    """An admin can both invite and add streams under an admins-only policy."""
    admin = self.example_user("iago")
    realm = admin.realm
    realm.invite_to_realm_policy = Realm.POLICY_ADMINS_ONLY
    realm.save()
    show_invites, show_add_streams = compute_show_invites_and_add_streams(admin)
    self.assertEqual(show_invites, True)
    self.assertEqual(show_add_streams, True)
def test_compute_show_invites_and_add_streams_require_admin(self) -> None:
    """A plain member under an admins-only policy cannot invite but can
    still add streams."""
    member = self.example_user("hamlet")
    realm = member.realm
    realm.invite_to_realm_policy = Realm.POLICY_ADMINS_ONLY
    realm.save()
    show_invites, show_add_streams = compute_show_invites_and_add_streams(member)
    self.assertEqual(show_invites, False)
    self.assertEqual(show_add_streams, True)
def test_compute_show_invites_and_add_streams_guest(self) -> None:
    """Guests can neither invite nor add streams."""
    guest = self.example_user("polonius")
    show_invites, show_add_streams = compute_show_invites_and_add_streams(guest)
    self.assertEqual(show_invites, False)
    self.assertEqual(show_add_streams, False)
def test_compute_show_invites_and_add_streams_unauthenticated(self) -> None:
    """With no user at all, both options are hidden."""
    show_invites, show_add_streams = compute_show_invites_and_add_streams(None)
    self.assertEqual(show_invites, False)
    self.assertEqual(show_add_streams, False)
|
by46/recipe | refs/heads/master | recipe/__main__.py | 1 | from recipe.main import main
# Entry point when the package is executed as ``python -m recipe``.
main()
|
try:
    # Probe for Python 3: bytes(str, encoding) works there. On Python 2,
    # bytes is str and str('A', 'latin1') raises TypeError.
    bytes('A', 'latin1')
except TypeError:
    # Python 2 fallbacks: bytes(s, enc) just returns the (byte) string.
    bytes = lambda a, b: a  # noqa
    # Capture the builtin chr via a default argument BEFORE shadowing the
    # name: the original `chr(a)` referred to the new lambda itself and
    # recursed infinitely for non-str input.
    chr = lambda a, _chr=chr: a if type(a) == str else _chr(a)  # noqa
def null_padded(string, length):
    """
    Force a string to a given length by right-padding it with zero-bytes,
    clipping the initial string if neccessary.
    """
    clipped = string[:length]
    return bytes(clipped.ljust(length, '\0'), 'latin1')
|
seann1/portfolio5 | refs/heads/master | .meteor/dev_bundle/python/Lib/idlelib/FileList.py | 123 | import os
from Tkinter import *
import tkMessageBox
class FileList:
    """Registry of open IDLE editor windows, keyed by canonical filename.

    `dict` maps canonical filename -> EditorWindow; `inversedict` maps
    EditorWindow -> canonical filename (or None for unsaved buffers).
    When the last window is unregistered, the Tk main loop is quit.
    """
    # N.B. this import overridden in PyShellFileList.
    from idlelib.EditorWindow import EditorWindow

    def __init__(self, root):
        self.root = root
        self.dict = {}
        self.inversedict = {}
        self.vars = {} # For EditorWindow.getrawvar (shared Tcl variables)

    def open(self, filename, action=None):
        """Open filename, reusing an already-open window when possible.

        Returns the EditorWindow, the result of `action`, or None for a
        directory path.
        """
        assert filename
        filename = self.canonize(filename)
        if os.path.isdir(filename):
            # This can happen when bad filename is passed on command line:
            tkMessageBox.showerror(
                "File Error",
                "%r is a directory." % (filename,),
                master=self.root)
            return None
        key = os.path.normcase(filename)
        if key in self.dict:
            # Already open: just raise the existing window.
            edit = self.dict[key]
            edit.top.wakeup()
            return edit
        if action:
            # Don't create window, perform 'action', e.g. open in same window
            return action(filename)
        else:
            return self.EditorWindow(self, filename, key)

    def gotofileline(self, filename, lineno=None):
        # Open filename and optionally jump to a specific line.
        edit = self.open(filename)
        if edit is not None and lineno is not None:
            edit.gotoline(lineno)

    def new(self, filename=None):
        # Create a fresh editor window (untitled unless filename given).
        return self.EditorWindow(self, filename)

    def close_all_callback(self, *args, **kwds):
        # Close every window; stop if the user cancels one of the closes.
        for edit in self.inversedict.keys():
            reply = edit.close()
            if reply == "cancel":
                break
        return "break"

    def unregister_maybe_terminate(self, edit):
        # Remove a closed window; quit the main loop if it was the last one.
        try:
            key = self.inversedict[edit]
        except KeyError:
            print "Don't know this EditorWindow object.  (close)"
            return
        if key:
            del self.dict[key]
        del self.inversedict[edit]
        if not self.inversedict:
            self.root.quit()

    def filename_changed_edit(self, edit):
        # Re-key a window after a save-as/rename; warn on duplicate names.
        edit.saved_change_hook()
        try:
            key = self.inversedict[edit]
        except KeyError:
            print "Don't know this EditorWindow object.  (rename)"
            return
        filename = edit.io.filename
        if not filename:
            # Buffer became untitled: drop the old mapping.
            if key:
                del self.dict[key]
            self.inversedict[edit] = None
            return
        filename = self.canonize(filename)
        newkey = os.path.normcase(filename)
        if newkey == key:
            return
        if newkey in self.dict:
            # Another window already has this filename: orphan it.
            conflict = self.dict[newkey]
            self.inversedict[conflict] = None
            tkMessageBox.showerror(
                "Name Conflict",
                "You now have multiple edit windows open for %r" % (filename,),
                master=self.root)
        self.dict[newkey] = edit
        self.inversedict[edit] = newkey
        if key:
            try:
                del self.dict[key]
            except KeyError:
                pass

    def canonize(self, filename):
        # Normalize to an absolute, canonical path (best effort if the
        # current working directory is unavailable).
        if not os.path.isabs(filename):
            try:
                pwd = os.getcwd()
            except os.error:
                pass
            else:
                filename = os.path.join(pwd, filename)
        return os.path.normpath(filename)
def _test():
    """Manual smoke test: open the files named on the command line (or a
    new untitled window) and run the Tk main loop while any stay open."""
    from idlelib.EditorWindow import fixwordbreaks
    import sys
    root = Tk()
    fixwordbreaks(root)
    root.withdraw()
    flist = FileList(root)
    if sys.argv[1:]:
        for filename in sys.argv[1:]:
            flist.open(filename)
    else:
        flist.new()
    if flist.inversedict:
        root.mainloop()

if __name__ == '__main__':
    _test()
|
ddico/server-tools | refs/heads/8.0 | dead_mans_switch_server/__openerp__.py | 21 | # -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
    "name": "Dead man's switch (server)",
    "version": "8.0.1.0.0",
    "author": "Therp BV,Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "category": "Monitoring",
    "summary": "Be notified when customers' odoo instances go down",
    # Modules that must be installed before this one.
    "depends": [
        'mail',
        'web_kanban_sparkline',
    ],
    # Records loaded on install/upgrade, in order.
    "data": [
        "data/ir_cron.xml",
        "security/res_groups.xml",
        "views/dead_mans_switch_log.xml",
        "views/dead_mans_switch_instance.xml",
        "views/menu.xml",
        'security/ir.model.access.csv',
    ],
}
|
abramhindle/UnnaturalCodeFork | refs/heads/master | python/testdata/launchpad/lib/lp/translations/browser/hastranslationimports.py | 1 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Browser view for IHasTranslationImports."""
__metaclass__ = type
__all__ = [
'HasTranslationImportsView',
]
import datetime
import pytz
import simplejson
from z3c.ptcompat import ViewPageTemplateFile
from zope.component import getUtility
from zope.formlib import form
from zope.formlib.widgets import DropdownWidget
from zope.interface import implements
from zope.schema import Choice
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.vocabulary import (
SimpleTerm,
SimpleVocabulary,
)
from lp import _
from lp.app.browser.launchpadform import (
action,
custom_widget,
LaunchpadFormView,
safe_action,
)
from lp.app.browser.lazrjs import vocabulary_to_choice_edit_items
from lp.app.errors import UnexpectedFormData
from lp.registry.interfaces.distribution import IDistribution
from lp.registry.interfaces.pillar import IPillarNameSet
from lp.services.propertycache import cachedproperty
from lp.services.webapp.authorization import check_permission
from lp.services.webapp.batching import TableBatchNavigator
from lp.services.webapp.vocabulary import ForgivingSimpleVocabulary
from lp.translations.enums import RosettaImportStatus
from lp.translations.interfaces.hastranslationimports import (
IHasTranslationImports,
)
from lp.translations.interfaces.translationimportqueue import (
ITranslationImportQueue,
SpecialTranslationImportTargetFilter,
)
class HasTranslationImportsView(LaunchpadFormView):
    """View class used for objects with translation imports."""
    schema = IHasTranslationImports
    field_names = []
    # All filter/status widgets are rendered inline next to the listing.
    custom_widget('filter_target', DropdownWidget, cssClass='inlined-widget')
    custom_widget('filter_status', DropdownWidget, cssClass='inlined-widget')
    custom_widget(
        'filter_extension', DropdownWidget, cssClass='inlined-widget')
    custom_widget('status', DropdownWidget, cssClass='inlined-widget')
    translation_import_queue_macros = ViewPageTemplateFile(
        '../templates/translation-import-queue-macros.pt')
    page_title = "Import queue"
    @property
    def label(self):
        """See `LaunchpadFormView`."""
        return "Translation import queue for %s" % self.context.displayname
    @property
    def initial_values(self):
        # Populated lazily by the createFilter*Field / createEntryStatusField
        # helpers below.
        return self._initial_values
    def initialize(self):
        """See `LaunchpadFormView`."""
        self._initial_values = {}
        LaunchpadFormView.initialize(self)
    def createFilterFieldHelper(self, name, source, title):
        """A helper method for creating filter fields."""
        # Every filter starts out showing everything ('all').
        self._initial_values[name] = 'all'
        return form.Fields(
            Choice(
                __name__=name,
                source=source,
                title=_(title)),
            custom_widget=self.custom_widgets[name],
            render_context=self.render_context)
    def createFilterStatusField(self):
        """Create a field with a vocabulary to filter by import status.
        :return: A form.Fields instance containing the status field.
        """
        return self.createFilterFieldHelper(
            name='filter_status',
            source=TranslationImportStatusVocabularyFactory(),
            title='Choose which status to show')
    def createFilterFileExtensionField(self):
        """Create a field with a vocabulary to filter by file extension.
        :return: A form.Fields instance containing the file extension field.
        """
        return self.createFilterFieldHelper(
            name='filter_extension',
            source=TranslationImportFileExtensionVocabularyFactory(),
            title='Show entries with this extension')
    def createFilterTargetField(self):
        """Create a field with a vocabulary to filter by target.
        By default this does nothing. Subclasses can override this.
        :return: A form.Fields instance containing the target field or None.
        """
        return None
    def createEntryStatusField(self, entry):
        """Create a field with a vocabulary with entry's import status.
        :return: A form.Fields instance containing the status field.
        """
        name = 'status_%d' % entry.id
        self._initial_values[name] = entry.status.name
        return form.Fields(
            Choice(
                __name__=name,
                source=EntryImportStatusVocabularyFactory(entry, self.user),
                title=_('Select import status')),
            custom_widget=self.custom_widgets['status'],
            render_context=self.render_context)
    def setUpFields(self):
        """See `LaunchpadFormView`."""
        LaunchpadFormView.setUpFields(self)
        # setup filter fields.
        target_field = self.createFilterTargetField()
        if target_field is not None:
            self.form_fields = (target_field + self.form_fields)
        self.form_fields = (
            self.createFilterStatusField() +
            self.createFilterFileExtensionField() +
            self.form_fields)
    def setUpWidgets(self):
        """See `LaunchpadFormView`."""
        # The filter_target widget needs to know the selection made in the
        # filter_status widget. Set up the widgets in two phases to make this
        # possible.
        self.widgets = form.setUpWidgets(
            self.form_fields.select('filter_status'), self.prefix,
            self.context, self.request, data=self.initial_values,
            ignore_request=False)
        self.widgets += form.setUpWidgets(
            self.form_fields.omit('filter_status'), self.prefix, self.context,
            self.request, data=self.initial_values, ignore_request=False)
        if not self.filter_action.submitted():
            self.setUpEntriesWidgets()
    def setUpEntriesWidgets(self):
        """Prepare translation import entries widgets to be rendered."""
        fields = form.Fields()
        for entry in self.batchnav.currentBatch():
            fields += self.createEntryStatusField(entry)
        if len(fields) > 0:
            self.form_fields += fields
            self.widgets += form.setUpWidgets(
                fields, self.prefix, self.context, self.request,
                data=self.initial_values, ignore_request=False)
    @safe_action
    @action('Filter', name='filter')
    def filter_action(self, action, data):
        """Handle a filter action."""
        target_option = ''
        if self.has_target_filter:
            target_option = 'field.filter_target=%s&' % (
                self.widgets['filter_target'].getInputValue())
        # Redirect to the filtered URL.
        self.next_url = (
            '%s?%sfield.filter_status=%s&field.filter_extension=%s' % (
                self.request.URL,
                target_option,
                self.widgets['filter_status'].getInputValue(),
                self.widgets['filter_extension'].getInputValue()))
    @action("Change status", name='change_status')
    def change_status_action(self, action, data):
        """Handle a queue submission changing the status of its entries."""
        # The user must be logged in.
        if self.user is None:
            raise UnexpectedFormData(
                'Users not logged cannot submit this form.')
        number_of_changes = 0
        for form_item in data:
            if not form_item.startswith('status_'):
                # We are not interested on this form_item.
                continue
            # It's an form_item to handle.
            try:
                # 'ignored' is 'status' due to the previous check, so we could
                # ignore that part.
                ignored, id_string = form_item.split('_')
                # The id is an integer
                id = int(id_string)
            except ValueError:
                # We got an form_item with more than one '_' char or with an
                # id that is not a number, that means that someone is playing
                # badly with our system so it's safe to just ignore the
                # request.
                raise UnexpectedFormData(
                    'Ignored your request because it is broken.')
            # Get the entry we are working on.
            import_queue_set = getUtility(ITranslationImportQueue)
            entry = import_queue_set.get(id)
            new_status_name = data.get(form_item)
            if new_status_name == entry.status.name:
                # The entry's status didn't change we can jump to the next
                # entry.
                continue
            # The status changed.
            number_of_changes += 1
            # Determine status enum from from value.
            new_status = None
            for status in RosettaImportStatus.items:
                if new_status_name == status.name:
                    new_status = status
                    break
            if new_status is None:
                # We are trying to set a bogus status.
                # That means that it's a broken request.
                raise UnexpectedFormData(
                    'Ignored the request to change the status from %s to %s.'
                    % (entry.status.name, new_status_name))
            else:
                # This will raise an exception if the user is not authorized.
                entry.setStatus(new_status, self.user)
            # Update the date_status_change field.
            UTC = pytz.timezone('UTC')
            entry.date_status_changed = datetime.datetime.now(UTC)
        if number_of_changes == 0:
            self.request.response.addWarningNotification(
                "Ignored your status change request as you didn't select any"
                " change.")
        else:
            self.request.response.addInfoNotification(
                "Changed the status of %d queue entries." % number_of_changes)
    def getEntriesFilteringOptions(self):
        """Return the selected filtering."""
        target = None
        file_extension = None
        status = None
        target_widget = self.widgets.get('filter_target')
        if target_widget is not None and target_widget.hasValidInput():
            target = target_widget.getInputValue()
            pillar_name_set = getUtility(IPillarNameSet)
            if target == 'all':
                target = None
            elif target.startswith('[') and target.endswith(']'):
                # This is a SpecialTranslationImportTargetFilter.
                target_code = target[1:-1]
                target = None
                for enum_item in SpecialTranslationImportTargetFilter.items:
                    if enum_item.name == target_code:
                        target = enum_item
                if target is None:
                    # NOTE(review): target is always None in this branch, so
                    # the message interpolates None; it probably should name
                    # target_code instead -- confirm before changing.
                    raise UnexpectedFormData(
                        "Got a bad special target option: %s" % target)
            elif '/' in target:
                # It's a distroseries, for them we have
                # 'distribution.name/distroseries.name' to identify it.
                distribution_name, distroseries_name = target.split('/', 1)
                pillar = pillar_name_set.getByName(distribution_name)
                if IDistribution.providedBy(pillar):
                    target = pillar.getSeries(distroseries_name)
                else:
                    raise UnexpectedFormData(
                        "Got a bad target option %s" % target)
            else:
                target = pillar_name_set.getByName(target)
        filter_extension_widget = self.widgets.get('filter_extension')
        if filter_extension_widget.hasValidInput():
            file_extension = filter_extension_widget.getInputValue()
            if file_extension == 'all':
                file_extension = None
        filter_status_widget = self.widgets.get('filter_status')
        if filter_status_widget.hasValidInput():
            status = filter_status_widget.getInputValue()
            if status == 'all':
                status = None
            else:
                status = RosettaImportStatus.items[status]
        return target, file_extension, status
    @property
    def translation_import_queue_content(self):
        """Macro displaying the import queue content."""
        macros = self.translation_import_queue_macros.macros
        return macros['translation-import-queue-content']
    @property
    def entries(self):
        """Return the entries in the queue for this context."""
        target, file_extension, status = self.getEntriesFilteringOptions()
        assert target is None, (
            'Inherit from this view class if target filter is being used.')
        return IHasTranslationImports(
            self.context).getTranslationImportQueueEntries(
            import_status=status, file_extension=file_extension)
    @property
    def has_target_filter(self):
        """Whether the form should show the target filter."""
        return self.widgets.get('filter_target') is not None
    @cachedproperty
    def batchnav(self):
        """Return batch object for this page."""
        return TableBatchNavigator(self.entries, self.request)
    @property
    def choice_confs_js(self):
        """Generate configuration for lazr-js widget.
        Only editable items are included in the list.
        """
        confs = []
        for entry in self.batchnav.batch:
            if check_permission('launchpad.Edit', entry):
                confs.append(self.generateChoiceConfForEntry(entry))
        return 'var choice_confs = %s;' % simplejson.dumps(confs)
    def generateChoiceConfForEntry(self, entry):
        """Return the lazr-js choice widget config dict for *entry*."""
        # Statuses the user may not set are shown but disabled.
        disabled_items = [
            item.value for item in RosettaImportStatus
            if not entry.canSetStatus(item.value, self.user)]
        items = vocabulary_to_choice_edit_items(
            RosettaImportStatus, disabled_items=disabled_items,
            css_class_prefix='translationimportstatus')
        return {
            'value': entry.status.title,
            'items': items}
class EntryImportStatusVocabularyFactory:
    """Build the vocabulary of statuses one queue entry may be set to."""
    implements(IContextSourceBinder)
    def __init__(self, entry, user):
        """Remember the queue entry and the user acting on it.
        :param entry: The ITranslationImportQueueEntry related with this
            vocabulary.
        :param user: The person whose permissions restrict the choices.
        """
        self.entry = entry
        self.user = user
    def __call__(self, context):
        # Offer the entry's current status plus every status this user is
        # allowed to switch it to.
        offered = [
            status for status in RosettaImportStatus.items
            if status == self.entry.status
            or self.entry.canSetStatus(status, self.user)]
        return SimpleVocabulary(
            [SimpleTerm(status.name, status.name, status.title)
             for status in offered])
class TranslationImportStatusVocabularyFactory:
    """Vocabulary of every import status, preceded by an 'all' wildcard."""
    implements(IContextSourceBinder)
    def __call__(self, context):
        terms = [SimpleTerm('all', 'all', 'All statuses')]
        terms.extend(
            SimpleTerm(status.name, status.name, status.title)
            for status in RosettaImportStatus.items)
        return SimpleVocabulary(terms)
class TranslationImportFileExtensionVocabularyFactory:
    """Vocabulary of the file extensions the import queue understands."""
    implements(IContextSourceBinder)
    def __call__(self, context):
        all_files = SimpleTerm('all', 'all', 'All files')
        terms = [all_files] + [
            SimpleTerm(extension, extension, 'Only %s files' % extension)
            for extension in ('po', 'pot')]
        # We use a ForgivingSimpleVocabulary because we don't care if a user
        # provides an invalid value. If they do we just ignore it and show
        # them all files.
        return ForgivingSimpleVocabulary(terms, default_term=all_files)
|
paplorinc/intellij-community | refs/heads/master | python/testData/refactoring/move/referenceToClassWithNewInMovedSymbol/after/src/classFile.py | 62 | from collections import namedtuple
class Pipeline(namedtuple('_Pipeline', 'name')):
    """Immutable value object with a single ``name`` field."""
    def __new__(cls, name):
        # namedtuple subclasses must populate their fields in __new__.
        return super(Pipeline, cls).__new__(cls, name)
    def __init__(self, name):
        # Nothing to do: the tuple was fully initialized in __new__.
        pass
|
sam81/pychoacoustics | refs/heads/master | pychoacoustics/default_experiments/sig_detect_multi.py | 1 | # -*- coding: utf-8 -*-
"""
Measure d' for the detection of a pure tone in a Yes/No task.
The available fields are:
- Frequency (Hz) :
The frequency of the pure tone signal
- Duration (ms) :
Tone duration (excluding ramps), in ms
- Ramps (ms) :
Duration of each ramp, in ms
- Level (dB SPL):
Level of the signal in dB SPL.
The available choosers are:
- Ear: [``Right``, ``Left``, ``Both``]
The ear to which the signal will be presented
"""
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
#from PyQt4 import QtGui, QtCore
#from PyQt4.QtGui import QApplication
import random, sys
from .._version_info import*
from pychoacoustics.sndlib import*
def initialize_sig_detect_multi(prm):
    """Register this experiment and its defaults in the *prm* dictionary."""
    exp_name = "Demo Signal Detection Multiple Constants"
    prm["experimentsChoices"].append(exp_name)
    prm[exp_name] = {
        "paradigmChoices": ["Multiple Constants 1-Interval 2-Alternatives"],
        "opts": ["hasFeedback", "hasNDifferencesChooser"],
        "buttonLabels": ["Yes", "No"],
        'defaultNIntervals': 1,
        'defaultNAlternatives': 2,
        "execString": "sig_detect_multi",
        "version": __name__ + ' ' + pychoacoustics_version + ' ' + pychoacoustics_builddate,
    }
    return prm
def select_default_parameters_sig_detect_multi(parent, par):
    """Return the default field/chooser layout for this experiment.

    :param parent: GUI object providing the ``tr`` translation method.
    :param par: dict carrying ``nDifferences``, the number of signal
        frequencies to lay out.
    """
    nDifferences = par['nDifferences']
    # One frequency field per difference, numbered from 1.
    fieldLabel = [parent.tr("Frequency (Hz) " + str(i+1))
                  for i in range(nDifferences)]
    field = [1000 + i for i in range(nDifferences)]
    # Shared signal parameters.
    fieldLabel.extend([parent.tr("Duration (ms)"),
                       parent.tr("Ramps (ms)"),
                       parent.tr("Level (dB SPL)")])
    field.extend([2, 4, 40])
    chooserOptions = [[parent.tr("Right"), parent.tr("Left"),
                       parent.tr("Both")]]
    chooserLabel = [parent.tr("Channel:")]
    chooser = [parent.tr("Both")]
    return {'field': field,
            'fieldLabel': fieldLabel,
            'chooser': chooser,
            'chooserLabel': chooserLabel,
            'chooserOptions': chooserOptions}
def doTrial_sig_detect_multi(parent):
    """Run one trial: pick present/absent at random, synthesize and play."""
    currBlock = 'b'+ str(parent.prm['currentBlock'])
    if parent.prm['startOfBlock'] == True:
        # First trial of the block: set up the condition bookkeeping.
        parent.writeResultsHeader('log')
        parent.prm['trialTypes'] = ["signal_present","signal_absent"]
        parent.prm['conditions'] = []
        nDifferences = parent.prm['nDifferences']
        for i in range(nDifferences):
            parent.prm['conditions'].append('Frequency (Hz) ' + str(parent.prm[currBlock]['field'][parent.prm['fieldLabel'].index(parent.tr("Frequency (Hz) ") + str(i+1))])) #this is for the sortResponse routine
    parent.currentCondition = parent.prm['conditions'][parent.prm['currentDifference']]
    parent.currentSubcondition = random.choice(parent.prm['trialTypes'])
    # Button 1 is "Yes" (signal present), button 2 is "No" (absent).
    if parent.currentSubcondition == "signal_present":
        parent.correctButton = 1
    elif parent.currentSubcondition == "signal_absent":
        parent.correctButton = 2
    currentFreq = parent.prm[currBlock]['field'][parent.prm['fieldLabel'].index(parent.tr("Frequency (Hz) ") + str(parent.prm['conditions'].index(parent.currentCondition)+1))]
    dur = parent.prm[currBlock]['field'][parent.prm['fieldLabel'].index("Duration (ms)")]
    ramps = parent.prm[currBlock]['field'][parent.prm['fieldLabel'].index("Ramps (ms)")]
    lev = parent.prm[currBlock]['field'][parent.prm['fieldLabel'].index("Level (dB SPL)")]
    phase = 0
    channel = parent.prm[currBlock]['chooser'][parent.prm['chooserLabel'].index(parent.tr("Channel:"))]
    if parent.currentSubcondition == 'signal_absent':
        # -200 dB SPL is effectively silence: the tone is still generated
        # so the interval timing is identical to a signal trial.
        lev = -200
    sig = pureTone(currentFreq, phase, lev, dur, ramps, channel, parent.prm['sampRate'], parent.prm['maxLevel'])
    parent.playSequentialIntervals([sig])
|
ric2b/Vivaldi-browser | refs/heads/master | chromium/third_party/blink/tools/blinkpy/web_tests/models/test_configuration_unittest.py | 2 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from blinkpy.web_tests.models.test_configuration import SpecifierSorter, TestConfiguration, TestConfigurationConverter
def make_mock_all_test_configurations_set():
    """Return TestConfigurations for five ports in both build types."""
    version_architecture_pairs = (
        ('snowleopard', 'x86'),
        ('win7', 'x86'),
        ('vista', 'x86'),
        ('precise', 'x86_64'),
        ('trusty', 'x86_64'))
    return set(
        TestConfiguration(version, architecture, build_type)
        for version, architecture in version_architecture_pairs
        for build_type in ('debug', 'release'))
# Macro specifier -> the concrete version specifiers it expands to.
MOCK_MACROS = {
    'mac': ['snowleopard'],
    'win': ['vista', 'win7'],
    'linux': ['precise', 'trusty'],
}
class TestConfigurationTest(unittest.TestCase):
    """Unit tests for TestConfiguration's mapping protocol and identity."""
    def test_items(self):
        config = TestConfiguration('win7', 'x86', 'release')
        result_config_dict = {}
        for category, specifier in config.items():
            result_config_dict[category] = specifier
        self.assertEqual({'version': 'win7', 'architecture': 'x86', 'build_type': 'release'}, result_config_dict)
    def test_keys(self):
        config = TestConfiguration('win7', 'x86', 'release')
        result_config_keys = []
        for category in config.keys():
            result_config_keys.append(category)
        self.assertEqual(set(['version', 'architecture', 'build_type']), set(result_config_keys))
    def test_str(self):
        config = TestConfiguration('win7', 'x86', 'release')
        self.assertEqual('<win7, x86, release>', str(config))
    def test_repr(self):
        config = TestConfiguration('win7', 'x86', 'release')
        self.assertEqual("TestConfig(version='win7', architecture='x86', build_type='release')", repr(config))
    def test_hash(self):
        # Equal configurations must hash equal so they collapse in dicts
        # and sets; unequal ones must be distinct keys.
        config_dict = {}
        config_dict[TestConfiguration('win7', 'x86', 'release')] = True
        self.assertIn(TestConfiguration('win7', 'x86', 'release'), config_dict)
        self.assertTrue(config_dict[TestConfiguration('win7', 'x86', 'release')])
        def query_unknown_key():
            return config_dict[TestConfiguration('win7', 'x86', 'debug')]
        with self.assertRaises(KeyError):
            query_unknown_key()
        self.assertIn(TestConfiguration('win7', 'x86', 'release'), config_dict)
        self.assertNotIn(TestConfiguration('win7', 'x86', 'debug'), config_dict)
        configs_list = [TestConfiguration('win7', 'x86', 'release'), TestConfiguration(
            'win7', 'x86', 'debug'), TestConfiguration('win7', 'x86', 'debug')]
        self.assertEqual(len(configs_list), 3)
        self.assertEqual(len(set(configs_list)), 2)
    def test_eq(self):
        self.assertEqual(TestConfiguration('win7', 'x86', 'release'), TestConfiguration('win7', 'x86', 'release'))
        # assertNotEqual, not the deprecated assertNotEquals alias.
        self.assertNotEqual(TestConfiguration('win7', 'x86', 'release'), TestConfiguration('win7', 'x86', 'debug'))
    def test_values(self):
        config = TestConfiguration('win7', 'x86', 'release')
        result_config_values = []
        for value in config.values():
            result_config_values.append(value)
        self.assertEqual(set(['win7', 'x86', 'release']), set(result_config_values))
class SpecifierSorterTest(unittest.TestCase):
    """Tests for SpecifierSorter's category lookup and sort order."""
    def __init__(self, testFunc):
        # Build the shared configuration fixture before delegating to the
        # standard TestCase initializer.
        self._all_test_configurations = make_mock_all_test_configurations_set()
        unittest.TestCase.__init__(self, testFunc)
    def test_init(self):
        sorter = SpecifierSorter()
        self.assertIsNone(sorter.category_for_specifier('control'))
        sorter = SpecifierSorter(self._all_test_configurations)
        self.assertEqual(sorter.category_for_specifier('win7'), 'version')
        sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
        self.assertEqual(sorter.category_for_specifier('mac'), 'version')
    def test_add_specifier(self):
        sorter = SpecifierSorter()
        self.assertIsNone(sorter.category_for_specifier('control'))
        sorter.add_specifier('version', 'control')
        self.assertEqual(sorter.category_for_specifier('control'), 'version')
        sorter.add_specifier('version', 'one')
        self.assertEqual(sorter.category_for_specifier('one'), 'version')
        sorter.add_specifier('architecture', 'renaissance')
        self.assertEqual(sorter.category_for_specifier('one'), 'version')
        self.assertEqual(sorter.category_for_specifier('renaissance'), 'architecture')
    def test_add_macros(self):
        sorter = SpecifierSorter(self._all_test_configurations)
        sorter.add_macros(MOCK_MACROS)
        # A macro takes the category of the specifiers it expands to.
        self.assertEqual(sorter.category_for_specifier('mac'), 'version')
        self.assertEqual(sorter.category_for_specifier('win'), 'version')
        self.assertEqual(sorter.category_for_specifier('x86'), 'architecture')
    def test_category_priority(self):
        sorter = SpecifierSorter(self._all_test_configurations)
        self.assertEqual(sorter.category_priority('version'), 0)
        self.assertEqual(sorter.category_priority('build_type'), 2)
    def test_specifier_priority(self):
        sorter = SpecifierSorter(self._all_test_configurations)
        self.assertEqual(sorter.specifier_priority('x86'), 1)
        self.assertEqual(sorter.specifier_priority('snowleopard'), 0)
    def test_sort_specifiers(self):
        # Expected order: versions first, then architectures, then build
        # types.
        sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
        self.assertEqual(sorter.sort_specifiers(set()), [])
        self.assertEqual(sorter.sort_specifiers(set(['x86'])), ['x86'])
        self.assertEqual(sorter.sort_specifiers(set(['x86', 'win7'])), ['win7', 'x86'])
        self.assertEqual(sorter.sort_specifiers(set(['x86', 'debug', 'win7'])), ['win7', 'x86', 'debug'])
        self.assertEqual(sorter.sort_specifiers(set(['snowleopard', 'x86', 'debug', 'win7'])), [
            'snowleopard', 'win7', 'x86', 'debug'])
        self.assertEqual(sorter.sort_specifiers(set(['x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug'])
class TestConfigurationConverterTest(unittest.TestCase):
def __init__(self, testFunc):
self._all_test_configurations = make_mock_all_test_configurations_set()
unittest.TestCase.__init__(self, testFunc)
def test_symmetric_difference(self):
self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c'])]), set(['a', 'c']))
self.assertEqual(TestConfigurationConverter.symmetric_difference(
[set(['a', 'b']), set(['b', 'c']), set(['b', 'd'])]), set(['a', 'c', 'd']))
def test_to_config_set(self):
converter = TestConfigurationConverter(self._all_test_configurations)
self.assertEqual(converter.to_config_set(set()), self._all_test_configurations)
self.assertEqual(converter.to_config_set(set(['foo'])), set())
self.assertEqual(converter.to_config_set(set(['win7', 'foo'])), set())
errors = []
self.assertEqual(converter.to_config_set(set(['win7', 'foo']), errors), set())
self.assertEqual(errors, ["Unrecognized specifier 'foo'"])
self.assertEqual(converter.to_config_set(set(['win7', 'x86_64'])), set())
configs_to_match = set([
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win7', 'release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('precise', 'x86_64', 'release'),
TestConfiguration('trusty', 'x86_64', 'release'),
])
self.assertEqual(converter.to_config_set(set(['release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('precise', 'x86_64', 'release'),
TestConfiguration('precise', 'x86_64', 'debug'),
TestConfiguration('trusty', 'x86_64', 'release'),
TestConfiguration('trusty', 'x86_64', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['x86_64'])), configs_to_match)
configs_to_match = set([
TestConfiguration('trusty', 'x86_64', 'release'),
TestConfiguration('trusty', 'x86_64', 'debug'),
TestConfiguration('precise', 'x86_64', 'release'),
TestConfiguration('precise', 'x86_64', 'debug'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['trusty', 'precise', 'snowleopard'])),
configs_to_match)
configs_to_match = set([
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['snowleopard', 'x86'])),
configs_to_match)
configs_to_match = set([
TestConfiguration('trusty', 'x86_64', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(
converter.to_config_set(set(['trusty', 'snowleopard', 'release'])),
configs_to_match)
def test_macro_expansion(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win', 'release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('trusty', 'x86_64', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win', 'trusty', 'release'])),
configs_to_match)
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['win', 'mac', 'release'])), configs_to_match)
def test_to_specifier_lists(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
self.assertEqual(converter.to_specifiers_list(set(self._all_test_configurations)), [[]])
self.assertEqual(converter.to_specifiers_list(set()), [])
configs_to_match = set([
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'win7'])])
configs_to_match = set([
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'debug'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win7'])])
configs_to_match = set([
TestConfiguration('precise', 'x86_64', 'debug'),
TestConfiguration('trusty', 'x86_64', 'debug'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match),
[set(['release', 'win7']), set(['debug', 'linux'])])
configs_to_match = set([
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('trusty', 'x86_64', 'debug'),
TestConfiguration('precise', 'x86_64', 'debug'),
TestConfiguration('trusty', 'x86_64', 'debug'),
TestConfiguration('precise', 'x86_64', 'debug'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match),
[set(['release', 'win7']), set(['debug', 'linux'])])
configs_to_match = set([
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('precise', 'x86_64', 'release'),
TestConfiguration('trusty', 'x86_64', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release'])])
configs_to_match = set([
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win7', 'mac', 'release'])])
configs_to_match = set([
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'debug'),
TestConfiguration('trusty', 'x86_64', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match),
[set(['win7']), set(['release', 'mac', 'trusty'])])
def test_macro_collapsing(self):
macros = {'foo': ['bar', 'baz'], 'people': ['bob', 'alice', 'john']}
specifiers_list = [set(['john', 'godzilla', 'bob', 'alice'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['people', 'godzilla'])])
specifiers_list = [set(['john', 'godzilla', 'alice'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['john', 'godzilla', 'alice', 'godzilla'])])
specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob', 'alice', 'john'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['foo', 'godzilla', 'people'])])
specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob']), set(['bar', 'baz']), set(['people', 'alice', 'bob', 'john'])]
TestConfigurationConverter.collapse_macros(macros, specifiers_list)
self.assertEqual(specifiers_list, [set(['bob', 'foo', 'godzilla']), set(['foo']), set(['people'])])
def test_converter_macro_collapsing(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('precise', 'x86_64', 'release'),
TestConfiguration('trusty', 'x86_64', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release'])])
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
configs_to_match = set([
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
])
self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
    def test_specifier_converter_access(self):
        """specifier_sorter() exposes the sorter built from the converter's configurations."""
        specifier_sorter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS).specifier_sorter()
        self.assertEqual(specifier_sorter.category_for_specifier('snowleopard'), 'version')
        # NOTE(review): 'mac' is a macro name, yet it is expected under the
        # 'version' category -- presumably macros inherit the category of
        # their member versions; confirm against the sorter implementation.
        self.assertEqual(specifier_sorter.category_for_specifier('mac'), 'version')
|
hwu25/AppPkg | refs/heads/trunk | Applications/Python/Python-2.7.2/Lib/encodings/cp875.py | 93 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp875 codec backed by the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk is encoded independently;
    # charmap_encode returns (bytes, length) -- only the bytes are kept.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Same as above for decoding: [0] drops the consumed-length component.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
# Stream classes need no extra logic: they inherit encode/decode from Codec
# and the buffering machinery from the codecs base classes.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo describing the cp875 codec for the codec registry."""
    # Codec is stateless, so one shared instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp875',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'|' # 0x6A -> VERTICAL LINE
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xa8' # 0x70 -> DIAERESIS
u'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
u'\xb4' # 0xA0 -> ACUTE ACCENT
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
u'\u03be' # 0xAB -> GREEK SMALL LETTER XI
u'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
u'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
u'\xa3' # 0xB0 -> POUND SIGN
u'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
u'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
u'\u2015' # 0xCF -> HORIZONTAL BAR
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb1' # 0xDA -> PLUS-MINUS SIGN
u'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
u'\x1a' # 0xDC -> SUBSTITUTE
u'\u0387' # 0xDD -> GREEK ANO TELEIA
u'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
u'\xa6' # 0xDF -> BROKEN BAR
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\x1a' # 0xE1 -> SUBSTITUTE
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xa7' # 0xEB -> SECTION SIGN
u'\x1a' # 0xEC -> SUBSTITUTE
u'\x1a' # 0xED -> SUBSTITUTE
u'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xEF -> NOT SIGN
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xa9' # 0xFB -> COPYRIGHT SIGN
u'\x1a' # 0xFC -> SUBSTITUTE
u'\x1a' # 0xFD -> SUBSTITUTE
u'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
nrego/westpa | refs/heads/master | lib/examples/stringmethodexamples/examples/DicksonRingPotential/analysis/calculate_distribution.py | 2 | import numpy as np
import h5py
import argparse
import sys
import os
import west
pcoord_dtype = np.float32
nbins = 100
print '-----------------------'
print os.path.basename(__file__)
print '-----------------------'
env = os.environ
for k in env:
if 'WEST' in k:
print k, env[k]
parser = argparse.ArgumentParser('calculate_distribution', description='''\
Calculate distribution statistics
''')
west.rc.add_args(parser)
parser.add_argument('-o', dest='h5out', help='name of output file')
args = parser.parse_args()
west.rc.process_args(args)
data_manager = west.rc.get_data_manager()
data_manager.open_backing(mode='r')
h5out = h5py.File(args.h5out, 'a')
n_iters = data_manager.current_iteration - 1
iter_prec = data_manager.iter_prec
if 'data' in h5out:
data_ds = h5out['data']
dshape = data_ds.shape
if dshape[0] < n_iters:
data_ds.resize((n_iters - 2, nbins))
start_iter = h5out.attrs['last_completed_iter']
else:
data_ds = h5out.require_dataset('data', (n_iters - 2, nbins), np.float64, exact=False, maxshape=(None, nbins))
start_iter = 2
h5out.attrs['last_completed_iter'] = 2
for iiter in xrange(start_iter, n_iters):
if iiter % 1000 == 0:
print 'Processing {} of {}'.format(iiter, n_iters - 1)
h5out.flush()
try:
iter_group = data_manager.get_iter_group(iiter)
weight = iter_group['seg_index']['weight']
crd = iter_group['pcoord'][:,-1,:]
assert weight.shape[0] == crd.shape[0]
h,edges = np.histogram(np.arctan2(crd[:,1],crd[:,0])/np.pi,weights=weight,range=(-1.0,1.0),bins=nbins)
data_ds[iiter-2,:] = h
h5out.attrs['last_completed_iter'] = iiter
except:
print 'Error in processing iteration: {}'.format(iiter)
print sys.exc_info()
break
h5out.close()
data_manager.close_backing()
|
dylanlesko/youtube-dl | refs/heads/master | youtube_dl/extractor/quickvid.py | 113 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
determine_ext,
int_or_none,
)
class QuickVidIE(InfoExtractor):
    """Extractor for quickvid.org watch pages."""
    _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
    _TEST = {
        'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
        'md5': 'c0c72dd473f260c06c808a05d19acdc5',
        'info_dict': {
            'id': 'sUQT3RCG8dx',
            'ext': 'mp4',
            'title': 'Nick Offerman\'s Summer Reading Recap',
            'thumbnail': 're:^https?://.*\.(?:png|jpg|gif)$',
            'view_count': int,
        },
        'skip': 'Not accessible from Travis CI server',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')

        # View count lives in its own <div>; absence is not fatal.
        raw_views = self._html_search_regex(
            r'(?s)<div id="views">(.*?)</div>',
            webpage, 'view count', fatal=False)
        view_count = int_or_none(raw_views)

        # Collect every <source> inside the <video> tag; src URLs may be
        # relative, so resolve them against the page URL.
        video_code = self._search_regex(
            r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')
        formats = []
        for src in re.findall('<source\s+src="([^"]+)"', video_code):
            formats.append({
                'url': compat_urlparse.urljoin(url, src),
                'format_id': determine_ext(src, None),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': self._og_search_thumbnail(webpage),
            'view_count': view_count,
        }
|
jbgastineau/cxphasing | refs/heads/master | cxphasing/__init__.py | 1 | __all__ = ["CXData", "CXData2", "CXParams", "CXPhasing", "CXPhasing2", "CXFileReader", "CXUtils"]
import cxparams
from cxparams import CXParams
from CXFileReader import CXFileReader
from CXData import CXData
from CXData import CXModal
from CXPhasing import CXPhasing
import CXUtils as CXU
|
debugger06/MiroX | refs/heads/master | resources/devices/android.py | 3 | from miro import app
from miro import prefs
from miro.devices import DeviceInfo, MultipleDeviceInfo
from miro.gtcache import gettext as _
defaults = {
'audio_conversion': 'mp3',
'container_types': 'mp3 wav asf isom ogg mpeg avi'.split(),
'audio_types': 'mp* wmav* aac pcm* vorbis'.split(),
'video_types': 'theora h264 mpeg* wmv*'.split(),
'mount_instructions': _("Your phone must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)}),
'video_path': u'Miro',
'audio_path': u'Miro'
}
tablet_defaults = defaults.copy()
tablet_defaults['mount_instructions'] = _(
"Your tablet must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)})
htc_hero = DeviceInfo(u'HTC Hero',
video_conversion='hero',
video_path=u'Video',
audio_path=u'Music')
htc_evo = DeviceInfo(u'HTC EVO',
video_conversion='epic',
video_path=u'Video',
audio_path=u'Music')
htc_evo_4g = DeviceInfo(u'HTC EVO 4G',
video_conversion='epic')
htc_evo_3d = DeviceInfo('HTC EVO 3D',
video_conversion='sensationevo3d')
htc_legend = DeviceInfo(u'HTC Legend',
video_conversion='dreamg1',
video_path=u'/media/video',
audio_path=u'/media/audio')
tmobile_g1 = DeviceInfo(u'T-Mobile G1',
video_conversion='dreamg1')
tmobile_g2 = DeviceInfo(u'T-Mobile G2',
video_conversion='g2')
htc_vision = DeviceInfo(u'HTC Vision',
video_conversion='g2')
htc_desire_z = DeviceInfo(u'HTC Desire Z',
video_conversion='g2')
htc_incredible = DeviceInfo(u'HTC Droid Incredible',
video_conversion='epic')
htc_incredible_2 = DeviceInfo(u'HTC Droid Incredible 2',
video_conversion='epic')
htc_sensation = DeviceInfo(u'HTC Sensation',
video_conversion='epic')
htc_aria = DeviceInfo(u'HTC Aria',
video_conversion='hero')
generic_htc = DeviceInfo(_('Generic %(name)s Device', {'name': 'HTC'}),
video_conversion='hero')
htc_android_device = MultipleDeviceInfo(
'HTC Android Phone', [htc_hero, htc_evo, htc_evo_4g, htc_evo_3d,
htc_legend,
tmobile_g1, tmobile_g2, htc_vision, htc_desire_z,
htc_incredible, htc_incredible_2, htc_sensation,
htc_aria,
generic_htc],
vendor_id=0x0bb4,
product_id=0x0ff9,
**defaults)
htc_desire = DeviceInfo(u'HTC Desire',
vendor_id=0x0bb4,
product_id=0x0c87,
device_name='HTC Android Phone',
video_conversion='epic',
**defaults)
htc_desire_hd = DeviceInfo(u'HTC Desire HD',
vendor_id=0xbb4,
product_id=0x0ca2,
device_name='HTC Android Phone',
video_conversion='epic',
**defaults)
htc_thunderbolt = DeviceInfo(u'HTC Thunderbolt',
vendor_id=0x0bb4,
product_id=0x0ca4,
device_name='HTC Android Phone',
video_conversion='epic',
**defaults)
htc_sensation = DeviceInfo(u'HTC Sensation',
vendor_id=0x0bb4,
product_id=0x0c86,
device_name='HTC Android Phone',
video_conversion='sensationevo3d',
**defaults)
nexus_one = DeviceInfo(u'Nexus One',
vendor_id=0x18d1,
product_id=0x4e11,
device_name='Google, Inc.Nexus One',
video_conversion='nexusone',
**defaults)
# the Droid apparently can have two different USB IDs
motorola_droid_one = DeviceInfo(u'Motorola Droid',
vendor_id=0x22b8,
product_id=0x41db,
device_name='Motorola A855',
video_conversion='droid',
**defaults)
# Second USB identity of the original Droid (see comment above).
motorola_droid_two = DeviceInfo(u'Motorola Droid',
                                # Bug fix: Motorola's USB vendor ID is
                                # 0x22b8 (as in every other Motorola entry
                                # in this file); the original read 0x22b --
                                # a dropped digit that could never match.
                                vendor_id=0x22b8,
                                product_id=0x41d9,
                                device_name='Motorola A855',
                                video_conversion='droid',
                                **defaults)
motorola_droid2 = DeviceInfo(u'Motorola Droid 2',
vendor_id=0x22b8,
product_id=0x42a3,
device_name='Motorola A955',
video_conversion='droid',
**defaults)
motorola_droidx = DeviceInfo(u'Motorola Droid X',
vendor_id=0x22b8,
product_id=0x4285,
device_name='Motorola MB810',
video_conversion='droid',
**defaults)
motorola_xoom = DeviceInfo(u'Motorola Xoom',
vendor_id=0x18d1,
product_id=0x70a8,
device_name='Motorola MZ604',
video_conversion='xoom',
**tablet_defaults)
galaxy_s2 = DeviceInfo(u'Galaxy S2',
vendor_id=0x04e8,
product_id=0x685e,
device_name='Android UMS Composite',
video_conversion='epic',
**defaults)
galaxy_tab = DeviceInfo(u'Galaxy Tab',
vendor_id=0x04e8,
product_id=0x681d,
device_name='SAMSUNG SGH-T849',
video_conversion='galaxytab',
**tablet_defaults)
epic = DeviceInfo(u'Epic',
vendor_id=0x04e8,
product_id=0x6601,
device_name="SAMSUNG SPH-D700 Card",
video_conversion='epic',
**defaults)
lg_optimus_2x = DeviceInfo(u'Optimus 2x',
vendor_id=0x1004,
product_id=0x618e,
device_name='LGE P990',
video_conversion='epic',
**defaults)
lg_optimus_s = DeviceInfo(
u'Optimus S',
vendor_id=0x1004,
product_id=0x618E,
device_name='GOOGLE Mass storage',
video_conversion='hero',
audio_conversion='mp3',
container_types='mp3 wav asf isom ogg mpeg avi'.split(),
audio_types='mp* wmav* aac pcm* vorbis'.split(),
video_types='theora h264 mpeg* wmv*'.split(),
mount_instructions=_("Your phone must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)}),
video_path=u'Media/Video',
audio_path=u'Media/Audio')
nookcolor = DeviceInfo(
name=u'MyNOOKColor',
device_name='B&N Ebook Disk',
vendor_id=0x2080,
product_id=0x0002,
# FIXME - the Nook Color has no way to play videos, so this should
# really be disabled.
video_conversion='copy',
video_path=u'My Files/Video',
audio_conversion='mp3',
audio_path=u'My Files/Music',
container_types=['mp3', 'isom'],
audio_types=['mp*', 'aac'],
video_types=[],
mount_instructions=_('Your Nook Color must be connected to your computer '
'and in USB Mode to sync files to it.\n')
)
toshiba_thrive = DeviceInfo(
u'Toshiba Thrive',
vendor_id=0x18d1,
product_id=0x7102,
device_name='AT100',
video_conversion='xoom',
audio_conversion='mp3',
container_types='mp3 wav asf isom ogg mpeg avi'.split(),
audio_types='mp* wmav* aac pcm* vorbis'.split(),
video_types='theora h264 mpeg* wmv*'.split(),
mount_instructions=_("Your tablet must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)}),
video_path=u'Movies',
audio_path=u'Music')
devices = [htc_android_device, htc_desire, htc_desire_hd, htc_thunderbolt,
htc_sensation, nexus_one,
motorola_droid_one, motorola_droid_two, motorola_droid2,
motorola_droidx, motorola_xoom, lg_optimus_2x, lg_optimus_s,
galaxy_s2, galaxy_tab, epic, nookcolor, toshiba_thrive]
|
samdesbois/Symfony_tuto | refs/heads/master | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
    # Custom docutils node marking a multi-language configuration block;
    # rendered by the visit_/depart_ functions registered in setup().
    pass
class ConfigurationBlock(Directive):
    """reST directive rendering nested code blocks as one per-language list.

    Each literal block inside the directive body is wrapped in a list item
    titled with the human-readable language name from `formats`.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    # Maps pygments lexer names to the display label shown above each block.
    formats = {
        'html': 'HTML',
        'xml': 'XML',
        'php': 'PHP',
        'yaml': 'YAML',
        'jinja': 'Twig',
        'html+jinja': 'Twig',
        'jinja+html': 'Twig',
        'php+html': 'PHP',
        'html+php': 'PHP',
        'ini': 'INI',
        'php-annotations': 'Annotations',
    }
    def run(self):
        """Parse the directive body and return a single configurationblock node."""
        env = self.state.document.settings.env
        node = nodes.Element()
        node.document = self.state.document
        # Parse the raw directive content into `node`'s children.
        self.state.nested_parse(self.content, self.content_offset, node)
        entries = []
        for i, child in enumerate(node):
            if isinstance(child, nodes.literal_block):
                # add a title (the language name) before each block
                #targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
                #targetnode = nodes.target('', '', ids=[targetid])
                #targetnode.append(child)
                innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
                para = nodes.paragraph()
                para += [innernode, child]
                entry = nodes.list_item('')
                entry.append(para)
                entries.append(entry)
        # Wrap all language variants in a bullet list inside the custom node.
        resultnode = configurationblock()
        resultnode.append(nodes.bullet_list('', *entries))
        return [resultnode]
def visit_configurationblock_html(self, node):
    # Open a wrapper <div class="configuration-block"> around the rendered
    # list (presumably styled/tabbed by the doc theme's CSS/JS -- confirm).
    self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
    self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
    # LaTeX output gets no special markup; the bullet list renders as-is.
    pass
def depart_configurationblock_latex(self, node):
    pass
def setup(app):
    """Sphinx extension entry point: register the node, its renderers, and the directive."""
    app.add_node(configurationblock,
                 html=(visit_configurationblock_html, depart_configurationblock_html),
                 latex=(visit_configurationblock_latex, depart_configurationblock_latex))
    app.add_directive('configuration-block', ConfigurationBlock)
|
savoirfairelinux/mod-booster-snmp | refs/heads/master | module/module.py | 1 | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2014:
# Thibault Cohen, thibault.cohen@savoirfairelinux.com
#
# This file is part of SNMP Booster Shinken Module.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with SNMP Booster Shinken Module.
# If not, see <http://www.gnu.org/licenses/>.
"""
Entry file for SNMP Booster module
"""
from shinken.log import logger
from snmpbooster_arbiter import SnmpBoosterArbiter
from snmpbooster_poller import SnmpBoosterPoller
from snmpbooster_scheduler import SnmpBoosterScheduler
# Shinken module descriptor: which daemons may load this module and in
# which phases it participates.
properties = {
    'daemons': ['poller', 'scheduler', 'arbiter'],
    'type': 'snmp_booster',
    'external': False,
    'phases': ['running', 'late_configuration'],
    # To be a real worker module, you must set this
    'worker_capable': True,
}
def get_instance(mod_conf):
    """called by the plugin manager to get a poller"""
    logger.info("[SnmpBooster] [code 0101] Loading SNMP Booster module "
                "for plugin %s" % mod_conf.get_name())

    def _fail(message):
        # Log the problem, then abort module loading with the same message.
        logger.error(message)
        raise Exception(message)

    # The configuration must say which daemon is loading us...
    if not hasattr(mod_conf, 'loaded_by'):
        _fail("[SnmpBooster] [code 0102] Couldn't find 'loaded_by' "
              "configuration directive.")

    # ... and that daemon must be one we support.
    allowed_daemons = mod_conf.properties['daemons']
    if mod_conf.loaded_by not in allowed_daemons:
        _fail("[SnmpBooster] [code 0103] 'loaded_by' attribute must be "
              "in %s" % str(allowed_daemons))

    # Pick SnmpBoosterArbiter / SnmpBoosterScheduler / SnmpBoosterPoller
    # by name and instantiate it.
    daemon_class = globals()["SnmpBooster%s" % mod_conf.loaded_by.capitalize()]
    return daemon_class(mod_conf)
|
trabucayre/gnuradio | refs/heads/master | gr-qtgui/python/qtgui/dialgauge.py | 9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
from PyQt5.QtWidgets import QFrame, QHBoxLayout, QVBoxLayout, QLabel
from PyQt5.QtGui import QPainter, QColor, QPen, QFont, QFontMetricsF
from PyQt5 import QtCore
from PyQt5.QtCore import Qt as Qtc
from gnuradio import gr
import pmt
class LabeledDialGauge(QFrame):
    """Composite widget: a DialGauge with an optional text label.

    position: 1 = label above, 2 = below, 3 = left, 4 = right.
    """
    def __init__(self, lbl='', barColor='blue', backgroundColor='white', fontColor='black',
                 minValue=0, maxValue=100, maxSize=80, position=1,
                 isFloat=False, showValue=False, fixedOrMin=True, parent=None):
        QFrame.__init__(self, parent)
        self.numberControl = DialGauge(barColor, backgroundColor, fontColor, minValue,
                                       maxValue, maxSize, isFloat, showValue, fixedOrMin, parent)

        # Above/below -> stack vertically; left/right -> horizontally.
        if position < 3:
            layout = QVBoxLayout()
        else:
            layout = QHBoxLayout()

        self.lbl = lbl
        self.showvalue = showValue
        self.isFloat = isFloat

        self.lblcontrol = QLabel(lbl, self)
        self.lblcontrol.setAlignment(Qtc.AlignCenter)

        # Bug fix: the original guards read "if len:", which tests the
        # *builtin function* len (always truthy), so an empty label was
        # still set and laid out.  Test the label text itself instead.
        if lbl:
            self.lblcontrol.setText(lbl)
        if fontColor != 'default':
            self.lblcontrol.setStyleSheet("QLabel { color : " + fontColor + "; }")

        # add top or left
        if lbl:
            if position == 1 or position == 3:
                layout.addWidget(self.lblcontrol)
        else:
            self.hasLabel = False

        layout.addWidget(self.numberControl)

        # Add bottom or right
        if lbl:
            if position == 2 or position == 4:
                layout.addWidget(self.lblcontrol)

        layout.setAlignment(Qtc.AlignCenter | Qtc.AlignVCenter)
        self.setLayout(layout)
        self.show()

    def setValue(self, new_value):
        """Forward a new value to the embedded DialGauge."""
        self.numberControl.setValue(new_value)
class DialGauge(QFrame):
    """Circular gauge: the value is drawn as a colored arc, optionally with text."""
    def __init__(self, barColor='blue', backgroundColor='white', fontColor='black',
                 minValue=0, maxValue=100, maxSize=80,
                 isFloat=False, showValue=False, fixedOrMin=True, parent=None):
        QFrame.__init__(self, parent)
        self.maxSize = maxSize
        super().setMinimumSize(maxSize, maxSize)
        # fixedOrMin=True pins the widget to a fixed square of maxSize;
        # otherwise maxSize is only a minimum and the widget may grow.
        if fixedOrMin:
            super().setMaximumSize(maxSize, maxSize)
        self.backgroundColor = backgroundColor
        self.barColor = barColor
        self.fontColor = fontColor
        self.isFloat = isFloat
        self.showValue = showValue
        # Current value, clamped to [minValue, maxValue] by setValue().
        self.value = minValue
        self.minValue = minValue
        self.maxValue = maxValue
        self.textfont = QFont(self.font())
        self.textfont.setPixelSize(16)
        self.metrics = QFontMetricsF(self.textfont)
        # Full-circle sweep from 0 to 360 degrees.
        self.startAngle = 0.0
        self.endAngle = 360.0
        self.degScaler = 16.0  # The span angle must be specified in 1/16 of a degree units
        # Arc thickness: 10% of the widget size, at least 6 px.
        self.penWidth = max(int(0.1 * maxSize), 6)
        self.halfPenWidth = int(self.penWidth / 2)

    def getValue(self):
        """Return the current value as float or int depending on isFloat."""
        if self.isFloat:
            return float(self.value)
        else:
            return int(self.value)

    def setValue(self, new_value):
        """Clamp new_value into [minValue, maxValue], store it, and repaint."""
        if new_value > self.maxValue:
            new_value = self.maxValue
        elif new_value < self.minValue:
            new_value = self.minValue
        self.value = float(new_value)
        super().update()

    def paintEvent(self, event):
        """Draw the gauge: optional centered text, background ring, value arc, borders."""
        super().paintEvent(event)
        size = self.size()
        # Fraction of the range covered by the current value -> sweep angle.
        percentRange = float(self.value - self.minValue) / float(self.maxValue - self.minValue)
        endAngle = self.startAngle + round(percentRange * float(self.endAngle - self.startAngle), 0)

        # Now convert angles to 1/16 scale
        startAngle = int(round(self.startAngle * self.degScaler, 0))
        endAngle = int(round(endAngle * self.degScaler, 0))

        # Arc bounding box, inset by half the pen width so the stroke fits.
        rect = QtCore.QRect(self.halfPenWidth, self.halfPenWidth, size.width()-self.penWidth,
                            size.height()-self.penWidth)

        # Set up the painting canvass
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)

        if self.showValue:
            painter.setFont(self.textfont)
            painter.setPen(QPen(QColor(self.fontColor)))
            if self.isFloat:
                printText = "%.2f" % self.value
            else:
                printText = str(int(self.value))
            # Center the text horizontally at mid-height.
            painter.drawText(size.width()/2-self.metrics.width(printText)/2, size.height()/2,
                             printText)

        painter.save()
        # Rotate the coordinate system so the arc starts at the top and the
        # value sweeps clockwise.
        painter.translate(self.width(), 0)
        painter.rotate(90.0)

        # First draw complete circle
        painter.setPen(QPen(QColor(self.backgroundColor), self.penWidth))
        painter.drawArc(rect, startAngle, self.endAngle*self.degScaler)
        # First draw complete circle
        painter.setPen(QPen(QColor(self.barColor), self.penWidth))
        painter.drawArc(rect, startAngle, -endAngle)

        # Thin gray rings outlining the inner and outer edges of the arc.
        painter.setPen(QPen(QColor('darkgray'), 2))
        painter.drawEllipse(1, 1, rect.width()+self.penWidth-2, rect.width()+self.penWidth-2)
        painter.drawEllipse(1+self.penWidth, 1+self.penWidth, rect.width()-self.penWidth-2,
                            rect.width()-self.penWidth-2)

        painter.restore()
        painter.end()
class GrDialGauge(gr.sync_block, LabeledDialGauge):
    """
    This block creates a dial-style gauge. The value can be set
    either with a variable or an input message.
    """

    def __init__(self, lbl='', barColor='blue', backgroundColor='white', fontColor='black',
                 minValue=0, maxValue=100, maxSize=80,
                 position=1, isFloat=False, showValue=False, fixedOrMin=True, parent=None):
        gr.sync_block.__init__(self, name="DialGauge", in_sig=None, out_sig=None)
        LabeledDialGauge.__init__(self, lbl, barColor, backgroundColor, fontColor, minValue,
                                  maxValue, maxSize, position, isFloat, showValue, fixedOrMin,
                                  parent)
        self.lbl = lbl

        # A reversed range would make the gauge meaningless; bail out early.
        if minValue > maxValue:
            gr.log.error("Min value is greater than max value.")
            sys.exit(1)

        # Accept value updates through a message port as well as setValue().
        self.message_port_register_in(pmt.intern("value"))
        self.set_msg_handler(pmt.intern("value"), self.msgHandler)

    def msgHandler(self, msg):
        """Handle an incoming PMT pair message carrying a new gauge value."""
        try:
            payload = pmt.to_python(pmt.cdr(msg))
            if type(payload) in (float, int):
                super().setValue(payload)
            else:
                gr.log.error("Value received was not an int or a float. "
                             "Received %s" % str(type(payload)))
        except Exception as e:
            gr.log.error("Error with message conversion: %s" % str(e))

    def setValue(self, new_value):
        # Delegate to the widget's clamping setter.
        super().setValue(new_value)
|
adam-pawluczuk/pysorter | refs/heads/master | tests/TestCase.py | 1 | # coding=utf-8
class TestCase(object):
    """Fixture exercising the import-sorting input format."""

    def test_sort_imports(self):
        # Sample block of imports in deliberately unsorted order, printed so
        # the expected input can be inspected.
        source = (
            u'import logging\n'
            u'from security import login_required\n'
            u'from services import vehicle_service\n'
            u'from services import company_service\n'
            u'from models.commission import Commission\n'
            u'from bl_services import estimates_service\n'
            u'from forms.errors import FormValidationError\n'
            u'from controllers.estimates.add import AddEstimateHandler\n'
        )
        # `print source` is Python-2-only syntax; the single-argument
        # print(...) form behaves identically on both Python 2 and 3.
        print(source)
|
andrewleech/script.module.raven | refs/heads/master | lib/raven/scripts/__init__.py | 42 | """
raven.scripts
~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
|
vrenaville/ngo-addons-backport | refs/heads/master | addons/l10n_be/wizard/l10n_be_vat_intra.py | 25 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - make the 'mand_id' field optional
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.report import report_sxw
class partner_vat_intra(osv.osv_memory):
    """
    Partner Vat Intra
    """
    _name = "partner.vat.intra"
    _description = 'Partner VAT Intra'

    def _get_xml_data(self, cr, uid, context=None):
        # Default for the 'file_save' binary field: if create_xml() stored a
        # generated declaration in the context, return it base64-encoded.
        if context.get('file_save', False):
            return base64.encodestring(context['file_save'].encode('utf8'))
        return ''

    def _get_europe_country(self, cursor, user, context=None):
        # Default for 'country_ids': the EU member states (country codes as
        # of this module's writing).
        return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])])

    _columns = {
        'name': fields.char('File Name', size=32),
        'period_code': fields.char('Period Code',size = 6,required = True, help = '''This is where you have to set the period code for the intracom declaration using the format: ppyyyy
      PP can stand for a month: from '01' to '12'.
      PP can stand for a trimester: '31','32','33','34'
          The first figure means that it is a trimester,
          The second figure identify the trimester.
      PP can stand for a complete fiscal year: '00'.
      YYYY stands for the year (4 positions).
    '''
        ),
        'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help = 'Select here the period(s) you want to include in your intracom declaration'),
        'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True),
        'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"),
        'mand_id' : fields.char('Reference', size=14, help="Reference given by the Representative of the sending company."),
        'msg': fields.text('File created', size=14, readonly=True),
        'no_vat': fields.text('Partner With No VAT', size=14, readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."),
        'file_save' : fields.binary('Save File', readonly=True),
        'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'),
        'comments': fields.text('Comments'),
    }

    def _get_tax_code(self, cr, uid, context=None):
        # Default for 'tax_code_id': the root tax-code chart of the current
        # user's company, if one exists.
        obj_tax_code = self.pool.get('account.tax.code')
        obj_user = self.pool.get('res.users')
        company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
        tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
        return tax_code_ids and tax_code_ids[0] or False

    _defaults = {
        'country_ids': _get_europe_country,
        'file_save': _get_xml_data,
        'name': 'vat_intra.xml',
        'tax_code_id': _get_tax_code,
    }

    def _get_datas(self, cr, uid, ids, context=None):
        """Collects require data for vat intra xml
        :param ids: id of wizard.
        :return: dict of all data to be used to generate xml for Partner VAT Intra.
        :rtype: dict
        """
        if context is None:
            context = {}

        obj_user = self.pool.get('res.users')
        obj_sequence = self.pool.get('ir.sequence')
        obj_partner = self.pool.get('res.partner')

        xmldict = {}
        post_code = street = city = country = data_clientinfo = ''
        seq = amount_sum = 0

        wiz_data = self.browse(cr, uid, ids[0], context=context)
        comments = wiz_data.comments

        # The declaring company comes from the selected tax chart, falling
        # back to the current user's company.
        if wiz_data.tax_code_id:
            data_company = wiz_data.tax_code_id.company_id
        else:
            data_company = obj_user.browse(cr, uid, uid, context=context).company_id

        # Get Company vat
        company_vat = data_company.partner_id.vat
        if not company_vat:
            raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with your company.'))
        company_vat = company_vat.replace(' ','').upper()
        issued_by = company_vat[:2]

        # Period code must be exactly PPYYYY (see the field help text).
        if len(wiz_data.period_code) != 6:
            raise osv.except_osv(_('Error!'), _('Period code is not valid.'))

        if not wiz_data.period_ids:
            raise osv.except_osv(_('Insufficient Data!'),_('Please select at least one Period.'))

        p_id_list = obj_partner.search(cr, uid, [('vat','!=',False)], context=context)
        if not p_id_list:
            raise osv.except_osv(_('Insufficient Data!'),_('No partner has a VAT number associated with him.'))

        # Declarant reference: company VAT digits + last 4 digits of the
        # dedicated sequence.
        seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
        dnum = company_vat[2:] + seq_declarantnum[-4:]

        addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice'])
        email = data_company.partner_id.email or ''
        phone = data_company.partner_id.phone or ''

        if addr.get('invoice',False):
            ads = obj_partner.browse(cr, uid, [addr['invoice']])[0]
            city = (ads.city or '')
            post_code = (ads.zip or '')
            if ads.street:
                street = ads.street
            if ads.street2:
                street += ' '
                street += ads.street2
            if ads.country_id:
                country = ads.country_id.code

        if not country:
            country = company_vat[:2]
        if not email:
            raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
        if not phone:
            raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
        xmldict.update({
            'company_name': data_company.name,
            'company_vat': company_vat,
            'vatnum': company_vat[2:],
            'mand_id': wiz_data.mand_id,
            'sender_date': str(time.strftime('%Y-%m-%d')),
            'street': street,
            'city': city,
            'post_code': post_code,
            'country': country,
            'email': email,
            'phone': phone.replace('/','').replace('.','').replace('(','').replace(')','').replace(' ',''),
            'period': wiz_data.period_code,
            'clientlist': [],
            'comments': comments,
            'issued_by': issued_by,
        })

        #tax code 44: services
        #tax code 46L: normal good deliveries
        #tax code 46T: ABC good deliveries
        #tax code 48xxx: credite note on tax code xxx
        codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T')
        # Credit notes (48s*) are folded back onto their base code with a
        # negated amount, then amounts are summed per partner and code.
        cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat,
                      (CASE WHEN t.code = '48s44' THEN '44'
                            WHEN t.code = '48s46L' THEN '46L'
                            WHEN t.code = '48s46T' THEN '46T'
                            ELSE t.code END) AS intra_code,
                      SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount
                      FROM account_move_line l
                      LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id)
                      LEFT JOIN res_partner p ON (l.partner_id = p.id)
                      WHERE t.code IN %s
                       AND l.period_id IN %s
                       AND t.company_id = %s
                      GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id))

        p_count = 0

        for row in cr.dictfetchall():
            # Partners without a VAT number are counted but still listed
            # (with an empty vat) -- create_xml() rejects them later.
            if not row['vat']:
                row['vat'] = ''
                p_count += 1

            seq += 1
            amt = row['amount'] or 0.0
            amount_sum += amt

            # Map the internal tax code to the declaration's S/L/T code.
            intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or ''))

            xmldict['clientlist'].append({
                'partner_name': row['partner_name'],
                'seq': seq,
                'vatnum': row['vat'][2:].replace(' ','').upper(),
                'vat': row['vat'],
                'country': row['vat'][:2],
                'amount': round(amt,2),
                'intra_code': row['intra_code'],
                'code': intra_code})

        xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum,2), 'partner_wo_vat': p_count})
        return xmldict

    def create_xml(self, cursor, user, ids, context=None):
        """Creates xml that is to be exported and sent to estate for partner vat intra.
        :return: Value for next action.
        :rtype: dict
        """
        mod_obj = self.pool.get('ir.model.data')
        xml_data = self._get_datas(cursor, user, ids, context=context)
        month_quarter = xml_data['period'][:2]
        year = xml_data['period'][2:]
        data_file = ''

        # Can't we do this by etree?
        data_head = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1">
    <ns2:Representative>
        <RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(vatnum)s</RepresentativeID>
        <Name>%(company_name)s</Name>
        <Street>%(street)s</Street>
        <PostCode>%(post_code)s</PostCode>
        <City>%(city)s</City>
        <CountryCode>%(country)s</CountryCode>
        <EmailAddress>%(email)s</EmailAddress>
        <Phone>%(phone)s</Phone>
    </ns2:Representative>""" % (xml_data)

        # The representative reference is optional (see 'mand_id' help).
        if xml_data['mand_id']:
            data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data)

        data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data)
        # Period element: '3x' => quarter x, '00' => whole year, else month.
        if month_quarter.startswith('3'):
            data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>'+month_quarter[1]+'</ns2:Quarter> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
        elif month_quarter.startswith('0') and month_quarter.endswith('0'):
            data_comp_period+= '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
        else:
            data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>'+month_quarter+'</ns2:Month> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'

        data_clientinfo = ''
        for client in xml_data['clientlist']:
            if not client['vatnum']:
                raise osv.except_osv(_('Insufficient Data!'),_('No vat number defined for %s.') % client['partner_name'])
            data_clientinfo +='\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client)

        data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data)

        data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data)
        # Stash the XML in the context so _get_xml_data() can serve it as
        # the default of the 'file_save' field in the next view.
        context['file_save'] = data_file

        model_data_ids = mod_obj.search(cursor, user,[('model','=','ir.ui.view'),('name','=','view_vat_intra_save')], context=context)
        resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id']

        return {
            'name': _('Save'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'partner.vat.intra',
            'views': [(resource_id,'form')],
            'view_id': 'view_vat_intra_save',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

    def preview(self, cr, uid, ids, context=None):
        # Render the declaration through the RML report registered at the
        # bottom of this file instead of producing the XML file.
        xml_data = self._get_datas(cr, uid, ids, context=context)
        datas = {
             'ids': [],
             'model': 'partner.vat.intra',
             'form': xml_data
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'partner.vat.intra.print',
            'datas': datas,
        }

# Instantiate to register the wizard with the ORM (old OpenERP style).
partner_vat_intra()
class vat_intra_print(report_sxw.rml_parse):
    """RML parser for the printable preview of the intracom declaration."""

    def __init__(self, cr, uid, name, context):
        super(vat_intra_print, self).__init__(cr, uid, name, context=context)
        # Expose the `time` module to the RML template.
        self.localcontext.update(time=time)
# Register the RML report used by partner_vat_intra.preview().
report_sxw.report_sxw('report.partner.vat.intra.print', 'partner.vat.intra', 'addons/l10n_be/wizard/l10n_be_vat_intra_print.rml', parser=vat_intra_print, header="internal")

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
overflowsecurity/enumerator | refs/heads/master | enumerator.py | 1 | #!/usr/bin/python
""" Author: Maleus
Usage: ./enumerator.py <ip>
Date: 7.28.14
Made for Kali Linux, not tested on other distros.
"""
import sys
import os
import nmap
import ftplib
import subprocess
# --- Script setup: validate argv, create the per-target loot directory on
# --- the user's Desktop and kick off the quick targeted nmap scan.
if len(sys.argv) != 2:
    print "Usage ./enumerator.py <ip>"
    sys.exit(1)
IP = sys.argv[1] # IP address
HOME = os.environ['HOME'] # Sets environment variable for output directory to the current users 'Home' folder.
OUTPUT_DIRECTORY = os.path.join(HOME, "Desktop", IP)# Sets path for folder on users desktop named as the IP address being scanned
try:
    os.makedirs(OUTPUT_DIRECTORY)# Creates folder on users Desktop
except:
    # Folder already exists (or cannot be created): ask for an alternative
    # loot-directory name instead of aborting.
    CUSTOM_NAME = raw_input("IP directory already exists; Please enter the name of your loot directory: ")
    OUTPUT_DIRECTORY = os.path.join(HOME, "Desktop", CUSTOM_NAME)
    os.makedirs(OUTPUT_DIRECTORY)
print "Lookin for easy pickins... Hang tight."
nm = nmap.PortScanner() # Initialize Nmap module
nm.scan(IP, '80,443,22,21,139,445') # Target ports
def ftp(): # Attempts to login to FTP using anonymous user
    """Report whether the target FTP server permits anonymous login."""
    try:
        # Use a distinct local name so the module function `ftp` is not
        # shadowed by its own connection object.
        conn = ftplib.FTP(IP)
        conn.login()
        print("0.0")
        print("FTP ALLOWS ANONYMOUS ACCESS!")
        conn.quit()
    # Catch only FTP/network failures instead of a bare `except`, which
    # also swallowed KeyboardInterrupt and programming errors.
    except ftplib.all_errors:
        print("FTP does not allow anonymous access :(")
def dirb_80(): # Runs dirb on port 80.
    """Launch dirb against http://IP in a detached xterm, logging to dirb_80.txt."""
    DIRB_80 = os.path.join(OUTPUT_DIRECTORY, 'dirb_80.txt')
    # NOTE(review): IP comes from argv and is interpolated unquoted into a
    # shell command -- a hostile argument allows command injection; prefer
    # subprocess with an argument list.
    os.system('xterm -hold -e dirb http://'+IP+' -o '+DIRB_80+' &')
    # Single-argument print(...) works identically on Python 2 and 3.
    print('Running Dirb on port 80 - Check the target folder for output file.')
def dirb_443(): # Runs dirb on port 443.
    """Launch dirb against https://IP in a detached xterm, logging to dirb_443.txt."""
    DIRB_443 = os.path.join(OUTPUT_DIRECTORY, 'dirb_443.txt')
    # NOTE(review): unquoted argv IP in a shell string -- command injection
    # risk; prefer subprocess with an argument list.
    os.system('xterm -hold -e dirb https://'+IP+' -o ' +DIRB_443+ ' &')
    print('Running Dirb on port 443 - Check the target folder for output file.')
def enum4linux(): # Runs enum4linux on the target machine if smb service is detected.
    """Run enum4linux in the background, writing results to enum_info.txt."""
    ENUM_FILE = os.path.join(OUTPUT_DIRECTORY, 'enum_info.txt')
    # NOTE(review): shell=True with the unquoted argv IP -- command
    # injection risk. Also note the trailing '&' backgrounds the command,
    # so communicate() returns without waiting for enum4linux itself.
    proc = subprocess.Popen('enum4linux '+IP+' > '+ENUM_FILE+' &', shell = True)
    stdout,stderr = proc.communicate()
    print('Beginning enum4linux - this may take a few minutes to complete. - Info will be available in the enum_info.txt file -')
def nikto_80(): # Runs Nikto on port 80
    """Launch nikto against http://IP in a detached xterm, logging to nikto_80.txt."""
    NIKTO_80 = os.path.join(OUTPUT_DIRECTORY, 'nikto_80.txt')
    # NOTE(review): unquoted argv IP in a shell string -- command injection
    # risk; prefer subprocess with an argument list.
    os.system('xterm -hold -e nikto -host http://'+IP+' -output '+NIKTO_80+' &')
    print('Running Nikto against port 80 - Check target folder for output file.')
def nikto_443():# Runs Nikto on port 443
    """Launch nikto against https://IP in a detached xterm, logging to nikto_443.txt."""
    NIKTO_443 = os.path.join(OUTPUT_DIRECTORY, 'nikto_443.txt')
    # NOTE(review): unquoted argv IP in a shell string -- command injection
    # risk; prefer subprocess with an argument list.
    os.system('xterm -hold -e nikto -host https://'+IP+' -output '+NIKTO_443+' &')
    print('Running Nikto against port 443 - Check target folder for output file.')
#Initial Nmap scans
# Pretty-print the quick-scan results host-by-host, port-by-port.
for host in nm.all_hosts():
    print('--------------------')
    print('Host: %s (%s)' % (IP, nm[host].hostname()))
    print('State: %s' % nm[host].state())
    print('--------------------')
    for proto in nm[host].all_protocols():
        print('--------------------')
        print('Protocol: %s' % proto)
        lport = nm[host]['tcp'].keys()
        lport.sort()  # Python 2: keys() is a plain list, sortable in place
        for port in lport:
            print('--------------------')
            print('port: %s\tstate: %s' % (port, nm[host][proto][port]['state']))
            print('--------------------')
def has_open_port(port_num):
    """Return True when the quick nmap scan reported TCP `port_num` open."""
    state = nm[IP]['tcp'][port_num]['state']
    return state == 'open'
#Function Checks
# Dispatch the enumeration helpers according to which ports are open.
if has_open_port(21):
    ftp()
if has_open_port(80):
    dirb_80()
    nikto_80()
if has_open_port(443):
    dirb_443()
    nikto_443()
# enum4linux is triggered by either NetBIOS (139) or SMB (445).
if has_open_port(139):
    enum4linux()
if has_open_port(445):
    enum4linux()
#Nmap Service Scan
# Slow, thorough pass: full 65535-port service/version scan saved with the
# rest of the loot.
print "Beginning Service Scan of all ports... Your pwnage can begin soon..."
NMAP_INFO = os.path.join(OUTPUT_DIRECTORY, 'nmap_full.txt')# Nmap full service info file
os.system('nmap -A -p- -T4 -oN '+NMAP_INFO+' '+IP) # Full TCP scan of all 65535 ports
IronLanguages/ironpython3 | refs/heads/master | Src/StdLib/Lib/test/test_fnmatch.py | 173 | """Test cases for the fnmatch module."""
from test import support
import unittest
from fnmatch import fnmatch, fnmatchcase, translate, filter
class FnmatchTestCase(unittest.TestCase):
    """Behavioural tests for fnmatch() and fnmatchcase()."""

    def check_match(self, filename, pattern, should_match=1, fn=fnmatch):
        """Assert that fn(filename, pattern) agrees with should_match."""
        matched = fn(filename, pattern)
        if should_match:
            self.assertTrue(matched,
                            "expected %r to match pattern %r"
                            % (filename, pattern))
        else:
            self.assertTrue(not matched,
                            "expected %r not to match pattern %r"
                            % (filename, pattern))

    def test_fnmatch(self):
        positive = [
            ('abc', 'abc'), ('abc', '?*?'), ('abc', '???*'), ('abc', '*???'),
            ('abc', '???'), ('abc', '*'), ('abc', 'ab[cd]'), ('abc', 'ab[!de]'),
        ]
        negative = [('abc', 'ab[de]'), ('a', '??'), ('a', 'b')]
        for filename, pattern in positive:
            self.check_match(filename, pattern)
        for filename, pattern in negative:
            self.check_match(filename, pattern, 0)
        # '\' handling inside character sets; see SF bug #409651
        self.check_match('\\', r'[\]')
        self.check_match('a', r'[!\]')
        self.check_match('\\', r'[!\]', 0)
        # filenames containing newlines; see http://bugs.python.org/issue6665
        self.check_match('foo\nbar', 'foo*')
        self.check_match('foo\nbar\n', 'foo*')
        self.check_match('\nfoo', 'foo*', False)
        self.check_match('\n', '*')

    def test_mix_bytes_str(self):
        # Mixing str and bytes arguments must raise TypeError.
        for fn in (fnmatch, fnmatchcase):
            self.assertRaises(TypeError, fn, 'test', b'*')
            self.assertRaises(TypeError, fn, b'test', '*')

    def test_fnmatchcase(self):
        # fnmatchcase() never normalizes case, in either direction.
        self.check_match('AbC', 'abc', 0, fnmatchcase)
        self.check_match('abc', 'AbC', 0, fnmatchcase)

    def test_bytes(self):
        for filename, pattern in [(b'test', b'te*'),
                                  (b'test\xff', b'te*\xff'),
                                  (b'foo\nbar', b'foo*')]:
            self.check_match(filename, pattern)
class TranslateTestCase(unittest.TestCase):
def test_translate(self):
self.assertEqual(translate('*'), '.*\Z(?ms)')
self.assertEqual(translate('?'), '.\Z(?ms)')
self.assertEqual(translate('a?b*'), 'a.b.*\Z(?ms)')
self.assertEqual(translate('[abc]'), '[abc]\Z(?ms)')
self.assertEqual(translate('[]]'), '[]]\Z(?ms)')
self.assertEqual(translate('[!x]'), '[^x]\Z(?ms)')
self.assertEqual(translate('[^x]'), '[\\^x]\Z(?ms)')
self.assertEqual(translate('[x'), '\\[x\Z(?ms)')
class FilterTestCase(unittest.TestCase):
    """Tests for fnmatch.filter()."""

    def test_filter(self):
        result = filter(['a', 'b'], 'a')
        self.assertEqual(result, ['a'])
def test_main():
    # Aggregate runner used by CPython's regrtest harness.
    support.run_unittest(FnmatchTestCase,
                         TranslateTestCase,
                         FilterTestCase)


if __name__ == "__main__":
    test_main()
|
r0ro/pyopenssl | refs/heads/master | tests/test_tsafe.py | 4 | # Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :py:obj:`OpenSSL.tsafe`.
"""
from OpenSSL.SSL import TLSv1_METHOD, Context
from OpenSSL.tsafe import Connection
from .util import TestCase
class ConnectionTest(TestCase):
    """
    Tests for :py:obj:`OpenSSL.tsafe.Connection`.
    """

    def test_instantiation(self):
        """
        :py:obj:`OpenSSL.tsafe.Connection` can be instantiated.
        """
        # Construction itself is the assertion here: a failure would raise.
        # Ideally the rest of the Connection test suite would also be run
        # against this thread-safe subclass.
        context = Context(TLSv1_METHOD)
        Connection(context, None)
|
msg4real/pygooglevoice | refs/heads/master | examples/parse_sms.py | 38 | #
#SMS test via Google Voice
#
#John Nagle
# nagle@animats.com
#
from googlevoice import Voice
import sys
import BeautifulSoup
def extractsms(htmlsms):
    """
    extractsms -- extract SMS messages from BeautifulSoup tree of Google Voice SMS HTML.
    Output is a list of dictionaries, one per message.
    """
    messages = []
    tree = BeautifulSoup.BeautifulSoup(htmlsms)  # parse HTML into a tree
    # Conversations are the top-level DIVs carrying an id attribute.
    for conversation in tree.findAll("div", attrs={"id": True}, recursive=False):
        # Each gc-message-sms-row is a single SMS within the conversation.
        for row in conversation.findAll(attrs={"class": "gc-message-sms-row"}):
            item = {"id": conversation["id"]}  # tag message with conversation id
            for span in row.findAll("span", attrs={"class": True}, recursive=False):
                field = span["class"].replace('gc-message-sms-', '')
                item[field] = (" ".join(span.findAll(text=True))).strip()
            messages.append(item)
    return messages
# Script entry: log in to Google Voice, fetch the SMS page and dump each
# parsed message dictionary to stdout.
voice = Voice()
voice.login()
voice.sms()
for msg in extractsms(voice.sms.html):
    print str(msg)
|
DDEFISHER/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/html5parser.py | 423 | from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
          namespaceHTMLElements=True):
    """Parse a string or file-like object into a tree"""
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
                  namespaceHTMLElements=True):
    """Parse an HTML fragment using `container` as the context element."""
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parseFragment(doc, container=container, encoding=encoding)
def method_decorator_metaclass(function):
    """Return a metaclass that wraps every plain-function attribute of a
    class with *function* at class-creation time."""
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            wrapped = {}
            for name, attribute in classDict.items():
                if isinstance(attribute, types.FunctionType):
                    attribute = function(attribute)
                wrapped[name] = attribute
            return type.__new__(meta, classname, bases, wrapped)
    return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
             strict=False, namespaceHTMLElements=True, debug=False):
    """
    strict - raise an exception when a parse error is encountered

    tree - a treebuilder class controlling the type of tree that will be
    returned. Built in treebuilders can be accessed through
    html5lib.treebuilders.getTreeBuilder(treeType)

    tokenizer - a class that provides a stream of tokens to the treebuilder.
    This may be replaced for e.g. a sanitizer which converts some tags to
    text
    """

    # Raise an exception on the first error encountered
    self.strict = strict

    if tree is None:
        tree = treebuilders.getTreeBuilder("etree")
    self.tree = tree(namespaceHTMLElements)
    self.tokenizer_class = tokenizer
    self.errors = []

    # One phase instance per insertion mode, keyed by phase name; when
    # debug is True, getPhases wires call logging into every phase.
    self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                        getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
           encoding=None, parseMeta=True, useChardet=True, **kwargs):
    # Shared driver behind parse()/parseFragment(): build a tokenizer over
    # the stream and run the main loop, restarting from scratch whenever
    # the tokenizer reports that the guessed encoding was wrong.
    self.innerHTMLMode = innerHTML
    self.container = container
    self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
                                          parseMeta=parseMeta,
                                          useChardet=useChardet,
                                          parser=self, **kwargs)
    self.reset()

    while True:
        try:
            self.mainLoop()
            break
        except ReparseException:
            # Encoding changed mid-stream: reset state and reparse.
            self.reset()
def reset(self):
    # Reset all tree-construction state. Called before each parse and
    # again after an encoding-triggered ReparseException.
    self.tree.reset()
    self.firstStartTag = False
    self.errors = []
    self.log = []  # only used with debug mode
    # "quirks" / "limited quirks" / "no quirks"
    self.compatMode = "no quirks"

    if self.innerHTMLMode:
        # Fragment parsing ("fragment case" in the HTML5 spec): seed the
        # tokenizer state from the context element's content model.
        self.innerHTML = self.container.lower()

        if self.innerHTML in cdataElements:
            self.tokenizer.state = self.tokenizer.rcdataState
        elif self.innerHTML in rcdataElements:
            self.tokenizer.state = self.tokenizer.rawtextState
        elif self.innerHTML == 'plaintext':
            self.tokenizer.state = self.tokenizer.plaintextState
        else:
            # state already is data state
            # self.tokenizer.state = self.tokenizer.dataState
            pass
        self.phase = self.phases["beforeHtml"]
        self.phase.insertHtmlElement()
        self.resetInsertionMode()
    else:
        self.innerHTML = False
        self.phase = self.phases["initial"]

    self.lastPhase = None
    self.beforeRCDataPhase = None
    self.framesetOK = True
@property
def documentEncoding(self):
    """The name of the character encoding
    that was used to decode the input stream,
    or :obj:`None` if that is not determined yet.
    """
    if hasattr(self, 'tokenizer'):
        return self.tokenizer.stream.charEncoding[0]
    return None
def isHTMLIntegrationPoint(self, element):
    """Return True if `element` is an HTML integration point (HTML5 spec)."""
    # MathML's annotation-xml is an integration point only for certain
    # values of its encoding attribute.
    if (element.name == "annotation-xml" and
            element.namespace == namespaces["mathml"]):
        attributes = element.attributes
        if "encoding" not in attributes:
            return False
        encoding = attributes["encoding"].translate(asciiUpper2Lower)
        return encoding in ("text/html", "application/xhtml+xml")
    return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
    # True when (namespace, name) names a MathML text integration point.
    key = (element.namespace, element.name)
    return key in mathmlTextIntegrationPointElements
def mainLoop(self):
    # Main token pump: pull normalized tokens off the tokenizer and hand
    # each to the appropriate phase, following the HTML5 tree-construction
    # dispatcher rules for foreign (SVG/MathML) content.
    CharactersToken = tokenTypes["Characters"]
    SpaceCharactersToken = tokenTypes["SpaceCharacters"]
    StartTagToken = tokenTypes["StartTag"]
    EndTagToken = tokenTypes["EndTag"]
    CommentToken = tokenTypes["Comment"]
    DoctypeToken = tokenTypes["Doctype"]
    ParseErrorToken = tokenTypes["ParseError"]

    for token in self.normalizedTokens():
        new_token = token
        # A phase handler may hand a token back for reprocessing; keep
        # dispatching until it is fully consumed.
        while new_token is not None:
            currentNode = self.tree.openElements[-1] if self.tree.openElements else None
            currentNodeNamespace = currentNode.namespace if currentNode else None
            currentNodeName = currentNode.name if currentNode else None

            type = new_token["type"]

            if type == ParseErrorToken:
                self.parseError(new_token["data"], new_token.get("datavars", {}))
                new_token = None
            else:
                # Decide whether the token goes to the current HTML phase
                # or to the special "in foreign content" phase (HTML5
                # tree-construction dispatcher).
                if (len(self.tree.openElements) == 0 or
                    currentNodeNamespace == self.tree.defaultNamespace or
                    (self.isMathMLTextIntegrationPoint(currentNode) and
                     ((type == StartTagToken and
                       token["name"] not in frozenset(["mglyph", "malignmark"])) or
                      type in (CharactersToken, SpaceCharactersToken))) or
                    (currentNodeNamespace == namespaces["mathml"] and
                     currentNodeName == "annotation-xml" and
                     token["name"] == "svg") or
                    (self.isHTMLIntegrationPoint(currentNode) and
                     type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                    phase = self.phase
                else:
                    phase = self.phases["inForeignContent"]

                if type == CharactersToken:
                    new_token = phase.processCharacters(new_token)
                elif type == SpaceCharactersToken:
                    new_token = phase.processSpaceCharacters(new_token)
                elif type == StartTagToken:
                    new_token = phase.processStartTag(new_token)
                elif type == EndTagToken:
                    new_token = phase.processEndTag(new_token)
                elif type == CommentToken:
                    new_token = phase.processComment(new_token)
                elif type == DoctypeToken:
                    new_token = phase.processDoctype(new_token)

            # A self-closing slash on a non-void element is a parse error.
            if (type == StartTagToken and token["selfClosing"]
                and not token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": token["name"]})

    # When the loop finishes it's EOF
    reprocess = True
    phases = []
    while reprocess:
        phases.append(self.phase)
        reprocess = self.phase.processEOF()
        if reprocess:
            # Guard against an infinite EOF-reprocessing cycle.
            assert self.phase not in phases
def normalizedTokens(self):
    # Yield each tokenizer token after HTML5-specific normalization.
    for raw_token in self.tokenizer:
        yield self.normalizeToken(raw_token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
    """Parse a HTML document into a well-formed tree

    stream - a filelike object or string containing the HTML to be parsed

    The optional encoding parameter must be a string that indicates
    the encoding.  If specified, that encoding will be used,
    regardless of any BOM or later declaration (such as in a meta
    element)
    """
    self._parse(stream, innerHTML=False, encoding=encoding,
                parseMeta=parseMeta, useChardet=useChardet)
    # Hand back whatever document object the configured tree builder made.
    return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
                  parseMeta=False, useChardet=True):
    """Parse a HTML fragment into a well-formed tree fragment

    container - name of the element we're setting the innerHTML property
    if set to None, default to 'div'

    stream - a filelike object or string containing the HTML to be parsed

    The optional encoding parameter must be a string that indicates
    the encoding.  If specified, that encoding will be used,
    regardless of any BOM or later declaration (such as in a meta
    element)
    """
    self._parse(stream, True, container=container, encoding=encoding)
    # Fragment parsing returns the fragment node, not a full document.
    return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars=None):
    """Record a parse error at the current stream position.

    errorcode - key identifying the kind of error
    datavars - optional dict of values describing the error

    In strict mode an exception is raised on the first error.
    """
    # XXX The idea is to make errorcode mandatory.
    # A literal {} default would be one shared mutable dict across all
    # calls; use None as the sentinel instead.
    if datavars is None:
        datavars = {}
    self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
    if self.strict:
        # NOTE(review): ParseError is not among this module's visible
        # imports; unless it is defined elsewhere in the file this raise
        # would itself hit a NameError -- verify.
        raise ParseError
def normalizeToken(self, token):
    """ HTML5 specific normalizations to the token stream """
    if token["type"] == tokenTypes["StartTag"]:
        # Collapse the attribute pair list into a dict; reversing first
        # makes the FIRST occurrence of a duplicate attribute win.
        attributes = token["data"]
        token["data"] = dict(reversed(attributes))
    return token
def adjustMathMLAttributes(self, token):
    # The tokenizer lowercases attribute names, but MathML requires the
    # camelCase spelling of definitionURL; restore it here.
    data = token["data"]
    if "definitionurl" in data:
        data["definitionURL"] = data.pop("definitionurl")
def adjustSVGAttributes(self, token):
    """Rename SVG attributes to their mixed-case canonical forms in place.

    HTML tokenization lowercases attribute names; SVG requires the
    original camelCase spellings, so map them back.
    """
    replacements = {
        "attributename": "attributeName",
        "attributetype": "attributeType",
        "basefrequency": "baseFrequency",
        "baseprofile": "baseProfile",
        "calcmode": "calcMode",
        "clippathunits": "clipPathUnits",
        "contentscripttype": "contentScriptType",
        "contentstyletype": "contentStyleType",
        "diffuseconstant": "diffuseConstant",
        "edgemode": "edgeMode",
        "externalresourcesrequired": "externalResourcesRequired",
        "filterres": "filterRes",
        "filterunits": "filterUnits",
        "glyphref": "glyphRef",
        "gradienttransform": "gradientTransform",
        "gradientunits": "gradientUnits",
        "kernelmatrix": "kernelMatrix",
        "kernelunitlength": "kernelUnitLength",
        "keypoints": "keyPoints",
        "keysplines": "keySplines",
        "keytimes": "keyTimes",
        "lengthadjust": "lengthAdjust",
        "limitingconeangle": "limitingConeAngle",
        "markerheight": "markerHeight",
        "markerunits": "markerUnits",
        "markerwidth": "markerWidth",
        "maskcontentunits": "maskContentUnits",
        "maskunits": "maskUnits",
        "numoctaves": "numOctaves",
        "pathlength": "pathLength",
        "patterncontentunits": "patternContentUnits",
        "patterntransform": "patternTransform",
        "patternunits": "patternUnits",
        "pointsatx": "pointsAtX",
        "pointsaty": "pointsAtY",
        "pointsatz": "pointsAtZ",
        "preservealpha": "preserveAlpha",
        "preserveaspectratio": "preserveAspectRatio",
        "primitiveunits": "primitiveUnits",
        "refx": "refX",
        "refy": "refY",
        "repeatcount": "repeatCount",
        "repeatdur": "repeatDur",
        "requiredextensions": "requiredExtensions",
        "requiredfeatures": "requiredFeatures",
        "specularconstant": "specularConstant",
        "specularexponent": "specularExponent",
        "spreadmethod": "spreadMethod",
        "startoffset": "startOffset",
        "stddeviation": "stdDeviation",
        "stitchtiles": "stitchTiles",
        "surfacescale": "surfaceScale",
        "systemlanguage": "systemLanguage",
        "tablevalues": "tableValues",
        "targetx": "targetX",
        "targety": "targetY",
        "textlength": "textLength",
        "viewbox": "viewBox",
        "viewtarget": "viewTarget",
        "xchannelselector": "xChannelSelector",
        "ychannelselector": "yChannelSelector",
        "zoomandpan": "zoomAndPan"
    }
    # Snapshot the keys: the loop mutates the dict while renaming.
    for lowercased in list(token["data"]):
        if lowercased in replacements:
            token["data"][replacements[lowercased]] = token["data"].pop(lowercased)
def adjustForeignAttributes(self, token):
    """Rename xlink/xml/xmlns attributes to their namespaced forms.

    Uses the module-level adjustForeignAttributesMap to map lowercase
    attribute names onto their foreign (namespaced) replacements.
    """
    replacements = adjustForeignAttributesMap
    # Fix: iterate over a snapshot of the keys.  The loop deletes
    # entries from token["data"], and mutating a dict while iterating
    # its live keys() view raises RuntimeError on Python 3 (compare
    # adjustSVGAttributes above, which already takes a list copy).
    for originalName in list(token["data"].keys()):
        if originalName in replacements:
            foreignName = replacements[originalName]
            token["data"][foreignName] = token["data"][originalName]
            del token["data"][originalName]
def reparseTokenNormal(self, token):
    # NOTE(review): the token argument is unused, and self.parser.phase
    # is *called* with no arguments rather than used to re-process the
    # token -- looks suspicious; confirm against upstream before
    # relying on this code path.
    self.parser.phase()
def resetInsertionMode(self):
    # The name of this method is mostly historical. (It's also used in the
    # specification.)
    # Walk the stack of open elements from the innermost node outward
    # and switch to the parser phase matching the first recognised
    # element name.
    last = False
    newModes = {
        "select": "inSelect",
        "td": "inCell",
        "th": "inCell",
        "tr": "inRow",
        "tbody": "inTableBody",
        "thead": "inTableBody",
        "tfoot": "inTableBody",
        "caption": "inCaption",
        "colgroup": "inColumnGroup",
        "table": "inTable",
        "head": "inBody",
        "body": "inBody",
        "frameset": "inFrameset",
        "html": "beforeHead"
    }
    for node in self.tree.openElements[::-1]:
        nodeName = node.name
        new_phase = None
        if node == self.tree.openElements[0]:
            # Reached the root without a match: only possible when
            # parsing a fragment, where the context element's name
            # (stored in self.innerHTML) drives the choice instead.
            assert self.innerHTML
            last = True
            nodeName = self.innerHTML
        # Check for conditions that should only happen in the innerHTML
        # case
        if nodeName in ("select", "colgroup", "head", "html"):
            assert self.innerHTML

        if not last and node.namespace != self.tree.defaultNamespace:
            # Foreign (SVG/MathML) elements never select a phase.
            continue

        if nodeName in newModes:
            new_phase = self.phases[newModes[nodeName]]
            break
        elif last:
            # Nothing matched anywhere on the stack: fall back to
            # "in body".
            new_phase = self.phases["inBody"]
            break

    self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
    """Generic RCDATA/RAWTEXT parsing algorithm.

    contentType - "RCDATA" or "RAWTEXT"; selects the tokenizer state
    used while consuming the element's text content.
    """
    assert contentType in ("RAWTEXT", "RCDATA")

    self.tree.insertElement(token)

    # Put the tokenizer into the matching content-model state.
    self.tokenizer.state = (self.tokenizer.rawtextState
                            if contentType == "RAWTEXT"
                            else self.tokenizer.rcdataState)

    # Remember where to return once the "text" phase is done.
    self.originalPhase = self.phase
    self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
    """Logger that records which phase processes each token.

    Decorator applied (via the debug metaclass) to every Phase method;
    for process* methods it appends a (tokenizer state, parser phase,
    handling class, method, token info) tuple to parser.log before
    delegating to the wrapped method.
    """
    type_names = dict((value, key) for key, value in
                      constants.tokenTypes.items())

    def wrapped(self, *args, **kwargs):
        if function.__name__.startswith("process") and len(args) > 0:
            token = args[0]
            # The original wrapped this lookup in a bare
            # ``try: ... except: raise`` which is a no-op; look the
            # type name up directly.
            info = {"type": type_names[token['type']]}
            if token['type'] in constants.tagTokenTypes:
                info["name"] = token['name']

            self.parser.log.append((self.parser.tokenizer.state.__name__,
                                    self.parser.phase.__class__.__name__,
                                    self.__class__.__name__,
                                    function.__name__,
                                    info))
            return function(self, *args, **kwargs)
        else:
            return function(self, *args, **kwargs)
    return wrapped
def getMetaclass(use_metaclass, metaclass_func):
    """Return the metaclass for Phase classes.

    In debug mode this is a metaclass that wraps every method with
    ``metaclass_func``; otherwise the plain builtin ``type``.
    """
    if not use_metaclass:
        return type
    return method_decorator_metaclass(metaclass_func)
class Phase(with_metaclass(getMetaclass(debug, log))):
    """Base class for helper object that implements each phase of processing
    """

    def __init__(self, parser, tree):
        # parser: the owning HTMLParser; tree: the shared tree builder.
        self.parser = parser
        self.tree = tree

    def processEOF(self):
        # Each concrete phase must define its own end-of-file handling.
        raise NotImplementedError

    def processComment(self, token):
        # For most phases the following is correct. Where it's not it will be
        # overridden.
        self.tree.insertComment(token, self.tree.openElements[-1])

    def processDoctype(self, token):
        # A doctype anywhere after the initial phase is a parse error.
        self.parser.parseError("unexpected-doctype")

    def processCharacters(self, token):
        self.tree.insertText(token["data"])

    def processSpaceCharacters(self, token):
        self.tree.insertText(token["data"])

    def processStartTag(self, token):
        # Dispatch via the per-tag-name handler table built by subclasses.
        return self.startTagHandler[token["name"]](token)

    def startTagHtml(self, token):
        if not self.parser.firstStartTag and token["name"] == "html":
            self.parser.parseError("non-html-root")
        # XXX Need a check here to see if the first start tag token emitted is
        # this token... If it's not, invoke self.parser.parseError().
        # Merge attributes from a repeated <html> tag into the root,
        # keeping any attribute already present on it.
        for attr, value in token["data"].items():
            if attr not in self.tree.openElements[0].attributes:
                self.tree.openElements[0].attributes[attr] = value
        self.parser.firstStartTag = False

    def processEndTag(self, token):
        return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
    # The "initial" insertion mode: nothing has been seen yet.  Its main
    # job is choosing the document compatibility mode from the doctype.

    def processSpaceCharacters(self, token):
        # Leading whitespace before the doctype is ignored.
        pass

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        correct = token["correct"]

        # Anything other than a bare <!DOCTYPE html> (optionally with
        # the "about:legacy-compat" system id) is reported.
        if (name != "html" or publicId is not None or
                systemId is not None and systemId != "about:legacy-compat"):
            self.parser.parseError("unknown-doctype")

        if publicId is None:
            publicId = ""

        self.tree.insertDoctype(token)

        # Public identifiers are matched case-insensitively.
        if publicId != "":
            publicId = publicId.translate(asciiUpper2Lower)

        # Known legacy public/system identifiers force full "quirks"
        # mode; a missing doctype ("not correct") does too.
        if (not correct or token["name"] != "html"
            or publicId.startswith(
                ("+//silmaril//dtd html pro v0r11 19970101//",
                 "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
                 "-//as//dtd html 3.0 aswedit + extensions//",
                 "-//ietf//dtd html 2.0 level 1//",
                 "-//ietf//dtd html 2.0 level 2//",
                 "-//ietf//dtd html 2.0 strict level 1//",
                 "-//ietf//dtd html 2.0 strict level 2//",
                 "-//ietf//dtd html 2.0 strict//",
                 "-//ietf//dtd html 2.0//",
                 "-//ietf//dtd html 2.1e//",
                 "-//ietf//dtd html 3.0//",
                 "-//ietf//dtd html 3.2 final//",
                 "-//ietf//dtd html 3.2//",
                 "-//ietf//dtd html 3//",
                 "-//ietf//dtd html level 0//",
                 "-//ietf//dtd html level 1//",
                 "-//ietf//dtd html level 2//",
                 "-//ietf//dtd html level 3//",
                 "-//ietf//dtd html strict level 0//",
                 "-//ietf//dtd html strict level 1//",
                 "-//ietf//dtd html strict level 2//",
                 "-//ietf//dtd html strict level 3//",
                 "-//ietf//dtd html strict//",
                 "-//ietf//dtd html//",
                 "-//metrius//dtd metrius presentational//",
                 "-//microsoft//dtd internet explorer 2.0 html strict//",
                 "-//microsoft//dtd internet explorer 2.0 html//",
                 "-//microsoft//dtd internet explorer 2.0 tables//",
                 "-//microsoft//dtd internet explorer 3.0 html strict//",
                 "-//microsoft//dtd internet explorer 3.0 html//",
                 "-//microsoft//dtd internet explorer 3.0 tables//",
                 "-//netscape comm. corp.//dtd html//",
                 "-//netscape comm. corp.//dtd strict html//",
                 "-//o'reilly and associates//dtd html 2.0//",
                 "-//o'reilly and associates//dtd html extended 1.0//",
                 "-//o'reilly and associates//dtd html extended relaxed 1.0//",
                 "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
                 "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
                 "-//spyglass//dtd html 2.0 extended//",
                 "-//sq//dtd html 2.0 hotmetal + extensions//",
                 "-//sun microsystems corp.//dtd hotjava html//",
                 "-//sun microsystems corp.//dtd hotjava strict html//",
                 "-//w3c//dtd html 3 1995-03-24//",
                 "-//w3c//dtd html 3.2 draft//",
                 "-//w3c//dtd html 3.2 final//",
                 "-//w3c//dtd html 3.2//",
                 "-//w3c//dtd html 3.2s draft//",
                 "-//w3c//dtd html 4.0 frameset//",
                 "-//w3c//dtd html 4.0 transitional//",
                 "-//w3c//dtd html experimental 19960712//",
                 "-//w3c//dtd html experimental 970421//",
                 "-//w3c//dtd w3 html//",
                 "-//w3o//dtd w3 html 3.0//",
                 "-//webtechs//dtd mozilla html 2.0//",
                 "-//webtechs//dtd mozilla html//"))
            or publicId in
                ("-//w3o//dtd w3 html strict 3.0//en//",
                 "-/w3c/dtd html 4.0 transitional/en",
                 "html")
            # HTML 4.01 frameset/transitional is quirks only when no
            # system id is given...
            or publicId.startswith(
                ("-//w3c//dtd html 4.01 frameset//",
                 "-//w3c//dtd html 4.01 transitional//")) and
                systemId is None
            or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
            self.parser.compatMode = "quirks"
        # ...and "limited quirks" when a system id IS present.
        elif (publicId.startswith(
                ("-//w3c//dtd xhtml 1.0 frameset//",
                 "-//w3c//dtd xhtml 1.0 transitional//"))
              or publicId.startswith(
                  ("-//w3c//dtd html 4.01 frameset//",
                   "-//w3c//dtd html 4.01 transitional//")) and
              systemId is not None):
            self.parser.compatMode = "limited quirks"

        self.parser.phase = self.parser.phases["beforeHtml"]

    def anythingElse(self):
        # No doctype seen: the document is in quirks mode.
        self.parser.compatMode = "quirks"
        self.parser.phase = self.parser.phases["beforeHtml"]

    def processCharacters(self, token):
        self.parser.parseError("expected-doctype-but-got-chars")
        self.anythingElse()
        return token

    def processStartTag(self, token):
        self.parser.parseError("expected-doctype-but-got-start-tag",
                               {"name": token["name"]})
        self.anythingElse()
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-doctype-but-got-end-tag",
                               {"name": token["name"]})
        self.anythingElse()
        return token

    def processEOF(self):
        self.parser.parseError("expected-doctype-but-got-eof")
        self.anythingElse()
        return True
class BeforeHtmlPhase(Phase):
    """Phase before the root <html> element has been created.

    Nearly every token forces creation of an implied <html> root and is
    then reprocessed by the "before head" phase.
    """

    # helper methods
    def insertHtmlElement(self):
        # Create the implied <html> root and advance to "before head".
        self.tree.insertRoot(impliedTagToken("html", "StartTag"))
        self.parser.phase = self.parser.phases["beforeHead"]

    # other
    def processEOF(self):
        self.insertHtmlElement()
        return True

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        # Whitespace before <html> is dropped.
        pass

    def processCharacters(self, token):
        self.insertHtmlElement()
        return token

    def processStartTag(self, token):
        if token["name"] == "html":
            self.parser.firstStartTag = True
        self.insertHtmlElement()
        return token

    def processEndTag(self, token):
        # Only these end tags imply the root element; everything else is
        # an error and the token is ignored.
        if token["name"] in ("head", "body", "html", "br"):
            self.insertHtmlElement()
            return token
        self.parser.parseError("unexpected-end-tag-before-html",
                               {"name": token["name"]})
class BeforeHeadPhase(Phase):
    # Phase after <html> but before <head>: most tokens imply a <head>
    # start tag and are then reprocessed in the "in head" phase.

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        # Per-tag dispatch tables; unlisted names use the *Other default.
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = utils.MethodDispatcher([
            (("head", "body", "html", "br"), self.endTagImplyHead)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return True

    def processSpaceCharacters(self, token):
        # Whitespace before <head> is dropped.
        pass

    def processCharacters(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        self.tree.insertElement(token)
        # Remember the head element for later re-entry (startTagFromHead).
        self.tree.headPointer = self.tree.openElements[-1]
        self.parser.phase = self.parser.phases["inHead"]

    def startTagOther(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def endTagImplyHead(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def endTagOther(self, token):
        self.parser.parseError("end-tag-after-implied-root",
                               {"name": token["name"]})
class InHeadPhase(Phase):
    # Phase inside <head>: handles metadata elements; anything else
    # implies </head> and is reprocessed.

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("title", self.startTagTitle),
            (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
            ("script", self.startTagScript),
            (("base", "basefont", "bgsound", "command", "link"),
             self.startTagBaseLinkCommand),
            ("meta", self.startTagMeta),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = utils.MethodDispatcher([
            ("head", self.endTagHead),
            (("br", "html", "body"), self.endTagHtmlBodyBr)
        ])
        self.endTagHandler.default = self.endTagOther

    # the real thing
    def processEOF(self):
        self.anythingElse()
        return True

    def processCharacters(self, token):
        self.anythingElse()
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        # Nested <head> is ignored with an error.
        self.parser.parseError("two-heads-are-not-better-than-one")

    def startTagBaseLinkCommand(self, token):
        # Void metadata elements: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True

    def startTagMeta(self, token):
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True

        attributes = token["data"]
        # A <meta> may change the stream encoding, but only while the
        # current encoding is still "tentative".
        if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
            if "charset" in attributes:
                self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
            elif ("content" in attributes and
                  "http-equiv" in attributes and
                  attributes["http-equiv"].lower() == "content-type"):
                # Encoding it as UTF-8 here is a hack, as really we should pass
                # the abstract Unicode string, and just use the
                # ContentAttrParser on that, but using UTF-8 allows all chars
                # to be encoded and as a ASCII-superset works.
                data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
                parser = inputstream.ContentAttrParser(data)
                codec = parser.parse()
                self.parser.tokenizer.stream.changeEncoding(codec)

    def startTagTitle(self, token):
        self.parser.parseRCDataRawtext(token, "RCDATA")

    def startTagNoScriptNoFramesStyle(self, token):
        # Need to decide whether to implement the scripting-disabled case
        self.parser.parseRCDataRawtext(token, "RAWTEXT")

    def startTagScript(self, token):
        # Like parseRCDataRawtext but with the script-data tokenizer state.
        self.tree.insertElement(token)
        self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
        self.parser.originalPhase = self.parser.phase
        self.parser.phase = self.parser.phases["text"]

    def startTagOther(self, token):
        self.anythingElse()
        return token

    def endTagHead(self, token):
        node = self.parser.tree.openElements.pop()
        assert node.name == "head", "Expected head got %s" % node.name
        self.parser.phase = self.parser.phases["afterHead"]

    def endTagHtmlBodyBr(self, token):
        self.anythingElse()
        return token

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def anythingElse(self):
        # Any unhandled token implies </head> and is then reprocessed.
        self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
    # Phase between </head> and <body>: most tokens imply a <body>
    # start tag; stray head-only elements are routed back into <head>.

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("body", self.startTagBody),
            ("frameset", self.startTagFrameset),
            (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
              "style", "title"),
             self.startTagFromHead),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
                                                      self.endTagHtmlBodyBr)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.anythingElse()
        return True

    def processCharacters(self, token):
        self.anythingElse()
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagBody(self, token):
        self.parser.framesetOK = False
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inBody"]

    def startTagFrameset(self, token):
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inFrameset"]

    def startTagFromHead(self, token):
        # Head-only element after </head>: temporarily re-open the head,
        # process the token there, then remove head from the open stack.
        self.parser.parseError("unexpected-start-tag-out-of-my-head",
                               {"name": token["name"]})
        self.tree.openElements.append(self.tree.headPointer)
        self.parser.phases["inHead"].processStartTag(token)
        for node in self.tree.openElements[::-1]:
            if node.name == "head":
                self.tree.openElements.remove(node)
                break

    def startTagHead(self, token):
        self.parser.parseError("unexpected-start-tag", {"name": token["name"]})

    def startTagOther(self, token):
        self.anythingElse()
        return token

    def endTagHtmlBodyBr(self, token):
        self.anythingElse()
        return token

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def anythingElse(self):
        # Any unhandled token implies <body>; the token is reprocessed
        # in "in body" (framesetOK stays True until real content shows).
        self.tree.insertElement(impliedTagToken("body", "StartTag"))
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
    Phase.__init__(self, parser, tree)

    # Keep a ref to this for special handling of whitespace in <pre>
    self.processSpaceCharactersNonPre = self.processSpaceCharacters

    # Per-tag dispatch tables; unlisted names fall through to the
    # *Other defaults.
    self.startTagHandler = utils.MethodDispatcher([
        ("html", self.startTagHtml),
        (("base", "basefont", "bgsound", "command", "link", "meta",
          "noframes", "script", "style", "title"),
         self.startTagProcessInHead),
        ("body", self.startTagBody),
        ("frameset", self.startTagFrameset),
        # NOTE(review): "details" appears twice in this tuple --
        # harmless (both map to the same handler) but likely a typo.
        (("address", "article", "aside", "blockquote", "center", "details",
          "details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
          "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
          "section", "summary", "ul"),
         self.startTagCloseP),
        (headingElements, self.startTagHeading),
        (("pre", "listing"), self.startTagPreListing),
        ("form", self.startTagForm),
        (("li", "dd", "dt"), self.startTagListItem),
        ("plaintext", self.startTagPlaintext),
        ("a", self.startTagA),
        (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
          "strong", "tt", "u"), self.startTagFormatting),
        ("nobr", self.startTagNobr),
        ("button", self.startTagButton),
        (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
        ("xmp", self.startTagXmp),
        ("table", self.startTagTable),
        (("area", "br", "embed", "img", "keygen", "wbr"),
         self.startTagVoidFormatting),
        (("param", "source", "track"), self.startTagParamSource),
        ("input", self.startTagInput),
        ("hr", self.startTagHr),
        ("image", self.startTagImage),
        ("isindex", self.startTagIsIndex),
        ("textarea", self.startTagTextarea),
        ("iframe", self.startTagIFrame),
        (("noembed", "noframes", "noscript"), self.startTagRawtext),
        ("select", self.startTagSelect),
        (("rp", "rt"), self.startTagRpRt),
        (("option", "optgroup"), self.startTagOpt),
        (("math"), self.startTagMath),
        (("svg"), self.startTagSvg),
        (("caption", "col", "colgroup", "frame", "head",
          "tbody", "td", "tfoot", "th", "thead",
          "tr"), self.startTagMisplaced)
    ])
    self.startTagHandler.default = self.startTagOther

    self.endTagHandler = utils.MethodDispatcher([
        ("body", self.endTagBody),
        ("html", self.endTagHtml),
        (("address", "article", "aside", "blockquote", "button", "center",
          "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
          "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
          "section", "summary", "ul"), self.endTagBlock),
        ("form", self.endTagForm),
        ("p", self.endTagP),
        (("dd", "dt", "li"), self.endTagListItem),
        (headingElements, self.endTagHeading),
        (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
          "strike", "strong", "tt", "u"), self.endTagFormatting),
        (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
        ("br", self.endTagBr),
    ])
    self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
    """True if two nodes agree on name, namespace and full attribute set."""
    if node1.name != node2.name or node1.namespace != node2.namespace:
        return False
    if len(node1.attributes) != len(node2.attributes):
        return False
    # Same length, so comparing the sorted item lists checks both keys
    # and values in a single pass.
    return sorted(node1.attributes.items()) == sorted(node2.attributes.items())
# helper
def addFormattingElement(self, token):
    # Insert the element, then push it on the active formatting list,
    # keeping at most three entries that match it (same name/namespace/
    # attributes) back to the nearest Marker -- if a third match already
    # exists, the oldest one is evicted.
    self.tree.insertElement(token)
    element = self.tree.openElements[-1]

    matchingElements = []
    for node in self.tree.activeFormattingElements[::-1]:
        if node is Marker:
            break
        elif self.isMatchingFormattingElement(node, element):
            matchingElements.append(node)

    assert len(matchingElements) <= 3
    if len(matchingElements) == 3:
        # matchingElements is in newest-to-oldest order, so [-1] is the
        # oldest matching entry.
        self.tree.activeFormattingElements.remove(matchingElements[-1])
    self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
    # EOF is fine only if every still-open element is one that may be
    # left implicitly unclosed; otherwise report once and stop.
    allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
                                  "tfoot", "th", "thead", "tr", "body",
                                  "html"))
    for node in self.tree.openElements[::-1]:
        if node.name not in allowed_elements:
            self.parser.parseError("expected-closing-tag-but-got-eof")
            break
    # Stop parsing
def processSpaceCharactersDropNewline(self, token):
    # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
    # want to drop leading newlines
    data = token["data"]
    # One-shot behaviour: restore the normal whitespace handler first.
    self.processSpaceCharacters = self.processSpaceCharactersNonPre
    if (data.startswith("\n") and
        self.tree.openElements[-1].name in ("pre", "listing", "textarea")
            and not self.tree.openElements[-1].hasContent()):
        data = data[1:]
    if data:
        self.tree.reconstructActiveFormattingElements()
        self.tree.insertText(data)
def processCharacters(self, token):
    if token["data"] == "\u0000":
        # The tokenizer should always emit null on its own
        return
    self.tree.reconstructActiveFormattingElements()
    self.tree.insertText(token["data"])
    # This must be bad for performance
    # Any non-whitespace character content forbids a later <frameset>.
    if (self.parser.framesetOK and
        any([char not in spaceCharacters
             for char in token["data"]])):
        self.parser.framesetOK = False
def processSpaceCharacters(self, token):
    # Even pure whitespace reconstructs the active formatting elements
    # before being inserted (in-body behaviour).
    self.tree.reconstructActiveFormattingElements()
    self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
    # Head-only metadata elements in body are delegated to "in head".
    return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
    # A second <body> is an error; merge any new attributes into the
    # existing body element instead of creating another one.
    self.parser.parseError("unexpected-start-tag", {"name": "body"})
    if (len(self.tree.openElements) == 1
            or self.tree.openElements[1].name != "body"):
        # Only possible when parsing a fragment.
        assert self.parser.innerHTML
    else:
        self.parser.framesetOK = False
        for attr, value in token["data"].items():
            if attr not in self.tree.openElements[1].attributes:
                self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
    # <frameset> in body: only honoured while framesetOK is still set,
    # in which case the existing body element is discarded.
    self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
    if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
        assert self.parser.innerHTML
    elif not self.parser.framesetOK:
        pass
    else:
        if self.tree.openElements[1].parent:
            self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
        while self.tree.openElements[-1].name != "html":
            self.tree.openElements.pop()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
    # Block-level elements implicitly close an open <p> in button scope.
    if self.tree.elementInScope("p", variant="button"):
        self.endTagP(impliedTagToken("p"))
    self.tree.insertElement(token)
def startTagPreListing(self, token):
    if self.tree.elementInScope("p", variant="button"):
        self.endTagP(impliedTagToken("p"))
    self.tree.insertElement(token)
    self.parser.framesetOK = False
    # Drop the single newline that may immediately follow <pre>/<listing>.
    self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
    # Only one <form> may be open at a time (tracked by formPointer);
    # a nested one is ignored with an error.
    if self.tree.formPointer:
        self.parser.parseError("unexpected-start-tag", {"name": "form"})
    else:
        if self.tree.elementInScope("p", variant="button"):
            self.endTagP(impliedTagToken("p"))
        self.tree.insertElement(token)
        self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
    self.parser.framesetOK = False

    # A new list item implicitly closes a still-open sibling of the
    # same kind (li closes li; dt/dd close each other).
    stopNamesMap = {"li": ["li"],
                    "dt": ["dt", "dd"],
                    "dd": ["dt", "dd"]}
    stopNames = stopNamesMap[token["name"]]
    for node in reversed(self.tree.openElements):
        if node.name in stopNames:
            self.parser.phase.processEndTag(
                impliedTagToken(node.name, "EndTag"))
            break
        # Stop scanning at any "special" element other than the
        # addressable containers below.
        if (node.nameTuple in specialElements and
                node.name not in ("address", "div", "p")):
            break

    if self.tree.elementInScope("p", variant="button"):
        self.parser.phase.processEndTag(
            impliedTagToken("p", "EndTag"))

    self.tree.insertElement(token)
def startTagPlaintext(self, token):
    if self.tree.elementInScope("p", variant="button"):
        self.endTagP(impliedTagToken("p"))
    self.tree.insertElement(token)
    # The plaintext tokenizer state consumes the rest of the input.
    self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
    if self.tree.elementInScope("p", variant="button"):
        self.endTagP(impliedTagToken("p"))
    # A heading directly inside another heading implicitly closes it.
    if self.tree.openElements[-1].name in headingElements:
        self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
        self.tree.openElements.pop()
    self.tree.insertElement(token)
def startTagA(self, token):
    # A new <a> while another <a> is in the active formatting list
    # forces the old one closed and removed before opening the new one.
    afeAElement = self.tree.elementInActiveFormattingElements("a")
    if afeAElement:
        self.parser.parseError("unexpected-start-tag-implies-end-tag",
                               {"startName": "a", "endName": "a"})
        self.endTagFormatting(impliedTagToken("a"))
        # endTagFormatting may not have removed it from both lists.
        if afeAElement in self.tree.openElements:
            self.tree.openElements.remove(afeAElement)
        if afeAElement in self.tree.activeFormattingElements:
            self.tree.activeFormattingElements.remove(afeAElement)
    self.tree.reconstructActiveFormattingElements()
    self.addFormattingElement(token)
def startTagFormatting(self, token):
    # b/big/code/em/... : insert and track as active formatting.
    self.tree.reconstructActiveFormattingElements()
    self.addFormattingElement(token)
def startTagNobr(self, token):
    self.tree.reconstructActiveFormattingElements()
    # A nested <nobr> first closes the open one.
    if self.tree.elementInScope("nobr"):
        self.parser.parseError("unexpected-start-tag-implies-end-tag",
                               {"startName": "nobr", "endName": "nobr"})
        self.processEndTag(impliedTagToken("nobr"))
        # XXX Need tests that trigger the following
        self.tree.reconstructActiveFormattingElements()
    self.addFormattingElement(token)
def startTagButton(self, token):
    # A nested <button> closes the outer one; the token is returned so
    # the parser reprocesses it afterwards.
    if self.tree.elementInScope("button"):
        self.parser.parseError("unexpected-start-tag-implies-end-tag",
                               {"startName": "button", "endName": "button"})
        self.processEndTag(impliedTagToken("button"))
        return token
    else:
        self.tree.reconstructActiveFormattingElements()
        self.tree.insertElement(token)
        self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
    self.tree.reconstructActiveFormattingElements()
    self.tree.insertElement(token)
    # Push a Marker so formatting inside doesn't leak out of the element.
    self.tree.activeFormattingElements.append(Marker)
    self.parser.framesetOK = False
def startTagXmp(self, token):
    if self.tree.elementInScope("p", variant="button"):
        self.endTagP(impliedTagToken("p"))
    self.tree.reconstructActiveFormattingElements()
    self.parser.framesetOK = False
    # <xmp> content is parsed as raw text.
    self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
    # In quirks mode <table> does NOT implicitly close an open <p>.
    if self.parser.compatMode != "quirks":
        if self.tree.elementInScope("p", variant="button"):
            self.processEndTag(impliedTagToken("p"))
    self.tree.insertElement(token)
    self.parser.framesetOK = False
    self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
    # area/br/embed/img/keygen/wbr: void elements, popped immediately.
    self.tree.reconstructActiveFormattingElements()
    self.tree.insertElement(token)
    self.tree.openElements.pop()
    token["selfClosingAcknowledged"] = True
    self.parser.framesetOK = False
def startTagInput(self, token):
    framesetOK = self.parser.framesetOK
    self.startTagVoidFormatting(token)
    if ("type" in token["data"] and
            token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
        # input type=hidden doesn't change framesetOK
        self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
    # param/source/track: void elements, popped immediately.
    self.tree.insertElement(token)
    self.tree.openElements.pop()
    token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
    # <hr> closes an open <p> and is itself a void element.
    if self.tree.elementInScope("p", variant="button"):
        self.endTagP(impliedTagToken("p"))
    self.tree.insertElement(token)
    self.tree.openElements.pop()
    token["selfClosingAcknowledged"] = True
    self.parser.framesetOK = False
def startTagImage(self, token):
    # No really...
    # Legacy quirk: <image> is reprocessed as <img> with the same
    # attributes.
    self.parser.parseError("unexpected-start-tag-treated-as",
                           {"originalName": "image", "newName": "img"})
    self.processStartTag(impliedTagToken("img", "StartTag",
                                         attributes=token["data"],
                                         selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
    # Legacy <isindex>: rewritten into an equivalent
    # <form><hr><label>prompt<input name=isindex></label><hr></form>
    # sequence, unless a form is already open.
    self.parser.parseError("deprecated-tag", {"name": "isindex"})
    if self.tree.formPointer:
        return
    form_attrs = {}
    if "action" in token["data"]:
        # The action attribute moves onto the synthesized <form>.
        form_attrs["action"] = token["data"]["action"]
    self.processStartTag(impliedTagToken("form", "StartTag",
                                         attributes=form_attrs))
    self.processStartTag(impliedTagToken("hr", "StartTag"))
    self.processStartTag(impliedTagToken("label", "StartTag"))
    # XXX Localization ...
    if "prompt" in token["data"]:
        prompt = token["data"]["prompt"]
    else:
        prompt = "This is a searchable index. Enter search keywords: "
    self.processCharacters(
        {"type": tokenTypes["Characters"], "data": prompt})
    # Remaining attributes (minus action/prompt) go onto the <input>.
    attributes = token["data"].copy()
    if "action" in attributes:
        del attributes["action"]
    if "prompt" in attributes:
        del attributes["prompt"]
    attributes["name"] = "isindex"
    self.processStartTag(impliedTagToken("input", "StartTag",
                                         attributes=attributes,
                                         selfClosing=token["selfClosing"]))
    self.processEndTag(impliedTagToken("label"))
    self.processStartTag(impliedTagToken("hr", "StartTag"))
    self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
    self.tree.insertElement(token)
    # Content is RCDATA; a single leading newline is dropped.
    self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
    self.processSpaceCharacters = self.processSpaceCharactersDropNewline
    self.parser.framesetOK = False
def startTagIFrame(self, token):
    # <iframe> content is raw text and forbids a later <frameset>.
    self.parser.framesetOK = False
    self.startTagRawtext(token)
def startTagRawtext(self, token):
    """iframe, noembed noframes, noscript(if scripting enabled)"""
    self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
    # An open <option> is implicitly closed by a following <option> or
    # <optgroup>.
    if self.tree.openElements[-1].name == "option":
        self.parser.phase.processEndTag(impliedTagToken("option"))
    self.tree.reconstructActiveFormattingElements()
    # Consistency fix: every other handler inserts via self.tree; the
    # original used self.parser.tree here, which is the same object.
    self.tree.insertElement(token)
def startTagSelect(self, token):
    self.tree.reconstructActiveFormattingElements()
    self.tree.insertElement(token)
    self.parser.framesetOK = False
    # A <select> inside table-related phases uses the special
    # "in select in table" mode so table end tags can still escape it.
    if self.parser.phase in (self.parser.phases["inTable"],
                             self.parser.phases["inCaption"],
                             self.parser.phases["inColumnGroup"],
                             self.parser.phases["inTableBody"],
                             self.parser.phases["inRow"],
                             self.parser.phases["inCell"]):
        self.parser.phase = self.parser.phases["inSelectInTable"]
    else:
        self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
    # <rp>/<rt> close any implied end tags back to an open <ruby>.
    if self.tree.elementInScope("ruby"):
        self.tree.generateImpliedEndTags()
        if self.tree.openElements[-1].name != "ruby":
            self.parser.parseError()
    self.tree.insertElement(token)
def startTagMath(self, token):
    # Entry into MathML foreign content: fix attribute casing and
    # namespaces, then insert in the MathML namespace.
    self.tree.reconstructActiveFormattingElements()
    self.parser.adjustMathMLAttributes(token)
    self.parser.adjustForeignAttributes(token)
    token["namespace"] = namespaces["mathml"]
    self.tree.insertElement(token)
    # Need to get the parse error right for the case where the token
    # has a namespace not equal to the xmlns attribute
    if token["selfClosing"]:
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
    # Entry into SVG foreign content: fix attribute casing and
    # namespaces, then insert in the SVG namespace.
    self.tree.reconstructActiveFormattingElements()
    self.parser.adjustSVGAttributes(token)
    self.parser.adjustForeignAttributes(token)
    token["namespace"] = namespaces["svg"]
    self.tree.insertElement(token)
    # Need to get the parse error right for the case where the token
    # has a namespace not equal to the xmlns attribute
    if token["selfClosing"]:
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
    """ Elements that should be children of other elements that have a
    different insertion mode; here they are ignored
    "caption", "col", "colgroup", "frame", "frameset", "head",
    "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
    "tr", "noscript"
    """
    self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
        def endTagP(self, token):
            """Handle </p>; synthesise an open <p> first if none is in scope."""
            if not self.tree.elementInScope("p", variant="button"):
                # No <p> in button scope: open one so this end tag has
                # something to close, report the error, then re-enter.
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                # Pop everything up to and including the nearest open <p>.
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()
        def endTagBody(self, token):
            """Handle </body>: report stray open elements, then switch to the
            "after body" phase.  Note the open-element stack is NOT popped."""
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                # Skip the two implicit html/body entries; anything open that
                # is not in the allowed list below is reported (first one only).
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"expectedName": "body", "gotName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
        def endTagForm(self, token):
            """Handle </form> via the form element pointer, not the stack."""
            node = self.tree.formPointer
            # The pointer is cleared unconditionally, even on a parse error.
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                # Remove the form element wherever it sits in the stack,
                # without popping the elements above it.
                self.tree.openElements.remove(node)
        def endTagListItem(self, token):
            """Handle </li>, </dd> and </dt>."""
            if token["name"] == "li":
                # <li> uses the special "list item" scope variant.
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                # Keep the element itself open while generating implied ends.
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                # Pop everything up to and including the matching element.
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagHeading(self, token):
            """Handle </h1>..</h6>; any open heading satisfies any heading
            end tag."""
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            for item in headingElements:
                if self.tree.elementInScope(item):
                    # Pop up to and including the nearest heading element,
                    # whatever its exact name.
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break
        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm.

            Handles end tags for formatting elements (<a>, <b>, <i>, ...)
            that may be mis-nested relative to block elements.  The step
            numbering in the comments follows the spec revision cited below;
            the exact statement order is load-bearing throughout.
            """
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.
            # Step 1
            outerLoopCounter = 0
            # Step 2
            while outerLoopCounter < 8:
                # Step 3
                outerLoopCounter += 1
                # Step 4:
                # Let the formatting element be the last element in
                # the list of active formatting elements that:
                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and
                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return
                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return
                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return
                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
                # Step 5:
                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break
                # Step 6:
                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return
                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]
                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element. Note the bookmark
                # can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)
                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0
                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    # Step 9.5: nodes that are not in the active formatting
                    # list are dropped from the stack and skipped.
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node
                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)
                # Step 11
                clone = formattingElement.cloneNode()
                # Step 12
                furthestBlock.reparentChildren(clone)
                # Step 13
                furthestBlock.appendChild(clone)
                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)
                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
        def endTagOther(self, token):
            """The "any other end tag" steps for the in-body insertion mode."""
            # Walk the open elements from the innermost outwards.
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    # Pop everything up to and including the matching node.
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    # Hitting a special-category element before a match means
                    # the end tag is ignored entirely.
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
    class TextPhase(Phase):
        """Phase for RCDATA/RAWTEXT element content (<script>, <style>,
        <textarea>, ...): only character data and the matching end tag can
        legitimately occur here."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # No start tags are legal in text mode; everything falls through
            # to startTagOther, which asserts.
            self.startTagHandler = utils.MethodDispatcher([])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("script", self.endTagScript)])
            self.endTagHandler.default = self.endTagOther
        def processCharacters(self, token):
            self.tree.insertText(token["data"])
        def processEOF(self):
            # EOF inside RCDATA/RAWTEXT: close the element and reprocess EOF
            # in the phase that entered text mode.
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True
        def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works
        def endTagOther(self, token):
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
    class InTablePhase(Phase):
        """Insertion mode "in table": table structure tags are handled
        directly; anything else is reprocessed via in-body with the
        insertFromTable (foster parenting) flag set."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("caption", self.startTagCaption),
                ("colgroup", self.startTagColgroup),
                ("col", self.startTagCol),
                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
                (("td", "th", "tr"), self.startTagImplyTbody),
                ("table", self.startTagTable),
                (("style", "script"), self.startTagStyleScript),
                ("input", self.startTagInput),
                ("form", self.startTagForm)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name":  self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case
        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing
        def processSpaceCharacters(self, token):
            # Buffer characters in the "in table text" phase so runs of
            # whitespace/text can be handled as one unit.
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)
        def processCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)
        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False
        def startTagCaption(self, token):
            self.clearStackToTableContext()
            # <caption> pushes a marker so formatting elements don't leak out.
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]
        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]
        def startTagCol(self, token):
            # A bare <col> implies <colgroup>; reprocess the token there.
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token
        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]
        def startTagImplyTbody(self, token):
            # Bare <td>/<th>/<tr> implies <tbody>; reprocess the token there.
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token
        def startTagTable(self, token):
            # A nested <table> implicitly closes the current one.
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token
        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagInput(self, token):
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # Hidden inputs may sit directly in the table.
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)
        def startTagForm(self, token):
            # The form element is inserted and immediately popped: it may not
            # contain content while in table context.
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()
        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False
        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                # Pop everything up to and including the <table>.
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
    class InCaptionPhase(Phase):
        """Insertion mode "in caption": mostly defers to in-body, but table
        structure tags implicitly close the caption."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableElement)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("caption", self.endTagCaption),
                ("table", self.endTagTable),
                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagCaption(self):
            # </caption> is ignored when no <caption> is in table scope
            # (the innerHTML case).
            return not self.tree.elementInScope("caption", variant="table")
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableElement(self, token):
            self.parser.parseError()
            # XXX Have to duplicate logic here to find out if the tag is ignored
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagCaption(self, token):
            if not self.ignoreEndTagCaption():
                # AT this code is quite similar to endTagTable in "InTable"
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "caption":
                    self.parser.parseError("expected-one-end-tag-but-got-another",
                                           {"gotName": "caption",
                                            "expectedName": self.tree.openElements[-1].name})
                # Pop everything up to and including the <caption>, then
                # clear formatting elements back to the caption's marker.
                while self.tree.openElements[-1].name != "caption":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            self.parser.parseError()
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
    class InColumnGroupPhase(Phase):
        """Insertion mode "in column group": only <col> is expected; anything
        else implicitly closes the colgroup and is reprocessed."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("col", self.startTagCol)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("colgroup", self.endTagColgroup),
                ("col", self.endTagCol)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagColgroup(self):
            # Current node being <html> means the innerHTML case.
            return self.tree.openElements[-1].name == "html"
        def processEOF(self):
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
                return
            else:
                ignoreEndTag = self.ignoreEndTagColgroup()
                self.endTagColgroup(impliedTagToken("colgroup"))
                if not ignoreEndTag:
                    return True
        def processCharacters(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def startTagCol(self, token):
            # <col> is void: insert and immediately pop.
            self.tree.insertElement(token)
            self.tree.openElements.pop()
        def startTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def endTagColgroup(self, token):
            if self.ignoreEndTagColgroup():
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
            else:
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
        def endTagCol(self, token):
            # <col> is void, so an end tag never matches anything.
            self.parser.parseError("no-end-tag", {"name": "col"})
        def endTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
    class InTableBodyPhase(Phase):
        """Insertion mode "in table body" (tbody/tfoot/thead content)."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("tr", self.startTagTr),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
                 self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "td", "th",
                  "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableBodyContext(self):
            # Pop until the current node is a row group (or <html> in the
            # innerHTML case).
            while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                          "thead", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()
        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)
        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)
        def startTagTr(self, token):
            self.clearStackToTableBodyContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inRow"]
        def startTagTableCell(self, token):
            # A bare cell implies <tr>; reprocess the token in the row.
            self.parser.parseError("unexpected-cell-in-table-body",
                                   {"name": token["name"]})
            self.startTagTr(impliedTagToken("tr", "StartTag"))
            return token
        def startTagTableOther(self, token):
            # XXX AT Any ideas on how to share this with endTagTable?
            if (self.tree.elementInScope("tbody", variant="table") or
                self.tree.elementInScope("thead", variant="table") or
                    self.tree.elementInScope("tfoot", variant="table")):
                # Close the current row group and reprocess the token.
                self.clearStackToTableBodyContext()
                self.endTagTableRowGroup(
                    impliedTagToken(self.tree.openElements[-1].name))
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)
        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.clearStackToTableBodyContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                self.parser.parseError("unexpected-end-tag-in-table-body",
                                       {"name": token["name"]})
        def endTagTable(self, token):
            if (self.tree.elementInScope("tbody", variant="table") or
                self.tree.elementInScope("thead", variant="table") or
                    self.tree.elementInScope("tfoot", variant="table")):
                self.clearStackToTableBodyContext()
                self.endTagTableRowGroup(
                    impliedTagToken(self.tree.openElements[-1].name))
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-body",
                                   {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)
    class InRowPhase(Phase):
        """Insertion mode "in row" (<tr> content)."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-row
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
                  "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("tr", self.endTagTr),
                ("table", self.endTagTable),
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                (("body", "caption", "col", "colgroup", "html", "td", "th"),
                 self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods (XXX unify this with other table helper methods)
        def clearStackToTableRowContext(self):
            # Pop until the current node is <tr> (or <html> in innerHTML).
            while self.tree.openElements[-1].name not in ("tr", "html"):
                self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                       {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
        def ignoreEndTagTr(self):
            # </tr> is ignored when no <tr> is in table scope (innerHTML).
            return not self.tree.elementInScope("tr", variant="table")
        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()
        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)
        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)
        def startTagTableCell(self, token):
            self.clearStackToTableRowContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCell"]
            # The cell pushes a marker for the formatting element list.
            self.tree.activeFormattingElements.append(Marker)
        def startTagTableOther(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)
        def endTagTr(self, token):
            if not self.ignoreEndTagTr():
                self.clearStackToTableRowContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTableBody"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # Reprocess the current tag if the tr end tag was not ignored
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token
        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                # Close the row first, then reprocess the row-group end tag.
                self.endTagTr(impliedTagToken("tr"))
                return token
            else:
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-row",
                                   {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)
    class InCellPhase(Phase):
        """Insertion mode "in cell" (<td>/<th> content)."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                (("td", "th"), self.endTagTableCell),
                (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
                (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper
        def closeCell(self):
            # Close whichever cell type is currently open, if any.
            if self.tree.elementInScope("td", variant="table"):
                self.endTagTableCell(impliedTagToken("td"))
            elif self.tree.elementInScope("th", variant="table"):
                self.endTagTableCell(impliedTagToken("th"))
        # the rest
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableOther(self, token):
            if (self.tree.elementInScope("td", variant="table") or
                    self.tree.elementInScope("th", variant="table")):
                # A new table-structure tag implicitly closes the cell.
                self.closeCell()
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagTableCell(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.tree.generateImpliedEndTags(token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError("unexpected-cell-end-tag",
                                           {"name": token["name"]})
                    # Pop everything up to and including the matching cell.
                    while True:
                        node = self.tree.openElements.pop()
                        if node.name == token["name"]:
                            break
                else:
                    self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inRow"]
            else:
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagImply(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                # Close the cell, then reprocess the structural end tag.
                self.closeCell()
                return token
            else:
                # sometimes innerHTML case
                self.parser.parseError()
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
    class InSelectPhase(Phase):
        """Insertion mode "in select": only option/optgroup structure is
        built; most other tags are errors or force the select closed."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("option", self.startTagOption),
                ("optgroup", self.startTagOptgroup),
                ("select", self.startTagSelect),
                (("input", "keygen", "textarea"), self.startTagInput),
                ("script", self.startTagScript)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("option", self.endTagOption),
                ("optgroup", self.endTagOptgroup),
                ("select", self.endTagSelect)
            ])
            self.endTagHandler.default = self.endTagOther
        # http://www.whatwg.org/specs/web-apps/current-work/#in-select
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-select")
            else:
                assert self.parser.innerHTML
        def processCharacters(self, token):
            # U+0000 characters are dropped outright.
            if token["data"] == "\u0000":
                return
            self.tree.insertText(token["data"])
        def startTagOption(self, token):
            # We need to imply </option> if <option> is the current node.
            if self.tree.openElements[-1].name == "option":
                self.tree.openElements.pop()
            self.tree.insertElement(token)
        def startTagOptgroup(self, token):
            # <optgroup> implicitly closes any open <option> and <optgroup>.
            if self.tree.openElements[-1].name == "option":
                self.tree.openElements.pop()
            if self.tree.openElements[-1].name == "optgroup":
                self.tree.openElements.pop()
            self.tree.insertElement(token)
        def startTagSelect(self, token):
            # A nested <select> acts like </select>.
            self.parser.parseError("unexpected-select-in-select")
            self.endTagSelect(impliedTagToken("select"))
        def startTagInput(self, token):
            # <input>/<keygen>/<textarea> force the select closed, then the
            # token is reprocessed outside it.
            self.parser.parseError("unexpected-input-in-select")
            if self.tree.elementInScope("select", variant="select"):
                self.endTagSelect(impliedTagToken("select"))
                return token
            else:
                assert self.parser.innerHTML
        def startTagScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-in-select",
                                   {"name": token["name"]})
        def endTagOption(self, token):
            if self.tree.openElements[-1].name == "option":
                self.tree.openElements.pop()
            else:
                self.parser.parseError("unexpected-end-tag-in-select",
                                       {"name": "option"})
        def endTagOptgroup(self, token):
            # </optgroup> implicitly closes <option>
            if (self.tree.openElements[-1].name == "option" and
                    self.tree.openElements[-2].name == "optgroup"):
                self.tree.openElements.pop()
            # It also closes </optgroup>
            if self.tree.openElements[-1].name == "optgroup":
                self.tree.openElements.pop()
            # But nothing else
            else:
                self.parser.parseError("unexpected-end-tag-in-select",
                                       {"name": "optgroup"})
        def endTagSelect(self, token):
            if self.tree.elementInScope("select", variant="select"):
                # Pop everything up to and including the <select>.
                node = self.tree.openElements.pop()
                while node.name != "select":
                    node = self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": token["name"]})
class InSelectInTablePhase(Phase):
    # http://www.whatwg.org/specs/web-apps/current-work/#in-select-in-table
    # Like "in select", except table-structure tags force the <select> closed
    # and are then reprocessed in the resulting insertion mode.
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.startTagTable)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.endTagTable)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # EOF is handled exactly as in the plain "in select" mode.
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # A table-structure start tag implicitly closes the <select>; the
        # token is returned so the parser reprocesses it.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        return token

    def startTagOther(self, token):
        # Everything else behaves as in "in select".
        return self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        # Only close the select when the named table element is actually open.
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            return token

    def endTagOther(self, token):
        return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    # Handles tokens while the current node is in the SVG or MathML namespace.
    # HTML elements whose start tags "break out" of foreign content:
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def adjustSVGTagNames(self, token):
        # SVG element names are case-sensitive; restore the canonical
        # camelCase spelling that the tokenizer lower-cased.
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}
        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        # U+0000 is replaced by U+FFFD per the spec.
        if token["data"] == "\u0000":
            token["data"] = "\uFFFD"
        elif (self.parser.framesetOK and
              any(char not in spaceCharacters for char in token["data"])):
            # Any non-space character means a <frameset> can no longer
            # replace the body.
            self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        if (token["name"] in self.breakoutElements or
            (token["name"] == "font" and
             set(token["data"].keys()) & set(["color", "face", "size"]))):
            # HTML breakout: pop foreign elements until we reach HTML content
            # or an integration point, then let the parser reprocess the
            # token in the current (HTML) phase.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   {"name": token["name"]})
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace and
                   not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                   not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                self.tree.openElements.pop()
            return token
        else:
            # Fix up case/namespace-sensitive names and attributes, then
            # insert the element in the current (foreign) namespace.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        # Walk the open-element stack from the top looking for a matching
        # foreign element; hand off to the HTML phase if we hit HTML first.
        nodeIndex = len(self.tree.openElements) - 1
        node = self.tree.openElements[-1]
        if node.name != token["name"]:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        while True:
            if node.name.translate(asciiUpper2Lower) == token["name"]:
                # XXX this isn't in the spec but it seems necessary
                if self.parser.phase == self.parser.phases["inTableText"]:
                    self.parser.phase.flushCharacters()
                    self.parser.phase = self.parser.phase.originalPhase
                # Pop everything up to and including the matching node.
                while self.tree.openElements.pop() != node:
                    assert self.tree.openElements
                new_token = None
                break
            nodeIndex -= 1
            node = self.tree.openElements[nodeIndex]
            if node.namespace != self.tree.defaultNamespace:
                continue
            else:
                # Reached an HTML element: process the end tag in the
                # current (HTML) insertion mode instead.
                new_token = self.parser.phase.processEndTag(token)
                break
        return new_token
class AfterBodyPhase(Phase):
    # http://www.whatwg.org/specs/web-apps/current-work/#after-body
    # Only comments and </html> are expected here; anything else re-enters
    # "in body".
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processComment(self, token):
        # This is needed because data is to be appended to the <html> element
        # here and not to whatever is currently open.
        self.tree.insertComment(token, self.tree.openElements[0])

    def processCharacters(self, token):
        # Stray characters re-open the body; token is reprocessed there.
        self.parser.parseError("unexpected-char-after-body")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def endTagHtml(self, name):
        # NOTE: parameter is the token despite the historical name "name".
        if self.parser.innerHTML:
            self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
        else:
            self.parser.phase = self.parser.phases["afterAfterBody"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class InFramesetPhase(Phase):
    # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("frameset", self.endTagFrameset)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            # Only the fragment case may legitimately end here.
            assert self.parser.innerHTML

    def processCharacters(self, token):
        # Character data is not allowed inside a frameset; drop it.
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
                self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    # http://www.whatwg.org/specs/web-apps/current-work/#after3
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        # Characters after </frameset> are dropped with a parse error.
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    # http://www.whatwg.org/specs/web-apps/current-work/#the-end
    # Final mode after </html>: only comments and whitespace are expected.
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here attach to the Document node itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        # Non-space characters re-open "in body"; token is reprocessed.
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class AfterAfterFramesetPhase(Phase):
    # Final mode after </html> in a frameset document; unlike the body
    # variant, unexpected tokens are dropped rather than reprocessed.
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here attach to the Document node itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
# Map each insertion-mode name to its Phase class; the parser looks these
# up by name and instantiates them per document.
return {
    "initial": InitialPhase,
    "beforeHtml": BeforeHtmlPhase,
    "beforeHead": BeforeHeadPhase,
    "inHead": InHeadPhase,
    # XXX "inHeadNoscript": InHeadNoScriptPhase,
    "afterHead": AfterHeadPhase,
    "inBody": InBodyPhase,
    "text": TextPhase,
    "inTable": InTablePhase,
    "inTableText": InTableTextPhase,
    "inCaption": InCaptionPhase,
    "inColumnGroup": InColumnGroupPhase,
    "inTableBody": InTableBodyPhase,
    "inRow": InRowPhase,
    "inCell": InCellPhase,
    "inSelect": InSelectPhase,
    "inSelectInTable": InSelectInTablePhase,
    "inForeignContent": InForeignContentPhase,
    "afterBody": AfterBodyPhase,
    "inFrameset": InFramesetPhase,
    "afterFrameset": AfterFramesetPhase,
    "afterAfterBody": AfterAfterBodyPhase,
    "afterAfterFrameset": AfterAfterFramesetPhase,
    # "afterAfterFrameset" is the last insertion mode defined by the spec.
}
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic tag token of the given *type* for element *name*.

    Used by the tree-construction phases to inject tags the spec says are
    implied (e.g. an implicit ``</select>``).  ``type`` keeps its historical
    builtin-shadowing name because callers pass it as a keyword.
    """
    return {
        "type": tokenTypes[type],
        "name": name,
        "data": {} if attributes is None else attributes,
        "selfClosing": selfClosing,
    }
class ParseError(Exception):
    """Error in parsed document (raised when parsing in strict mode)."""
    pass
# --- file boundary: lib/flask/testsuite/helpers.py (repo justinlulejian/fah-gae) ---
# -*- coding: utf-8 -*-
"""
flask.testsuite.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr
from werkzeug.http import parse_cache_control_header, parse_options_header
from flask._compat import StringIO, text_type
def has_encoding(name):
    """Return True if *name* is a codec known to this Python build."""
    import codecs
    try:
        codecs.lookup(name)
    except LookupError:
        return False
    return True
class JSONTestCase(FlaskTestCase):
    """Tests for Flask's JSON request parsing and response helpers."""

    def test_json_bad_requests(self):
        # A malformed JSON body must produce a 400, not a 500.
        app = flask.Flask(__name__)

        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.jsonify(foo=text_type(flask.request.get_json()))
        c = app.test_client()
        rv = c.post('/json', data='malformed', content_type='application/json')
        self.assert_equal(rv.status_code, 400)

    def test_json_body_encoding(self):
        # get_json must honour the charset from the Content-Type header.
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            return flask.request.get_json()

        c = app.test_client()
        resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
                     content_type='application/json; charset=iso-8859-15')
        self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8'))

    def test_jsonify(self):
        # jsonify accepts both keyword arguments and a single dict.
        d = dict(a=23, b=42, c=[1, 2, 3])
        app = flask.Flask(__name__)

        @app.route('/kw')
        def return_kwargs():
            return flask.jsonify(**d)

        @app.route('/dict')
        def return_dict():
            return flask.jsonify(d)
        c = app.test_client()
        for url in '/kw', '/dict':
            rv = c.get(url)
            self.assert_equal(rv.mimetype, 'application/json')
            self.assert_equal(flask.json.loads(rv.data), d)

    def test_json_as_unicode(self):
        # JSON_AS_ASCII toggles \uXXXX escaping of non-ASCII characters.
        app = flask.Flask(__name__)

        app.config['JSON_AS_ASCII'] = True
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            self.assert_equal(rv, '"\\u2603"')

        app.config['JSON_AS_ASCII'] = False
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            self.assert_equal(rv, u'"\u2603"')

    def test_json_attr(self):
        app = flask.Flask(__name__)

        @app.route('/add', methods=['POST'])
        def add():
            json = flask.request.get_json()
            return text_type(json['a'] + json['b'])
        c = app.test_client()
        rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
                    content_type='application/json')
        self.assert_equal(rv.data, b'3')

    def test_template_escaping(self):
        # The |tojson filter must escape characters that could break out of
        # a <script> block.
        app = flask.Flask(__name__)
        render = flask.render_template_string
        with app.test_request_context():
            rv = flask.json.htmlsafe_dumps('</script>')
            self.assert_equal(rv, u'"\\u003c/script\\u003e"')
            self.assert_equal(type(rv), text_type)
            rv = render('{{ "</script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c/script\\u003e"')
            rv = render('{{ "<\0/script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"')
            rv = render('{{ "<!--<script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"')
            rv = render('{{ "&"|tojson }}')
            self.assert_equal(rv, '"\\u0026"')

    def test_json_customization(self):
        # Apps can plug in their own JSONEncoder/JSONDecoder subclasses.
        class X(object):
            def __init__(self, val):
                self.val = val

        class MyEncoder(flask.json.JSONEncoder):
            def default(self, o):
                if isinstance(o, X):
                    return '<%d>' % o.val
                return flask.json.JSONEncoder.default(self, o)

        class MyDecoder(flask.json.JSONDecoder):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('object_hook', self.object_hook)
                flask.json.JSONDecoder.__init__(self, *args, **kwargs)

            def object_hook(self, obj):
                if len(obj) == 1 and '_foo' in obj:
                    return X(obj['_foo'])
                return obj
        app = flask.Flask(__name__)
        app.testing = True
        app.json_encoder = MyEncoder
        app.json_decoder = MyDecoder

        @app.route('/', methods=['POST'])
        def index():
            return flask.json.dumps(flask.request.get_json()['x'])
        c = app.test_client()
        rv = c.post('/', data=flask.json.dumps({
            'x': {'_foo': 42}
        }), content_type='application/json')
        self.assertEqual(rv.data, b'"<42>"')

    def test_modified_url_encoding(self):
        # A custom Request class may change the URL charset (here EUC-KR).
        class ModifiedRequest(flask.Request):
            url_charset = 'euc-kr'

        app = flask.Flask(__name__)
        app.testing = True
        app.request_class = ModifiedRequest
        app.url_map.charset = 'euc-kr'

        @app.route('/')
        def index():
            return flask.request.args['foo']

        rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
        self.assert_equal(rv.status_code, 200)
        self.assert_equal(rv.data, u'정상처리'.encode('utf-8'))

    # Skip the test entirely when this Python build lacks the euc-kr codec.
    if not has_encoding('euc-kr'):
        test_modified_url_encoding = None

    def test_json_key_sorting(self):
        # With JSON_SORT_KEYS (the default) keys serialize in sorted order.
        app = flask.Flask(__name__)
        app.testing = True
        self.assert_equal(app.config['JSON_SORT_KEYS'], True)
        d = dict.fromkeys(range(20), 'foo')

        @app.route('/')
        def index():
            return flask.jsonify(values=d)
        c = app.test_client()
        rv = c.get('/')
        lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
        self.assert_equal(lines, [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo"',
            '}',
            '}'
        ])
class SendfileTestCase(FlaskTestCase):
    """Tests for flask.send_file / send_static_file behaviour."""

    def test_send_file_regular(self):
        app = flask.Flask(__name__)
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            # Filename-based sends use direct passthrough of the file object.
            self.assert_true(rv.direct_passthrough)
            self.assert_equal(rv.mimetype, 'text/html')
            with app.open_resource('static/index.html') as f:
                rv.direct_passthrough = False
                self.assert_equal(rv.data, f.read())
            rv.close()

    def test_send_file_xsendfile(self):
        # With use_x_sendfile the body is delegated to the web server via
        # the X-Sendfile header.
        app = flask.Flask(__name__)
        app.use_x_sendfile = True
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            self.assert_true(rv.direct_passthrough)
            self.assert_in('x-sendfile', rv.headers)
            self.assert_equal(rv.headers['x-sendfile'],
                              os.path.join(app.root_path, 'static/index.html'))
            self.assert_equal(rv.mimetype, 'text/html')
            rv.close()

    def test_send_file_object(self):
        # Passing an open file object (instead of a name) emits warnings for
        # mimetype guessing and etag generation; verify both paths.
        app = flask.Flask(__name__)
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                with app.open_resource('static/index.html') as f:
                    self.assert_equal(rv.data, f.read())
                self.assert_equal(rv.mimetype, 'text/html')
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        app.use_x_sendfile = True
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                self.assert_equal(rv.mimetype, 'text/html')
                self.assert_in('x-sendfile', rv.headers)
                self.assert_equal(rv.headers['x-sendfile'],
                                  os.path.join(app.root_path, 'static/index.html'))
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        app.use_x_sendfile = False
        with app.test_request_context():
            with catch_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                self.assert_equal(rv.data, b'Test')
                self.assert_equal(rv.mimetype, 'application/octet-stream')
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)
            with catch_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f, mimetype='text/plain')
                rv.direct_passthrough = False
                self.assert_equal(rv.data, b'Test')
                self.assert_equal(rv.mimetype, 'text/plain')
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)

        app.use_x_sendfile = True
        with catch_warnings() as captured:
            with app.test_request_context():
                f = StringIO('Test')
                rv = flask.send_file(f)
                # In-memory streams cannot use X-Sendfile.
                self.assert_not_in('x-sendfile', rv.headers)
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)

    def test_attachment(self):
        # as_attachment must set a Content-Disposition: attachment header
        # with the proper filename.
        app = flask.Flask(__name__)
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f, as_attachment=True)
                value, options = parse_options_header(rv.headers['Content-Disposition'])
                self.assert_equal(value, 'attachment')
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        with app.test_request_context():
            self.assert_equal(options['filename'], 'index.html')
            rv = flask.send_file('static/index.html', as_attachment=True)
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            self.assert_equal(value, 'attachment')
            self.assert_equal(options['filename'], 'index.html')
            rv.close()

        with app.test_request_context():
            rv = flask.send_file(StringIO('Test'), as_attachment=True,
                                 attachment_filename='index.txt',
                                 add_etags=False)
            self.assert_equal(rv.mimetype, 'text/plain')
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            self.assert_equal(value, 'attachment')
            self.assert_equal(options['filename'], 'index.txt')
            rv.close()

    def test_static_file(self):
        # Verify Cache-Control max-age: default 12h, then via config,
        # then via a get_send_file_max_age override.
        app = flask.Flask(__name__)
        # default cache timeout is 12 hours
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 12 * 60 * 60)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 12 * 60 * 60)
            rv.close()
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 3600)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 3600)
            rv.close()

        class StaticFileApp(flask.Flask):
            def get_send_file_max_age(self, filename):
                return 10
        app = StaticFileApp(__name__)
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 10)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 10)
            rv.close()
class LoggingTestCase(FlaskTestCase):
    """Tests for app.logger behaviour, error logging, and url_for helpers.

    NOTE(review): the url_for tests at the bottom look unrelated to logging;
    they are grouped here historically.
    """

    def test_logger_cache(self):
        # The logger object is cached until logger_name changes.
        app = flask.Flask(__name__)
        logger1 = app.logger
        self.assert_true(app.logger is logger1)
        self.assert_equal(logger1.name, __name__)
        app.logger_name = __name__ + '/test_logger_cache'
        self.assert_true(app.logger is not logger1)

    def test_debug_log(self):
        # In debug mode both WARNING and DEBUG records go to stderr, and
        # exceptions must still propagate to the test client.
        app = flask.Flask(__name__)
        app.debug = True

        @app.route('/')
        def index():
            app.logger.warning('the standard library is dead')
            app.logger.debug('this is a debug statement')
            return ''

        @app.route('/exc')
        def exc():
            1 // 0

        with app.test_client() as c:
            with catch_stderr() as err:
                c.get('/')
                out = err.getvalue()
                self.assert_in('WARNING in helpers [', out)
                self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out)
                self.assert_in('the standard library is dead', out)
                self.assert_in('this is a debug statement', out)

            with catch_stderr() as err:
                try:
                    c.get('/exc')
                except ZeroDivisionError:
                    pass
                else:
                    self.assert_true(False, 'debug log ate the exception')

    def test_debug_log_override(self):
        # An explicitly-set logger level must survive in debug mode.
        app = flask.Flask(__name__)
        app.debug = True
        app.logger_name = 'flask_tests/test_debug_log_override'
        app.logger.level = 10
        self.assert_equal(app.logger.level, 10)

    def test_exception_logging(self):
        # Unhandled exceptions are logged with a traceback and produce a 500.
        out = StringIO()
        app = flask.Flask(__name__)
        app.logger_name = 'flask_tests/test_exception_logging'
        app.logger.addHandler(StreamHandler(out))

        @app.route('/')
        def index():
            1 // 0

        rv = app.test_client().get('/')
        self.assert_equal(rv.status_code, 500)
        self.assert_in(b'Internal Server Error', rv.data)

        err = out.getvalue()
        self.assert_in('Exception on / [GET]', err)
        self.assert_in('Traceback (most recent call last):', err)
        self.assert_in('1 // 0', err)
        self.assert_in('ZeroDivisionError:', err)

    def test_processor_exceptions(self):
        # Errors in before/after request processors hit the 500 handler.
        app = flask.Flask(__name__)

        @app.before_request
        def before_request():
            if trigger == 'before':
                1 // 0

        @app.after_request
        def after_request(response):
            if trigger == 'after':
                1 // 0
            return response

        @app.route('/')
        def index():
            return 'Foo'

        @app.errorhandler(500)
        def internal_server_error(e):
            return 'Hello Server Error', 500

        for trigger in 'before', 'after':
            rv = app.test_client().get('/')
            self.assert_equal(rv.status_code, 500)
            self.assert_equal(rv.data, b'Hello Server Error')

    def test_url_for_with_anchor(self):
        # The _anchor argument is URL-quoted and appended as a fragment.
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            self.assert_equal(flask.url_for('index', _anchor='x y'),
                              '/#x%20y')

    def test_url_for_with_scheme(self):
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            self.assert_equal(flask.url_for('index',
                                            _external=True,
                                            _scheme='https'),
                              'https://localhost/')

    def test_url_for_with_scheme_not_external(self):
        # _scheme without _external=True is an error.
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            self.assert_raises(ValueError,
                               flask.url_for,
                               'index',
                               _scheme='https')

    def test_url_with_method(self):
        # url_for(_method=...) picks the rule registered for that method.
        from flask.views import MethodView
        app = flask.Flask(__name__)

        class MyView(MethodView):
            def get(self, id=None):
                if id is None:
                    return 'List'
                return 'Get %d' % id

            def post(self):
                return 'Create'
        myview = MyView.as_view('myview')
        app.add_url_rule('/myview/', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/<int:id>', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/create', methods=['POST'],
                         view_func=myview)

        with app.test_request_context():
            self.assert_equal(flask.url_for('myview', _method='GET'),
                              '/myview/')
            self.assert_equal(flask.url_for('myview', id=42, _method='GET'),
                              '/myview/42')
            self.assert_equal(flask.url_for('myview', _method='POST'),
                              '/myview/create')
class NoImportsTestCase(FlaskTestCase):
    """Test Flasks are created without import.

    Avoiding ``__import__`` helps create Flask instances where there are errors
    at import time.  Those runtime errors will be apparent to the user soon
    enough, but tools which build Flask instances meta-programmatically benefit
    from a Flask which does not ``__import__``.  Instead of importing to
    retrieve file paths or metadata on a module or package, use the pkgutil and
    imp modules in the Python standard library.
    """

    def test_name_with_import_error(self):
        # 'importerror' is a fixture module that raises NotImplementedError
        # on import; constructing Flask with its name must not trigger that.
        try:
            flask.Flask('importerror')
        except NotImplementedError:
            self.fail('Flask(import_name) is importing import_name.')
class StreamingTestCase(FlaskTestCase):
    """Tests for flask.stream_with_context keeping the request alive while a
    response generator runs."""

    def test_streaming_with_context(self):
        # The generator may access flask.request after the view returned.
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(generate()))
        c = app.test_client()
        rv = c.get('/?name=World')
        self.assertEqual(rv.data, b'Hello World!')

    def test_streaming_with_context_as_decorator(self):
        # stream_with_context also works as a generator-function decorator.
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            @flask.stream_with_context
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(generate())
        c = app.test_client()
        rv = c.get('/?name=World')
        self.assertEqual(rv.data, b'Hello World!')

    def test_streaming_with_context_and_custom_close(self):
        # A wrapped iterable's close() must still be called when the
        # response is exhausted.
        app = flask.Flask(__name__)
        app.testing = True
        called = []

        class Wrapper(object):
            def __init__(self, gen):
                self._gen = gen

            def __iter__(self):
                return self

            def close(self):
                called.append(42)

            def __next__(self):
                return next(self._gen)
            next = __next__  # Python 2 iterator protocol

        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(
                Wrapper(generate())))
        c = app.test_client()
        rv = c.get('/?name=World')
        self.assertEqual(rv.data, b'Hello World!')
        self.assertEqual(called, [42])
def suite():
    """Aggregate the helper test cases into a single test suite."""
    tests = unittest.TestSuite()
    # JSON tests only make sense when a JSON implementation is available.
    if flask.json_available:
        tests.addTest(unittest.makeSuite(JSONTestCase))
    for case in (SendfileTestCase, LoggingTestCase,
                 NoImportsTestCase, StreamingTestCase):
        tests.addTest(unittest.makeSuite(case))
    return tests
# --- file boundary: demos/clusters_from_topics.py (repo mikedelong/aarhus) ---
# coding:utf-8
import cPickle as pickle
import glob
import logging
import os
import scipy
import scipy.sparse
import string
import sys
import time
from collections import defaultdict
import gensim.matutils
import gensim.utils
import numpy
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
# Module-level setup: stemmer, data directories, and logging.
stemmer = SnowballStemmer("english")

data_dir = os.path.join(os.getcwd(), 'data/')
output_dir = os.path.join(os.getcwd(), 'output/')
# BUG FIX: str.rstrip('.py') strips *characters* ('.', 'p', 'y') from the
# end, not the '.py' suffix -- e.g. 'happy.py' -> 'happ'.  Use splitext to
# drop the extension reliably.
work_dir = os.path.join(os.getcwd(), 'model',
                        os.path.splitext(os.path.basename(__file__))[0])
if not os.path.exists(work_dir):
    # makedirs also creates the intermediate 'model' directory if missing
    # (os.mkdir would raise OSError in that case).
    os.makedirs(work_dir)
os.chdir(work_dir)

logger = logging.getLogger('text_similar')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# convert to unicode
# convert to unicode
def to_unicode(arg_text):
    """Lower-case *arg_text*, decode it to unicode, drop URL/e-mail-like
    words, and strip punctuation from the remaining words."""
    text = arg_text.lower()
    if not isinstance(text, unicode):
        text = text.decode('utf-8', 'ignore')
    kept = []
    for word in text.split(' '):
        # Skip anything that looks like a link or address.
        if any([word.startswith('http:'), word.startswith('https:'),
                word.startswith('mailto:'), word.endswith('.com'),
                word.endswith('.org')]):
            continue
        kept.append("".join([character for character in unicode(word)
                             if character not in string.punctuation]))
    return ' '.join(kept)
def to_unicode_unrolled(arg_text):
    """Lower-case *arg_text*, decode it to unicode, drop URL/e-mail-like
    words, and strip punctuation from the remaining words.

    Unrolled variant of ``to_unicode``; it additionally filters words that
    start with ``<http:`` and joins with a single space.
    """
    text = arg_text.lower()
    if not isinstance(text, unicode):
        text = text.decode('utf-8', 'ignore')
    result = []
    for word in text.split(' '):
        # Skip anything that looks like a link or address (the original
        # used opaque flags b0..b6 for these tests).
        if any([word.startswith(u'http:'), word.startswith(u'<http:'),
                word.startswith(u'https:'), word.startswith(u'mailto:'),
                word.endswith(u'.com'), word.endswith(u'.org')]):
            continue
        # The original wrapped this in ' '.join([...]) over a one-element
        # list -- a no-op; keep just the punctuation strip.
        result.append("".join([character for character in unicode(word)
                               if character not in string.punctuation]))
    return " ".join(result)
def remove_stopwords_and_stem(arg_text):
    """Drop English stopwords from the token sequence *arg_text* and stem
    every remaining token.

    PERF FIX: the original re-built ``stopwords.words('english')`` (a list)
    for every single token, making the filter O(tokens * stopwords) with a
    fresh NLTK lookup each time; build a set once per call instead.
    """
    stopword_set = set(stopwords.words('english'))
    return [stemmer.stem(item) for item in arg_text if item not in stopword_set]
class TextSimilar(gensim.utils.SaveLoad):
def __init__(self):
    """Initialise every model/index attribute to its empty default; they
    are populated by :meth:`train`."""
    self.conf = {}  # cache filenames for docs/dictionary/corpus
    self.dictionary = None  # gensim corpora.Dictionary
    self.docs = None  # tokenised documents
    self.fname = None  # glob pattern of the input files
    self.lda = None  # LDA model trained on the raw bag-of-words corpus
    self.lda_similarity_index = None
    self.lda_tfidf = None  # LDA model trained on the TF-IDF corpus
    self.lda_tfidf_similarity_index = None
    self.logent = None  # log-entropy model
    self.logent_similarity_index = None
    self.lsi = None  # LSI model
    self.lsi_similarity_index = None
    self.method = None  # which of the above was trained
    self.para = None  # per-document topic/factor vectors
    self.similar_index = None
    self.tfidf = None  # TF-IDF model shared by lsi/lda_tfidf
def _preprocess(self):
    """Read, tokenise, clean and index the files matching ``self.fname``.

    Returns (docs, dictionary, corpus) and caches each to disk under the
    filenames in ``self.conf`` so later runs can pass ``is_pre=False``.
    """
    # todo write a more pythonic version of this function and use it
    docs = [to_unicode_unrolled(open(f, 'r').read().strip()).split() for f in glob.glob(self.fname)]
    logger.debug('ingested files into big array with length %d' % len(docs))
    docs = [remove_stopwords_and_stem(item) for item in docs]
    logger.debug('removed stopwords and stemmed')
    pickle.dump(docs, open(self.conf['fname_docs'], 'wb'))
    logger.debug('pickle dump to %s done' % self.conf['fname_docs'])

    dictionary = corpora.Dictionary(docs)
    dictionary.save(self.conf['fname_dict'])
    logger.debug('dictionary save to %s done' % self.conf['fname_dict'])

    # Bag-of-words representation, serialized in Matrix Market format.
    corpus = [dictionary.doc2bow(doc) for doc in docs]
    corpora.MmCorpus.serialize(self.conf['fname_corpus'], corpus)
    logger.debug('corpus serialize to %s done' % self.conf['fname_corpus'])
    return docs, dictionary, corpus
def _generate_conf(self):
    """Derive the docs/dictionary/corpus cache filenames from ``self.fname``.

    Only the basename is used so the caches land in the current working
    directory (the script chdirs into ``work_dir`` at import time).
    """
    # Was: self.fname[self.fname.rfind('/') + 1:] -- os.path.basename is
    # the idiomatic equivalent (identical on POSIX paths, including the
    # no-slash case where rfind returns -1).
    fname = os.path.basename(self.fname)
    self.conf['fname_docs'] = '%s.docs' % fname
    self.conf['fname_dict'] = '%s.dict' % fname
    self.conf['fname_corpus'] = '%s.mm' % fname
def train(self, arg_fname, is_pre=True, method='lsi', **params):
self.fname = arg_fname
self.method = method
self._generate_conf()
if is_pre:
self.docs, self.dictionary, corpus = self._preprocess()
else:
self.docs = pickle.load(open(self.conf['fname_docs']))
self.dictionary = corpora.Dictionary.load(self.conf['fname_dict'])
corpus = corpora.MmCorpus(self.conf['fname_corpus'])
if params is None:
params = {}
logger.info("training TF-IDF model")
self.tfidf = models.TfidfModel(corpus, id2word=self.dictionary)
corpus_tfidf = self.tfidf[corpus]
if method == 'lsi':
logger.info("training LSI model")
self.lsi = models.LsiModel(corpus_tfidf, id2word=self.dictionary, **params)
self.lsi.print_topics(-1)
self.lsi_similarity_index = similarities.MatrixSimilarity(self.lsi[corpus_tfidf])
self.para = self.lsi[corpus_tfidf]
elif method == 'lda_tfidf':
logger.info("training LDA model")
# try 6 workers here instead of original 8
self.lda_tfidf = models.LdaMulticore(corpus_tfidf, id2word=self.dictionary, workers=6, **params)
self.lda_tfidf.print_topics(-1)
self.lda_tfidf_similarity_index = similarities.MatrixSimilarity(self.lda[corpus_tfidf])
self.para = self.lda[corpus_tfidf]
elif method == 'lda':
logger.info("training LDA model")
# try 6 workers here instead of original 8
self.lda = models.LdaMulticore(corpus, id2word=self.dictionary, workers=6, **params)
self.lda.print_topics(-1)
self.lda_similarity_index = similarities.MatrixSimilarity(self.lda[corpus])
self.para = self.lda[corpus]
elif method == 'logentropy':
logger.info("training a log-entropy model")
self.logent = models.LogEntropyModel(corpus, id2word=self.dictionary)
self.logent_similarity_index = similarities.MatrixSimilarity(self.logent[corpus])
self.para = self.logent[corpus]
else:
msg = "unknown semantic method %s" % method
logger.error(msg)
raise NotImplementedError(msg)
def doc2vec(self, doc):
bow = self.dictionary.doc2bow(to_unicode(doc).split())
if self.method == 'lsi':
return self.lsi[self.tfidf[bow]]
elif self.method == 'lda':
return self.lda[bow]
elif self.method == 'lda_tfidf':
return self.lda[self.tfidf[bow]]
elif self.method == 'logentropy':
return self.logent[bow]
def find_similar(self, doc, n=10):
vec = self.doc2vec(doc)
sims = self.similar_index[vec]
sims = sorted(enumerate(sims), key=lambda item: -item[1])
for elem in sims[:n]:
idx, value = elem
print (' '.join(self.docs[idx]), value)
def get_vectors(self):
return self._get_vector(self.para)
@staticmethod
def _get_vector(corpus):
def get_max_id():
maxid = -1
for document in corpus:
maxid = max(maxid, max(
[-1] + [fieldid for fieldid, _ in document])) # [-1] to avoid exceptions from max(empty)
return maxid
num_features = 1 + get_max_id()
index = numpy.empty(shape=(len(corpus), num_features), dtype=numpy.float32)
for docno, vector in enumerate(corpus):
if docno % 1000 == 0:
logger.info("PROGRESS: at document #%i/%i" % (docno, len(corpus)))
if isinstance(vector, numpy.ndarray):
pass
elif scipy.sparse.issparse(vector):
vector = vector.toarray().flatten()
else:
vector = gensim.matutils.unitvec(gensim.matutils.sparse2full(vector, num_features))
index[docno] = vector
return index
def cluster(vectors, ts, k=30, arg_method=None):
    """K-means cluster *vectors* and dump up to 100 docs per cluster to a file.

    :param vectors: 2-D array-like of document vectors
    :param ts: trained TextSimilar instance (its .docs are the documents)
    :param int k: number of clusters
    :param str arg_method: method name used to build the output file name
    """
    from sklearn.cluster import k_means
    x = numpy.array(vectors)
    # numpy.float was a deprecated alias for the builtin float (removed in
    # NumPy 1.24); use float directly.
    cluster_center, result, inertia = k_means(x.astype(float), n_clusters=k, init="k-means++")
    x__y_dic = defaultdict(set)
    for i, pred_y in enumerate(result):
        x__y_dic[pred_y].add(''.join(ts.docs[i]))
    logger.info('len(x__y_dic): %d' % len(x__y_dic))
    output_file_name = arg_method + '-cluster.txt'
    with open(output_dir + output_file_name, 'w') as fo:
        for y in x__y_dic:
            # BUG FIX: str() produced an empty line; write the cluster label.
            fo.write(str(y) + '\n')
            fo.write('{word}\n'.format(word='\n'.join(list(x__y_dic[y])[:100])))
def main(arg_is_train=True):
    """Train (or load) one model per semantic method, then cluster its vectors.

    :param bool arg_is_train: train fresh models when True, load saved ones
        when False
    """
    # todo make the data directory an input parameter
    # file_name = data_dir + '/files.tar'
    file_name = data_dir + '/*'
    # todo make this an input parameter
    topics_count = 100
    # todo make this an input parameter
    # leaving out logentropy due to memory issues
    for method in ('lda', 'lda_tfidf', 'lsi'):
        text_similar = TextSimilar()
        if arg_is_train:
            text_similar.train(file_name, method=method, num_topics=topics_count,
                               is_pre=True, iterations=100)
            text_similar.save(method)
        else:
            text_similar = TextSimilar().load(method)
        doc_vectors = text_similar.get_vectors()
        cluster(doc_vectors, text_similar, k=topics_count, arg_method=method)
if __name__ == '__main__':
    # Any command-line argument at all switches on training mode.
    # The comparison already yields a bool; 'True if ... else False' was noise.
    is_train = len(sys.argv) > 1
    start_time = time.time()
    main(is_train)
    finish_time = time.time()
    # Split the elapsed seconds into H:M:S for the log line below.
    elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
    elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
    logging.info(
        "Elapsed time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
|
shahankhatch/scikit-learn | refs/heads/master | examples/covariance/plot_sparse_cov.py | 300 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
# Sparse symmetric positive-definite ground-truth precision matrix.
# alpha controls sparsity; the coefficient bounds keep recovery feasible
# (no coefficients too small to detect, none large enough to dominate).
prec = make_sparse_spd_matrix(n_features, alpha=.98,
                              smallest_coef=.4,
                              largest_coef=.7,
                              random_state=prng)
cov = linalg.inv(prec)
# Rescale the covariance to a correlation matrix (unit diagonal) and apply
# the inverse scaling to the precision so the pair stays consistent.
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
# Standardize the observed samples (zero mean, unit variance per feature).
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
# Cross-validated l1-penalized (sparse) inverse-covariance estimator.
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
# Ledoit-Wolf l2-shrinkage estimate for comparison.
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
        ('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
    plt.subplot(2, 4, i + 1)
    plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
         ('GraphLasso', prec_), ('True', prec)]
# Tighter color range than for the covariances: the empirical precision
# contains extreme values that would otherwise dominate the scale.
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
    ax = plt.subplot(2, 4, i + 5)
    # Mask exact zeros so sparsity shows up as the axes' background color.
    plt.imshow(np.ma.masked_equal(this_prec, 0),
               interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s precision' % name)
    ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
# Mean cross-validation score per alpha on the iteratively refined grid.
# BUG FIX: the fitted scores live in the trailing-underscore attribute
# ``grid_scores_`` (sklearn estimated-attribute convention);
# ``model.grid_scores`` raises AttributeError.
plt.plot(model.cv_alphas_, np.mean(model.grid_scores_, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
Snazz2001/BDA_py_demos | refs/heads/master | demos_ch2/demo2_2.py | 19 | """Bayesian data analysis, 3rd ed
Chapter 2, demo 2
Illustrate the effect of a prior. Comparison of posterior distributions with
different parameter values for Beta prior distribution.
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
# NOTE(review): 'color_cycle' is the pre-matplotlib-2.0 axes rc key; newer
# matplotlib expects 'prop_cycle' -- confirm the targeted matplotlib version.
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
                            '#984ea3','#ff7f00','#ffff33'))
# Grid
x = np.linspace(0.36, 0.54, 150)
# Posterior with data (437,543) and uniform prior Beta(1,1)
au = 438
bu = 544
# Calculate densities
pdu = beta.pdf(x, au, bu)
# Compare 3 cases
# Arrays of different priors: Beta(0.485*n,(1-0.485)*n), for n = 2, 20, 200
ap = np.array([0.485 * (2*10**i) for i in range(3)])
bp = np.array([(1-0.485) * (2*10**i) for i in range(3)])
# Corresponding posteriors with data (437,543)
ai = 437 + ap
bi = 543 + bp
# Calculate prior and posterior densities
# Shapes: x is (150,), ap/bp/ai/bi are (3,); the [:,np.newaxis] makes the
# parameters (3,1) so broadcasting yields a (3,150) density grid.
pdp = beta.pdf(x, ap[:,np.newaxis], bp[:,np.newaxis])
pdi = beta.pdf(x, ai[:,np.newaxis], bi[:,np.newaxis])
"""
The above two expressions uses numpy broadcasting inside the `beta.pdf`
function. Arrays `ap` and `bp` have shape (3,) i.e. they are 1d arrays of
length 3. Array `x` has shape (150,) and the output `pdp` is an array of shape
(3,150).
Instead of using the `beta.pdf` function, we could have also calculated other
arithmetics. For example `out = x + (ap * bp)[:,np.newaxis]` returns an array
of shape (3,150), where each element `out[i,j] = x[j] + ap[i] * bp[i]`.
With broadcasting, unnecessary repetition is avoided, i.e. it is not necessary
to create an array of `ap` repeated 150 times into the memory. More info can be
found on the numpy documentation. Compare to `bsxfun` in Matlab.
"""
# Plot 3 subplots
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, sharey=True,
                         figsize=(8, 12))
# Leave space for the legend on bottom and remove some space from the top
fig.subplots_adjust(bottom=0.2, top=0.94)
for i in range(3):
    # Add vertical line
    known = axes[i].axvline(0.485, color='#4daf4a', linewidth=1.5, alpha=0.5)
    # Plot three precalculated densities
    post1, = axes[i].plot(x, pdu, color='#ff8f20', linewidth=2.5)
    prior, = axes[i].plot(x, pdp[i], 'k:', linewidth=1.5)
    post2, = axes[i].plot(x, pdi[i], 'k--', linewidth=1.5)
    # NOTE(review): plt.yticks() acts on the *current* axes (the last
    # subplot created), not on axes[i]; presumably axes[i].set_yticks(())
    # was intended -- sharey=True masks the difference here. Confirm.
    plt.yticks(())
    # Set the title for this subplot
    axes[i].set_title(r'$\alpha/(\alpha+\beta) = 0.485,\quad \alpha+\beta = {}$'
                      .format(2*10**i), fontsize=18)
# Limit xaxis
axes[0].autoscale(axis='x', tight=True)
axes[0].set_ylim((0,30))
# Add legend to the last subplot
axes[-1].legend(
    (post1, prior, post2, known),
    ( 'posterior with uniform prior',
     'informative prior',
     'posterior with informative prior',
     r'known $\theta=0.485$ in general population'),
    loc='upper center',
    bbox_to_anchor=(0.5, -0.15)
)
# Display the figure
plt.show()
|
madsmpedersen/MMPE | refs/heads/master | functions/__init__.py | 1 | d = None
d = dir()
from .process_exec import pexec
from .inspect_ext import class_list, argument_string
from .deep_coding import deep_encode, to_str, deep_decode, to_bytes
from .exe_std_err import ExeStdErr
from .exe_std_out import ExeStdOut
__all__ = [m for m in set(dir()) - set(d)]
|
Lucasgscruz/harpia | refs/heads/master | harpia/bpGUI/laplace.py | 2 | # -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges (lcdjunges@yahoo.com.br), Clovis Peruchi Scotti (scotti@ieee.org),
# Guilherme Augusto Rutzen (rutzen@das.ufsc.br), Mathias Erdtmann (erdtmann@gmail.com) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti (scotti@ieee.org), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
# ----------------------------------------------------------------------
import gtk
from harpia.GladeWindow import GladeWindow
from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR
# i18n
import os
from harpia.utils.XMLUtils import XMLParser
import gettext
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
# ----------------------------------------------------------------------
class Properties(GladeWindow, S2iCommonProperties):
    """GTK properties dialog for the Harpia Laplace block.

    Loads the Glade UI, mirrors the block's XML <property> values into the
    widgets, and writes widget state back to the XML on confirmation.
    """
    # ----------------------------------------------------------------------
    def __init__(self, PropertiesXML, S2iBlockProperties):
        # PropertiesXML: parsed XML tree holding this block's properties.
        # S2iBlockProperties: block facade used to push back XML and colors.
        self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
        filename = self.m_sDataDir + 'glade/laplace.ui'
        self.m_oPropertiesXML = PropertiesXML
        self.m_oS2iBlockProperties = S2iBlockProperties
        widget_list = [
            'Properties',
            'LAPLMaskSize',
            'BackgroundColor',
            'BorderColor',
            'HelpView',
            'laplace_confirm'
        ]
        handlers = [
            'on_cancel_clicked',
            'on_laplace_confirm_clicked',
            'on_BackColorButton_clicked',
            'on_BorderColorButton_clicked'
        ]
        top_window = 'Properties'
        GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
        self.block_properties = self.m_oPropertiesXML.getTag("properties").getTag("block").getChildTags("property")
        # load properties values
        # Map the stored aperture size ("1"/"3"/"5"/"7") onto combo rows 0..3.
        for Property in self.block_properties:
            name = Property.getAttr("name")
            value = Property.getAttr("value")
            if name == "masksize":
                if value == "1":
                    self.widgets['LAPLMaskSize'].set_active(int(0))
                if value == "3":
                    self.widgets['LAPLMaskSize'].set_active(int(1))
                if value == "5":
                    self.widgets['LAPLMaskSize'].set_active(int(2))
                if value == "7":
                    self.widgets['LAPLMaskSize'].set_active(int(3))
        self.configure()
        # load help text
        # t_oS2iHelp = XMLParser(self.m_sDataDir + "help/laplace" + _("_en.help"))
        # t_oTextBuffer = gtk.TextBuffer()
        # t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent())))
        # self.widgets['HelpView'].set_buffer(t_oTextBuffer)
    # ----------------------------------------------------------------------
    def getHelp(self):
        # Portuguese help string describing the Laplace filtering operation.
        return "operação de filtragem que calcula o Laplaciano de uma imagem,\
        realçando cantos e bordas de objetos."
    # ----------------------------------------------------------------------
    def __del__(self):
        pass
    # ----------------------------------------------------------------------
    def on_laplace_confirm_clicked(self, *args):
        # Persist widget state back into the block XML and apply colors.
        self.widgets['laplace_confirm'].grab_focus()
        for Property in self.block_properties:
            name = Property.getAttr("name")
            if name == "masksize":
                # Inverse of the mapping in __init__: row 0..3 -> "1"/"3"/"5"/"7".
                Active = self.widgets['LAPLMaskSize'].get_active()
                if int(Active) == 0:
                    Property.setAttr("value", unicode("1"))
                if int(Active) == 1:
                    Property.setAttr("value", unicode("3"))
                if int(Active) == 2:
                    Property.setAttr("value", unicode("5"))
                if int(Active) == 3:
                    Property.setAttr("value", unicode("7"))
        self.m_oS2iBlockProperties.SetPropertiesXML(self.m_oPropertiesXML)
        self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor)
        self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor)
        self.widgets['Properties'].destroy()
# ----------------------------------------------------------------------
# LaplaceProperties = Properties()
# LaplaceProperties.show( center=0 )
# ------------------------------------------------------------------------------
# Code generation
# ------------------------------------------------------------------------------
def generate(blockTemplate):
    """Fill *blockTemplate* with the C/OpenCV code snippets for the Laplace block.

    Sets the attributes ``imagesIO``, ``functionCall`` and ``dealloc`` on
    *blockTemplate*; reads ``blockTemplate.properties`` (list of
    [name, value] pairs) for the mask size.
    """
    # Default aperture size; overridden if a 'masksize' property exists.
    # BUG FIX: the old loop had an 'else: masksizeValue = "3"' that reset the
    # value on every property that was NOT 'masksize' (so any property after
    # 'masksize' clobbered it), and left masksizeValue undefined entirely
    # when the property list was empty (NameError below).
    masksizeValue = '3'
    for propIter in blockTemplate.properties:
        if propIter[0] == 'masksize':
            masksizeValue = propIter[1]
    blockTemplate.imagesIO = \
        'IplImage * block$$_img_i1 = NULL;\n' + \
        'IplImage * block$$_img_o1 = NULL;\n' + \
        'IplImage * block$$_img_t = NULL;\n'
    blockTemplate.functionCall = '\nif(block$$_img_i1){\n' + \
                                 'block$$_img_o1 = cvCreateImage(cvSize(block$$' + \
                                 '_img_i1->width,block$$_img_i1->height), IPL_DEPTH_32F,block$$_img_i1->nChannels);\n' + \
                                 'cvLaplace(block$$_img_i1, block$$_img_o1 ,' + masksizeValue + ' );}\n'
    blockTemplate.dealloc = 'cvReleaseImage(&block$$_img_o1);\n' + \
                            'cvReleaseImage(&block$$_img_i1);\n' + \
                            'cvReleaseImage(&block$$_img_t);\n'
# ------------------------------------------------------------------------------
# Block Setup
# ------------------------------------------------------------------------------
def getBlock():
    """Describe the Laplace block for the Harpia block palette/tree."""
    resource_paths = {"Python": "laplace",
                      "Glade": "glade/laplace.ui",
                      "Xml": "xml/laplace.xml"}
    block = {}
    block["Label"] = _("Laplace")
    block["Path"] = resource_paths
    block["Icon"] = "images/laplace.png"
    block["Color"] = "250:180:80:150"
    # One image in, one image out.
    block["InTypes"] = {0: "HRP_IMAGE"}
    block["OutTypes"] = {0: "HRP_IMAGE"}
    block["Description"] = _("Filtering operation that uses the Laplacian mask to enhance edges on the image.")
    block["TreeGroup"] = _("Gradients, Edges and Corners")
    return block
|
openstack/python-barbicanclient | refs/heads/master | barbicanclient/v1/acls.py | 1 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_utils.timeutils import parse_isotime
from barbicanclient import base
from barbicanclient import formatter
LOG = logging.getLogger(__name__)
DEFAULT_OPERATION_TYPE = 'read'
VALID_ACL_OPERATIONS = ['read', 'write', 'delete', 'list']
class ACLFormatter(formatter.EntityFormatter):
    """Tabular-output mixin for ACL entities.

    Subclasses extend ``columns`` with their own trailing "... ACL Ref"
    column; ``_get_formatted_data`` supplies the matching row values.
    """

    columns = ("Operation Type",
               "Project Access",
               "Users",
               "Created",
               "Updated",
               )

    def _get_formatted_data(self):
        """Return the row values for this ACL, timestamps in ISO-8601."""
        stamps = [value.isoformat() if value else None
                  for value in (self.created, self.updated)]
        row = (self.operation_type,
               self.project_access,
               self.users,
               stamps[0],
               stamps[1],
               self.acl_ref,
               )
        return row
class _PerOperationACL(ACLFormatter):
    def __init__(self, parent_acl, entity_ref=None, users=None,
                 project_access=None, operation_type=None,
                 created=None, updated=None):
        """Per Operation ACL data instance for secret or container.
        This class not to be instantiated outside of this module.
        :param parent_acl: acl entity to this per operation data belongs to
        :param str entity_ref: Full HATEOAS reference to a secret or container
        :param users: List of Keystone userid(s) to be used for ACL.
        :type users: List or None
        :param bool project_access: Flag indicating project access behavior
        :param str operation_type: Type indicating which class of Barbican
            operations this ACL is defined for e.g. 'read' operations
        :param str created: Time string indicating ACL create timestamp. This
            is populated only when populating data from api response. Not
            needed in client input.
        :param str updated: Time string indicating ACL last update timestamp.
            This is populated only when populating data from api response. Not
            needed in client input.
        """
        self._parent_acl = parent_acl
        self._entity_ref = entity_ref
        self._users = users if users else list()
        self._project_access = project_access
        self._operation_type = operation_type
        self._created = parse_isotime(created) if created else None
        self._updated = parse_isotime(updated) if updated else None

    @property
    def acl_ref(self):
        return ACL.get_acl_ref_from_entity_ref(self.entity_ref)

    @property
    def acl_ref_relative(self):
        return self._parent_acl.acl_ref_relative

    @property
    def entity_ref(self):
        return self._entity_ref

    @property
    def entity_uuid(self):
        return self._parent_acl.entity_uuid

    @property
    def project_access(self):
        """Flag indicating project access behavior is enabled or not"""
        return self._project_access

    @property
    def users(self):
        """List of users for this ACL setting"""
        return self._users

    @property
    def operation_type(self):
        """Type indicating class of Barbican operations for this ACL"""
        return self._operation_type

    @property
    def created(self):
        return self._created

    @property
    def updated(self):
        return self._updated

    @operation_type.setter
    def operation_type(self, value):
        self._operation_type = value

    @project_access.setter
    def project_access(self, value):
        self._project_access = value

    @users.setter
    def users(self, value):
        self._users = value

    def remove(self):
        """Remove operation specific setting defined for a secret or container
        :raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
        :raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
        """
        LOG.debug('Removing {0} operation specific ACL for href: {1}'
                  .format(self.operation_type, self.acl_ref))
        # Refresh from the server so we do not clobber settings added by
        # other clients since this entity was loaded.
        self._parent_acl.load_acls_data()
        acl_entity = self._parent_acl
        # Find matching operation specific acl entry and remove from list
        per_op_acl = acl_entity.get(self.operation_type)
        if per_op_acl:
            acl_entity.operation_acls.remove(per_op_acl)
        # after above operation specific acl removal, check if there are
        # any remaining acls. If yes, then submit updates to server.
        # If not, then remove/delete acls from server.
        if acl_entity.operation_acls:
            acl_entity.submit()
        else:
            acl_entity.remove()

    def _validate_users_type(self):
        # isinstance (rather than the previous exact ``type(...) is`` checks)
        # also accepts list/set subclasses; strings and other iterables are
        # still rejected. Falsy values (None, empty list) pass unchanged.
        if self.users and not isinstance(self.users, (list, set)):
            raise ValueError('Users value is expected to be provided'
                             ' as list/set.')
class ACL(object):
    # URI suffix appended to a secret/container ref to address its ACLs.
    _resource_name = 'acl'

    def __init__(self, api, entity_ref, users=None, project_access=None,
                 operation_type=DEFAULT_OPERATION_TYPE, created=None,
                 updated=None):
        """Base ACL entity instance for secret or container.
        Provide ACL data arguments to set ACL setting for given operation_type.
        To add ACL setting for other operation types, use `add_operation_acl`
        method.
        :param api: client instance reference
        :param str entity_ref: Full HATEOAS reference to a secret or container
        :param users: List of Keystone userid(s) to be used for ACL.
        :type users: str List or None
        :param bool project_access: Flag indicating project access behavior
        :param str operation_type: Type indicating which class of Barbican
            operations this ACL is defined for e.g. 'read' operations
        :param str created: Time string indicating ACL create timestamp. This
            is populated only when populating data from api response. Not
            needed in client input.
        :param str updated: Time string indicating ACL last update timestamp.
            This is populated only when populating data from api response. Not
            needed in client input.
        """
        self._api = api
        self._entity_ref = entity_ref
        self._operation_acls = []
        # create per operation ACL data entity only when client has set users
        # or project_access flag.
        if users is not None or project_access is not None:
            acl = _PerOperationACL(parent_acl=self, entity_ref=entity_ref,
                                   users=users, project_access=project_access,
                                   operation_type=operation_type,
                                   created=created, updated=updated)
            self._operation_acls.append(acl)

    @property
    def entity_ref(self):
        """Entity URI reference."""
        return self._entity_ref

    @property
    def entity_uuid(self):
        """Entity UUID"""
        # _acl_type is supplied by the SecretACL/ContainerACL subclasses.
        return str(base.validate_ref_and_return_uuid(
            self._entity_ref, self._acl_type))

    @property
    def operation_acls(self):
        """List of operation specific ACL settings."""
        return self._operation_acls

    @property
    def acl_ref(self):
        return ACL.get_acl_ref_from_entity_ref(self.entity_ref)

    @property
    def acl_ref_relative(self):
        # Relative path form used for the actual HTTP calls;
        # _parent_entity_path comes from the subclasses.
        return ACL.get_acl_ref_from_entity_ref_relative(
            self.entity_uuid, self._parent_entity_path)

    def add_operation_acl(self, users=None, project_access=None,
                          operation_type=None, created=None,
                          updated=None,):
        """Add ACL settings to entity for specific operation type.
        If matching operation_type ACL already exists, then it replaces it with
        new PerOperationACL object using provided inputs. Otherwise it appends
        new PerOperationACL object to existing per operation ACL list.
        This just adds to local entity and have not yet applied these changes
        to server.
        :param users: List of Keystone userid(s) to be used in ACL.
        :type users: List or None
        :param bool project_access: Flag indicating project access behavior
        :param str operation_type: Type indicating which class of Barbican
            operations this ACL is defined for e.g. 'read' operations
        :param str created: Time string indicating ACL create timestamp. This
            is populated only when populating data from api response. Not
            needed in client input.
        :param str updated: Time string indicating ACL last update timestamp.
            This is populated only when populating data from api response. Not
            needed in client input.
        """
        new_acl = _PerOperationACL(parent_acl=self, entity_ref=self.entity_ref,
                                   users=users, project_access=project_access,
                                   operation_type=operation_type,
                                   created=created, updated=updated)
        # Replace an existing entry for this operation type in place, else
        # append (for/else: the else runs only when no break occurred).
        for i, acl in enumerate(self._operation_acls):
            if acl.operation_type == operation_type:
                # replace with new ACL setting
                self._operation_acls[i] = new_acl
                break
        else:
            self._operation_acls.append(new_acl)

    def _get_operation_acl(self, operation_type):
        # Linear scan is fine: there are at most len(VALID_ACL_OPERATIONS)
        # entries. Returns None when no entry matches.
        return next((acl for acl in self._operation_acls
                     if acl.operation_type == operation_type), None)

    def get(self, operation_type):
        """Get operation specific ACL instance.
        :param str operation_type: Type indicating which operation's ACL
            setting is needed.
        """
        return self._get_operation_acl(operation_type)

    def __getattr__(self, name):
        # Dynamic attribute access: acl.read / acl.write / acl.delete /
        # acl.list resolve to the matching per-operation entry (or None).
        if name in VALID_ACL_OPERATIONS:
            return self._get_operation_acl(name)
        else:
            raise AttributeError(name)

    def submit(self):
        """Submits ACLs for a secret or a container defined in server
        In existing ACL case, this overwrites the existing ACL setting with
        provided inputs. If input users are None or empty list, this will
        remove existing ACL users if there. If input project_access flag is
        None, then default project access behavior is enabled.
        :returns: str acl_ref: Full HATEOAS reference to a secret or container
            ACL.
        :raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
        :raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
        :raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
        """
        LOG.debug('Submitting complete {0} ACL for href: {1}'
                  .format(self.acl_type, self.entity_ref))
        if not self.operation_acls:
            raise ValueError('ACL data for {0} is not provided.'.
                             format(self._acl_type))
        self.validate_input_ref()
        # Build the PUT payload: {operation_type: {project-access, users}}.
        # Omitted keys fall back to server-side defaults.
        acl_dict = {}
        for per_op_acl in self.operation_acls:
            per_op_acl._validate_users_type()
            op_type = per_op_acl.operation_type
            acl_data = {}
            if per_op_acl.project_access is not None:
                acl_data['project-access'] = per_op_acl.project_access
            if per_op_acl.users is not None:
                acl_data['users'] = per_op_acl.users
            acl_dict[op_type] = acl_data
        response = self._api.put(self.acl_ref_relative, json=acl_dict)
        return response.json().get('acl_ref')

    def remove(self):
        """Remove Barbican ACLs setting defined for a secret or container
        :raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
        :raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
        """
        self.validate_input_ref()
        LOG.debug('Removing ACL for {0} for href: {1}'
                  .format(self.acl_type, self.entity_ref))
        self._api.delete(self.acl_ref_relative)

    def load_acls_data(self):
        """Loads ACL entity from Barbican server using its acl_ref
        Clears the existing list of per operation ACL settings if there.
        Populates current ACL entity with ACL settings received from Barbican
        server.
        :raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
        :raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
        :raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
        """
        # Response body maps operation type -> its ACL settings dict.
        response = self._api.get(self.acl_ref_relative)
        del self.operation_acls[:]  # clearing list for all of its references
        for op_type in response:
            acl_dict = response.get(op_type)
            proj_access = acl_dict.get('project-access')
            users = acl_dict.get('users')
            created = acl_dict.get('created')
            updated = acl_dict.get('updated')
            self.add_operation_acl(operation_type=op_type,
                                   project_access=proj_access,
                                   users=users, created=created,
                                   updated=updated)

    def validate_input_ref(self):
        """Validate that entity_ref is a plain secret/container URI.

        :returns: str ref type ('secret' or 'container')
        :raises ValueError: for a missing ref, an ACL URI, or a URI that does
            not match this entity's type
        """
        res_title = self._acl_type.title()
        if not self.entity_ref:
            raise ValueError('{0} href is required.'.format(res_title))
        if self._parent_entity_path in self.entity_ref:
            # Reject refs that already point at the ACL sub-resource.
            if '/acl' in self.entity_ref:
                raise ValueError('{0} ACL URI provided. Expecting {0} URI.'
                                 .format(res_title))
            ref_type = self._acl_type
        else:
            raise ValueError('{0} URI is not specified.'.format(res_title))
        base.validate_ref_and_return_uuid(self.entity_ref, ref_type)
        return ref_type

    @staticmethod
    def get_acl_ref_from_entity_ref(entity_ref):
        # Utility for converting entity ref to acl ref
        if entity_ref:
            entity_ref = entity_ref.rstrip('/')
            return '{0}/{1}'.format(entity_ref, ACL._resource_name)

    @staticmethod
    def get_acl_ref_from_entity_ref_relative(entity_ref, entity_type):
        # Utility for converting entity ref to acl ref
        if entity_ref:
            entity_ref = entity_ref.rstrip('/')
            return '{0}/{1}/{2}'.format(entity_type, entity_ref,
                                        ACL._resource_name)

    @staticmethod
    def identify_ref_type(entity_ref):
        # Utility for identifying ACL type from given entity URI.
        if not entity_ref:
            raise ValueError('Secret or container href is required.')
        if '/secrets' in entity_ref:
            ref_type = 'secret'
        elif '/containers' in entity_ref:
            ref_type = 'container'
        else:
            raise ValueError('Secret or container URI is not specified.')
        return ref_type
class SecretACL(ACL):
    """ACL entity for a secret"""

    # Extra trailing column for the tabular formatter.
    columns = ACLFormatter.columns + ("Secret ACL Ref",)
    _acl_type = 'secret'
    _parent_entity_path = '/secrets'

    @property
    def acl_type(self):
        """Entity type handled by this ACL ('secret')."""
        return type(self)._acl_type
class ContainerACL(ACL):
    """ACL entity for a container"""

    # Extra trailing column for the tabular formatter.
    columns = ACLFormatter.columns + ("Container ACL Ref",)
    _acl_type = 'container'
    _parent_entity_path = '/containers'

    @property
    def acl_type(self):
        """Entity type handled by this ACL ('container')."""
        return type(self)._acl_type
class ACLManager(base.BaseEntityManager):
    """Entity Manager for Secret or Container ACL entities"""

    # Maps the entity type reported by ACL.identify_ref_type() to the
    # concrete ACL entity class.
    acl_class_map = {
        'secret': SecretACL,
        'container': ContainerACL
    }

    def __init__(self, api):
        super(ACLManager, self).__init__(api, ACL._resource_name)

    def create(self, entity_ref=None, users=None, project_access=None,
               operation_type=DEFAULT_OPERATION_TYPE):
        """Factory method for creating `ACL` entity.

        The returned object has not yet been stored in Barbican;
        ``entity_ref`` decides whether a
        :class:`barbicanclient.acls.SecretACL` or a
        :class:`barbicanclient.acls.ContainerACL` is produced.

        :param str entity_ref: Full HATEOAS reference to a secret or container
        :param users: List of Keystone userid(s) to be used in ACL.
        :type users: List or None
        :param bool project_access: Flag indicating project access behavior
        :param str operation_type: Type indicating which class of Barbican
            operations this ACL is defined for e.g. 'read' operations
        :returns: ACL object instance
        :rtype: :class:`barbicanclient.v1.acls.SecretACL` or
            :class:`barbicanclient.v1.acls.ContainerACL`
        """
        # identify_ref_type() raises for anything that is not a secret or
        # container URI, so the map lookup below cannot miss.
        acl_class = self.acl_class_map[ACL.identify_ref_type(entity_ref)]
        return acl_class(api=self._api, entity_ref=entity_ref, users=users,
                         project_access=project_access,
                         operation_type=operation_type)

    def get(self, entity_ref):
        """Retrieve existing ACLs for a secret or container found in Barbican

        :param str entity_ref: Full HATEOAS reference to a secret or container.
        :returns: ACL entity object instance
        :rtype: :class:`barbicanclient.v1.acls.SecretACL` or
            :class:`barbicanclient.v1.acls.ContainerACL`
        :raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
        :raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
        """
        acl_entity = self._validate_acl_ref(entity_ref)
        LOG.debug('Getting ACL for {0} href: {1}'
                  .format(acl_entity.acl_type, acl_entity.acl_ref))
        acl_entity.load_acls_data()
        return acl_entity

    def _validate_acl_ref(self, entity_ref):
        # Build a bare ACL entity of the right type and let it validate the
        # incoming reference; raises ValueError on malformed input.
        if entity_ref is None:
            raise ValueError('Expected secret or container URI is not '
                             'specified.')
        stripped_ref = entity_ref.rstrip('/')
        acl_class = self.acl_class_map[ACL.identify_ref_type(stripped_ref)]
        acl_entity = acl_class(api=self._api, entity_ref=stripped_ref)
        acl_entity.validate_input_ref()
        return acl_entity
|
mitodl/open-discussions | refs/heads/master | search/constants.py | 1 | """ Constants for search """
from channels.constants import POST_TYPE, COMMENT_TYPE
# Alias that spans every search index.
ALIAS_ALL_INDICES = "all"
# Object-type identifiers: each names a distinct kind of searchable document.
PROFILE_TYPE = "profile"
COURSE_TYPE = "course"
RESOURCE_FILE_TYPE = "resourcefile"
PROGRAM_TYPE = "program"
USER_LIST_TYPE = "userlist"
LEARNING_PATH_TYPE = "learningpath"
VIDEO_TYPE = "video"
PODCAST_TYPE = "podcast"
PODCAST_EPISODE_TYPE = "podcastepisode"
# Object types that count as learning resources.
LEARNING_RESOURCE_TYPES = (
    COURSE_TYPE,
    PROGRAM_TYPE,
    USER_LIST_TYPE,
    LEARNING_PATH_TYPE,
    VIDEO_TYPE,
    PODCAST_TYPE,
    PODCAST_EPISODE_TYPE,
    RESOURCE_FILE_TYPE,
)
# Object types accepted by the indexing/search APIs (includes channel
# post/comment/profile types in addition to learning resources).
VALID_OBJECT_TYPES = (
    POST_TYPE,
    COMMENT_TYPE,
    PROFILE_TYPE,
    COURSE_TYPE,
    PROGRAM_TYPE,
    USER_LIST_TYPE,
    VIDEO_TYPE,
    PODCAST_TYPE,
    PODCAST_EPISODE_TYPE,
)
# Single document type used by modern Elasticsearch mappings.
GLOBAL_DOC_TYPE = "_doc"
# Canonical OCW course-section names.
OCW_TYPE_ASSIGNMENTS = "Assignments"
OCW_TYPE_EXAMS = "Exams"
OCW_TYPE_LABS = "Labs"
OCW_TYPE_LECTURE_AUDIO = "Lecture Audio"
OCW_TYPE_LECTURE_NOTES = "Lecture Notes"
OCW_TYPE_LECTURE_VIDEOS = "Lecture Videos"
OCW_TYPE_PROJECTS = "Projects"
OCW_TYPE_READINGS = "Readings"
OCW_TYPE_RECITATIONS = "Recitations"
OCW_TYPE_TEXTBOOKS = "Online Textbooks"
OCW_TYPE_TOOLS = "Tools"
OCW_TYPE_TUTORIALS = "Tutorials"
OCW_TYPE_VIDEOS = "Videos"
# Normalization table: maps the many section titles found in OCW courses
# (including case variants such as "Lecture Slides"/"Lecture slides") onto
# the canonical OCW_TYPE_* names above.
OCW_SECTION_TYPE_MAPPING = {
    OCW_TYPE_ASSIGNMENTS: OCW_TYPE_ASSIGNMENTS,
    OCW_TYPE_EXAMS: OCW_TYPE_EXAMS,
    OCW_TYPE_LABS: OCW_TYPE_LABS,
    OCW_TYPE_LECTURE_AUDIO: OCW_TYPE_LECTURE_AUDIO,
    OCW_TYPE_LECTURE_NOTES: OCW_TYPE_LECTURE_NOTES,
    OCW_TYPE_LECTURE_VIDEOS: OCW_TYPE_LECTURE_VIDEOS,
    OCW_TYPE_PROJECTS: OCW_TYPE_PROJECTS,
    OCW_TYPE_READINGS: OCW_TYPE_READINGS,
    OCW_TYPE_RECITATIONS: OCW_TYPE_RECITATIONS,
    OCW_TYPE_TEXTBOOKS: OCW_TYPE_TEXTBOOKS,
    OCW_TYPE_TOOLS: OCW_TYPE_TOOLS,
    OCW_TYPE_TUTORIALS: OCW_TYPE_TUTORIALS,
    OCW_TYPE_VIDEOS: OCW_TYPE_VIDEOS,
    "Assignments and Student Work": OCW_TYPE_ASSIGNMENTS,
    "Audio Lectures and Notes": OCW_TYPE_LECTURE_AUDIO,
    "Audio Lectures": OCW_TYPE_LECTURE_AUDIO,
    "Calendar & Assignments": OCW_TYPE_ASSIGNMENTS,
    "Calendar & Readings": OCW_TYPE_READINGS,
    "Calendar and Assignments": OCW_TYPE_ASSIGNMENTS,
    "Calendar and Homework": OCW_TYPE_ASSIGNMENTS,
    "Calendar and Lecture Summaries": OCW_TYPE_LECTURE_NOTES,
    "Calendar and Notes": OCW_TYPE_LECTURE_NOTES,
    "Calendar and Readings": OCW_TYPE_READINGS,
    "Class Slides": OCW_TYPE_LECTURE_NOTES,
    "Conference Videos": OCW_TYPE_VIDEOS,
    "Course Notes": OCW_TYPE_LECTURE_NOTES,
    "Exams & Quizzes": OCW_TYPE_EXAMS,
    "Final Project": OCW_TYPE_PROJECTS,
    "Final Projects": OCW_TYPE_PROJECTS,
    "First Paper Assignment": OCW_TYPE_ASSIGNMENTS,
    "Food Assignment": OCW_TYPE_ASSIGNMENTS,
    "Homework Assignments": OCW_TYPE_ASSIGNMENTS,
    "Labs and Exercises": OCW_TYPE_LABS,
    "Lecture & Recitation Videos": OCW_TYPE_LECTURE_VIDEOS,
    "Lecture and Studio Notes": OCW_TYPE_LECTURE_NOTES,
    "Lecture Audio and Slides": OCW_TYPE_LECTURE_AUDIO,
    "Lecture Handouts": OCW_TYPE_LECTURE_NOTES,
    "Lecture Notes & Slides": OCW_TYPE_LECTURE_NOTES,
    "Lecture Notes and Files": OCW_TYPE_LECTURE_NOTES,
    "Lecture Notes and Handouts": OCW_TYPE_LECTURE_NOTES,
    "Lecture Notes and References": OCW_TYPE_LECTURE_NOTES,
    "Lecture Notes and Slides": OCW_TYPE_LECTURE_NOTES,
    "Lecture Notes and Video": OCW_TYPE_LECTURE_NOTES,
    "Lecture Outlines": OCW_TYPE_LECTURE_NOTES,
    "Lecture Slides and Code": OCW_TYPE_LECTURE_NOTES,
    "Lecture Slides and Files": OCW_TYPE_LECTURE_NOTES,
    "Lecture Slides and Readings": OCW_TYPE_LECTURE_NOTES,
    "Lecture Slides and Supplemental Readings": OCW_TYPE_LECTURE_NOTES,
    "Lecture Slides": OCW_TYPE_LECTURE_NOTES,
    "Lecture slides": OCW_TYPE_LECTURE_NOTES,
    "Lecture Summaries": OCW_TYPE_LECTURE_NOTES,
    "Lecture Videos & Notes": OCW_TYPE_LECTURE_VIDEOS,
    "Lecture Videos & Slides": OCW_TYPE_LECTURE_VIDEOS,
    "Lecture Videos and Class Notes": OCW_TYPE_LECTURE_VIDEOS,
    "Lecture Videos and Notes": OCW_TYPE_LECTURE_VIDEOS,
    "Lecture Videos and Slides": OCW_TYPE_LECTURE_VIDEOS,
    "Lectures: Audio and Slides": OCW_TYPE_LECTURE_AUDIO,
    "Lectures": OCW_TYPE_LECTURE_NOTES,
    "Long Project": OCW_TYPE_PROJECTS,
    "Major Writing Assignments": OCW_TYPE_ASSIGNMENTS,
    "MATLAB Exercises": OCW_TYPE_ASSIGNMENTS,
    "Mini Quizzes": OCW_TYPE_EXAMS,
    "Ongoing Assignments": OCW_TYPE_ASSIGNMENTS,
    "Online Textbook": OCW_TYPE_TEXTBOOKS,
    "Practice Exams": OCW_TYPE_EXAMS,
    "Prior Year Exams": OCW_TYPE_EXAMS,
    "Problem Sets": OCW_TYPE_ASSIGNMENTS,
    "Professional Memorandum Assignments": OCW_TYPE_ASSIGNMENTS,
    "Quiz": OCW_TYPE_EXAMS,
    "Quizzes": OCW_TYPE_EXAMS,
    "Reading Notes": OCW_TYPE_READINGS,
    "Reading, Viewing, and Listening": OCW_TYPE_READINGS,
    "Readings & Films": OCW_TYPE_READINGS,
    "Readings & Listening": OCW_TYPE_READINGS,
    "Readings & Notes": OCW_TYPE_READINGS,
    "Readings and Discussion Schedule": OCW_TYPE_READINGS,
    "Readings and Films Guides": OCW_TYPE_READINGS,
    "Readings and Films": OCW_TYPE_READINGS,
    "Readings and Homework Preparation": OCW_TYPE_READINGS,
    "Readings and Lectures": OCW_TYPE_READINGS,
    "Readings and Listening": OCW_TYPE_READINGS,
    "Readings and Materials": OCW_TYPE_READINGS,
    "Readings and Music": OCW_TYPE_READINGS,
    "Readings and Other Assigned Materials": OCW_TYPE_READINGS,
    "Readings and Viewings": OCW_TYPE_READINGS,
    "Readings Questions": OCW_TYPE_READINGS,
    "Readings, Notes & Slides": OCW_TYPE_READINGS,
    "Recitation Notes": OCW_TYPE_RECITATIONS,
    "Recitation Videos": OCW_TYPE_VIDEOS,
    "Related Video Lectures": OCW_TYPE_LECTURE_VIDEOS,
    "Selected Lecture Notes": OCW_TYPE_LECTURE_NOTES,
    "Student Game Projects": OCW_TYPE_PROJECTS,
    "Student Projects by Year": OCW_TYPE_PROJECTS,
    "Team Projects": OCW_TYPE_PROJECTS,
    "Textbook Contents": OCW_TYPE_TEXTBOOKS,
    "Textbook": OCW_TYPE_TEXTBOOKS,
    "Video Lectures and Slides": OCW_TYPE_LECTURE_VIDEOS,
    "Video Lectures": OCW_TYPE_LECTURE_VIDEOS,
}
|
magosil86/ruffus | refs/heads/master | doc/images/web_front_page.py | 7 | #!/usr/bin/env python
"""
test2.py
[--log_file PATH]
[--verbose]
"""
################################################################################
#
# test2
#
#
# Copyright (c) 7/16/2010 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
import sys, os

# add self to search path for testing; when run as a script the module name
# is derived from the file name, otherwise the package import name is used
if __name__ == '__main__':
    exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
    module_name = os.path.split(sys.argv[0])[1]
    module_name = os.path.splitext(module_name)[0];
else:
    module_name = __name__

# Use import path from <<../python_modules>>: make the in-tree ruffus
# package importable when this example is run from the doc/images directory
if __name__ == '__main__':
    sys.path.insert(0, os.path.abspath(os.path.join(exe_path,"../..")))
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
    from optparse import OptionParser
    import StringIO

    parser = OptionParser(version="%prog 1.0", usage = "\n\n    %progs [options]")
    parser.add_option("-i", "--input_file", dest="input_file",
                      metavar="FILE",
                      type="string",
                      help="Name and path of input file. "
                           "Defaults to reading from STDIN.")

    #
    #   general options: verbosity / logging
    #
    parser.add_option("-v", "--verbose", dest = "verbose",
                      action="count", default=0,
                      help="Print more verbose messages for each additional verbose level.")
    parser.add_option("-L", "--log_file", dest="log_file",
                      metavar="FILE",
                      type="string",
                      help="Name and path of log file")
    parser.add_option("--skip_parameter_logging", dest="skip_parameter_logging",
                      action="store_true", default=False,
                      help="Do not print program parameters to log.")
    parser.add_option("--debug", dest="debug",
                      action="count", default=0,
                      help="Set default program parameters in debugging mode.")

    #
    #   pipeline options
    #
    parser.add_option("-t", "--target_tasks", dest="target_tasks",
                      action="append",
                      default = list(),
                      metavar="JOBNAME",
                      type="string",
                      help="Target task(s) of pipeline.")
    parser.add_option("-j", "--jobs", dest="jobs",
                      default=1,
                      metavar="N",
                      type="int",
                      help="Allow N jobs (commands) to run simultaneously.")
    parser.add_option("-n", "--just_print", dest="just_print",
                      action="store_true", default=False,
                      help="Don't actually run any commands; just print the pipeline.")
    parser.add_option("--flowchart", dest="flowchart",
                      metavar="FILE",
                      type="string",
                      help="Don't actually run any commands; just print the pipeline "
                           "as a flowchart.")

    #
    #   Less common pipeline options
    #
    parser.add_option("--key_legend_in_graph", dest="key_legend_in_graph",
                      action="store_true", default=False,
                      help="Print out legend and key for dependency graph.")
    parser.add_option("--draw_graph_horizontally", dest="draw_horizontally",
                      action="store_true", default=False,
                      help="Draw horizontal dependency graph.")
    parser.add_option("--forced_tasks", dest="forced_tasks",
                      action="append",
                      default = list(),
                      metavar="JOBNAME",
                      type="string",
                      help="Pipeline task(s) which will be included even if they are up to date.")

    # capture the formatted help text so it can be embedded in error messages
    f =StringIO.StringIO()
    parser.print_help(f)
    helpstr = f.getvalue()
    original_args = " ".join(sys.argv)
    (options, remaining_args) = parser.parse_args()

    # Debug / demo overrides: this script always renders the front-page
    # flowchart regardless of the command line.
    options.flowchart = "front_page_flowchart.png"
    options.key_legend_in_graph = True
    if options.debug:
        options.log_file = os.path.join("test2.log")
        options.verbose = 5
        options.log_parameters = True

    #
    #   mandatory options (none for this example)
    #
    mandatory_options = []
def check_mandatory_options (options, mandatory_options, helpstr):
"""
Check if specified mandatory options have b een defined
"""
missing_options = []
for o in mandatory_options:
if not getattr(options, o):
missing_options.append("--" + o)
if not len(missing_options):
return
raise Exception("Missing mandatory parameter%s: %s.\n\n%s\n\n" %
("s" if len(missing_options) > 1 else "",
", ".join(missing_options),
helpstr))
check_mandatory_options (options, mandatory_options, helpstr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from ruffus import *
from ruffus.ruffus_exceptions import JobSignalledBreak
#from json import dumps
#from collections import defaultdict
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Logger
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
    import logging
    import logging.handlers

    # Custom log level between DEBUG (10) and INFO (20).
    MESSAGE = 15
    logging.addLevelName(MESSAGE, "MESSAGE")

    def setup_std_logging (logger, log_file, verbose):
        """
        set up logging using programme options
        """
        class debug_filter(logging.Filter):
            """
            Ignore INFO messages
            """
            def filter(self, record):
                return logging.INFO != record.levelno

        class NullHandler(logging.Handler):
            """
            for when there is no logging
            """
            def emit(self, record):
                pass

        # Accept every message; the handlers below do the filtering.
        logger.setLevel(logging.DEBUG)
        has_handler = False

        # log to file if that is specified
        if log_file:
            handler = logging.FileHandler(log_file, delay=False)
            handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)6s - %(message)s"))
            handler.setLevel(MESSAGE)
            logger.addHandler(handler)
            has_handler = True

        # log to stderr if verbose
        if verbose:
            stderrhandler = logging.StreamHandler(sys.stderr)
            stderrhandler.setFormatter(logging.Formatter("    %(message)s"))
            stderrhandler.setLevel(logging.DEBUG)
            # When also logging to a file, keep INFO messages out of stderr
            # to avoid duplicated output.
            if log_file:
                stderrhandler.addFilter(debug_filter())
            logger.addHandler(stderrhandler)
            has_handler = True

        # no logging requested: swallow messages silently
        if not has_handler:
            logger.addHandler(NullHandler())

    #
    #   set up log
    #
    logger = logging.getLogger(module_name)
    setup_std_logging(logger, options.log_file, options.verbose)

    #
    #   Allow logging across Ruffus pipeline
    #
    def get_logger (logger_name, args):
        # Proxy factory: always hand back the module-level logger.
        return logger

    from ruffus.proxy_logger import *
    (logger_proxy,
     logging_mutex) = make_shared_logger_and_proxy (get_logger,
                                                    module_name,
                                                    {})

    #
    #   log programme parameters
    #
    if not options.skip_parameter_logging:
        programme_name = os.path.split(sys.argv[0])[1]
        logger.info("%s %s" % (programme_name, original_args))
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Pipeline
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@files(None, "a.1")
def task1(input_file, output_file):
open(output_file, "w")
@transform(task1, suffix("1"), "2")
def task2(input_file, output_file):
open(output_file, "w")
@transform(task2, suffix("2"), "3")
def task3(input_file, output_file):
open(output_file, "w")
@transform(task3, suffix("3"), "4")
def task4(input_file, output_file):
open(output_file, "w")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import time

# Touch the dummy files with staggered modification times so the rendered
# flowchart shows a mixture of up-to-date and out-of-date tasks.
# Close each handle immediately instead of leaking it.
open("a.2", "w").close()
time.sleep(1)
open("a.1", "w").close()
time.sleep(1)
open("a.3", "w").close()

# Render the pipeline flowchart; the output format is taken from the
# flowchart file's extension. Close the file once the graph is written.
flowchart_file = open(options.flowchart, "w")
pipeline_printout_graph ( flowchart_file,
                          os.path.splitext(options.flowchart)[1][1:],
                          [task4],
                          no_key_legend = not options.key_legend_in_graph,
                          user_colour_scheme = {"colour_scheme_index":0},
                          pipeline_name = "Pipeline Flowchart:",
                          size = (6,5),
                          dpi = 72,
                          )
flowchart_file.close()

# Remove the dummy files created above.
os.unlink("a.1")
os.unlink("a.2")
os.unlink("a.3")

#pipeline_run(options.target_tasks, options.forced_tasks,
#             multiprocess    = options.jobs,
#             logger          = stderr_logger,
#             verbose         = options.verbose)
|
agiliopadua/lammps | refs/heads/master | examples/ASPHERE/tri/tri.srd.viz.py | 49 | # Pizza.py viz of triangle + SRD output
d = dump("dump1.atom.srd dump2.atom.srd")
t = tdump("dump1.tri.srd dump2.tri.srd")
t.map(1,"id",2,"type",
3,"corner1x",4,"corner1y",5,"corner1z",
6,"corner2x",7,"corner2y",8,"corner2z",
9,"corner3x",10,"corner3y",11,"corner3z")
d.extra(t)
g = gl(d)
g.arad(1,0.02)
g.acol(1,"green")
g.arad(2,0.05)
g.acol(2,"green")
v = vcr(g)
|
hazemalsaied/IdenSys | refs/heads/master | Src/corpus.py | 1 | import operator
import os
from param import FeatParams, XPParams, Paths
import logging
class Corpus:
    """
    a class used to encapsulate all the information of the corpus
    """
    # Class-level lexicons shared across instances, (re)filled by getMWEDic():
    # MWE token lemmas, MWE lemma-string counts, single-token MWE types, and
    # the sentences in which each single-token MWE occurs.
    mweTokenDic, mweDictionary, mwtDictionary, mwtDictionaryWithSent = {}, {}, {}, {}

    def __init__(self, langName):
        """Build the corpus for ``langName``.

        Reads the train/test .parsemetsv files (and .conllu files when
        available), builds Sentence/Token/VMWE objects, tags special MWE
        configurations, and splits the data into training/testing lists.
        """
        logging.warn('Language name: {0}'.format(langName))
        self.trainingSents, self.testingSents, self.trainDataSet = [], [], []
        # Corpus files live under <corporaPath>/<langName>/
        path = os.path.join(Paths.corporaPath, langName)
        mweFile = os.path.join(path, 'train.parsemetsv')
        testMweFile = os.path.join(path, 'test.parsemetsv')
        conlluFile, testConllu = self.getTrainAndTestConlluPath(path)
        testFile = os.path.join(path, 'test.parsemetsv')
        if conlluFile and testConllu:
            # Preferred path: build sentences from CoNLL-U (full morpho-
            # syntax), then overlay MWE annotations from the parsemetsv file.
            self.trainDataSet = Corpus.readConlluFile(conlluFile)
            Corpus.readMweFile(mweFile, self.trainDataSet)
            for sent in self.trainDataSet:
                sent.recognizeEmbedded()
                sent.recognizeInterleavingVMWEs()
                sent.recognizeContinouosandSingleVMWEs()
            if XPParams.realExper:
                self.testDataSet = Corpus.readConlluFile(testConllu)
                Corpus.readMweFile(testMweFile, self.testDataSet)
        else:
            # Fallback: parsemetsv only (no POS/dependency information).
            self.trainDataSet = Corpus.readSentences(mweFile)
            self.testDataSet = Corpus.readSentences(testFile, forTest=True)
            for sent in self.trainDataSet:
                sent.recognizeEmbedded()
                sent.recognizeInterleavingVMWEs()
                sent.recognizeContinouosandSingleVMWEs()
        self.getTrainAndTestSents()
        if XPParams.useCrossValidation:
            self.testRange, self.trainRange = self.getRangs()
        else:
            self.testRange, self.trainRange = None, None
def getTrainAndTestConlluPath(self, path):
conlluFile, testConllu = None, None
if XPParams.useAutoGeneratedPOS and XPParams.useAutoGeneratedDEP and os.path.isfile(
os.path.join(path, 'train.conllu.autoPOS.autoDep')):
conlluFile = os.path.join(path, 'train.conllu.autoPOS.autoDep')
if os.path.isfile(os.path.join(path, 'test.conllu.autoPOS.autoDep')):
testConllu = os.path.join(path, 'test.conllu.autoPOS.autoDep')
elif XPParams.useAutoGeneratedPOS and os.path.isfile(os.path.join(path, 'train.conllu.autoPOS')):
conlluFile = os.path.join(path, 'train.conllu.autoPOS')
if os.path.isfile(os.path.join(path, 'test.conllu.autoPOS')):
testConllu = os.path.join(path, 'test.conllu.autoPOS')
elif os.path.isfile(os.path.join(path, 'train.conllu')):
conlluFile = os.path.join(path, 'train.conllu')
if os.path.isfile(os.path.join(path, 'test.conllu')):
testConllu = os.path.join(path, 'test.conllu')
return conlluFile, testConllu
    @staticmethod
    def readConlluFile(conlluFile):
        """Parse a CoNLL-U file into a list of Sentence objects with fully
        populated Token attributes (lemma, POS, morphology, dependencies)."""
        sentences = []
        with open(conlluFile) as corpusFile:
            # Read the corpus file
            lines = corpusFile.readlines()
            sent = None
            senIdx = 0
            sentId = ''
            lineNum = 0
            # Counters for rows missing universal/language-specific POS tags.
            missedUnTag = 0
            missedExTag = 0
            for line in lines:
                # Strip the trailing newline only.
                if len(line) > 0 and line.endswith('\n'):
                    line = line[:-1]
                if line.startswith('# sentid:'):
                    sentId = line.split('# sentid:')[1].strip()
                elif line.startswith('# sentence-text:'):
                    continue
                elif line.startswith('1\t'):
                    # Token index 1 marks the start of a new sentence.
                    if sentId.strip() != '':
                        sent = Sentence(senIdx, sentid=sentId)
                    else:
                        sent = Sentence(senIdx)
                    senIdx += 1
                    sentences.append(sent)
                if not line.startswith('#'):
                    lineParts = line.split('\t')
                    # Skip malformed rows and multiword ranges like "8-9".
                    if len(lineParts) != 10 or '-' in lineParts[0]:
                        continue
                    lineNum += 1
                    if lineParts[3] == '_':
                        missedUnTag += 1
                    if lineParts[4] == '_':
                        missedExTag += 1
                    morpho = ''
                    if lineParts[5] != '_':
                        morpho = lineParts[5].split('|')
                    # Column 6 holds the dependency head (may be absent).
                    if lineParts[6] != '_':
                        token = Token(lineParts[0], lineParts[1].lower(), lemma=lineParts[2],
                                      abstractPosTag=lineParts[3], morphologicalInfo=morpho,
                                      dependencyParent=int(lineParts[6]),
                                      dependencyLabel=lineParts[7])
                    else:
                        token = Token(lineParts[0], lineParts[1].lower(), lemma=lineParts[2],
                                      abstractPosTag=lineParts[3], morphologicalInfo=morpho,
                                      dependencyLabel=lineParts[7])
                    # Choose universal vs language-specific POS tag, falling
                    # back to the universal tag when the specific one is '_'.
                    if XPParams.useUniversalPOSTag:
                        token.posTag = lineParts[3]
                    else:
                        if lineParts[4] != '_':
                            token.posTag = lineParts[4]
                        else:
                            token.posTag = lineParts[3]
                    # Associate the token with the sentence
                    sent.tokens.append(token)
                    sent.text += token.text + ' '
        return sentences
    @staticmethod
    def readMweFile(mweFile, sentences):
        """Overlay the MWE annotations of a parsemetsv file onto already
        constructed ``sentences``; returns the number of MWEs created."""
        mweNum = 0
        with open(mweFile) as corpusFile:
            # Read the corpus file
            lines = corpusFile.readlines()
            noSentToAssign = False
            sentIdx = 0
            for line in lines:
                if line == '\n' or line.startswith('# sentence-text:') or (
                        line.startswith('# sentid:') and noSentToAssign):
                    continue
                if len(line) > 0 and line.endswith('\n'):
                    line = line[:-1]
                if line.startswith('1\t'):
                    # Token index 1: advance to the matching Sentence object.
                    sent = sentences[sentIdx]
                    sentIdx += 1
                lineParts = line.split('\t')
                # Skip multiword ranges like "8-9".
                if '-' in lineParts[0]:
                    continue
                if lineParts is not None and len(lineParts) == 4 and lineParts[3] != '_':
                    token = sent.tokens[int(lineParts[0]) - 1]
                    # Column 4 may carry several "id:type" / "id" labels.
                    vMWEids = lineParts[3].split(';')
                    for vMWEid in vMWEids:
                        id = int(vMWEid.split(':')[0])
                        # New MWE captured
                        if id not in sent.getWMWEIds():
                            if len(vMWEid.split(':')) > 1:
                                type = str(vMWEid.split(':')[1])
                                vMWE = VMWE(id, token, type)
                            else:
                                vMWE = VMWE(id, token)
                            mweNum += 1
                            sent.vMWEs.append(vMWE)
                        # Another token of an under-processing MWE
                        else:
                            vMWE = sent.getVMWE(id)
                            if vMWE is not None:
                                vMWE.addToken(token)
                        # associate the token with the MWE
                        token.setParent(vMWE)
        return mweNum
@staticmethod
def readSentences(mweFile, forTest=False):
sentences = []
sentNum, mweNum = 0, 0
with open(mweFile) as corpusFile:
# Read the corpus file
lines = corpusFile.readlines()
sent = None
senIdx = 1
for line in lines:
if len(line) > 0 and line.endswith('\n'):
line = line[:-1]
if line.startswith('1\t'):
# sentId = line.split('# sentid:')[1]
if sent is not None:
# Represent the sentence as a sequece of tokens and POS tags
sent.setTextandPOS()
# if not forTest:
sent.recognizeEmbedded()
sent.recognizeInterleavingVMWEs()
sent = Sentence(senIdx)
senIdx += 1
sentences.append(sent)
elif line.startswith('# sentence-text:'):
if len(line.split(':')) > 1:
sent.text = line.split('# sentence-text:')[1]
lineParts = line.split('\t')
# Empty line or lines of the form: "8-9 can't _ _"
if len(lineParts) != 4 or '-' in lineParts[0]:
continue
token = Token(lineParts[0], lineParts[1])
# Trait the MWE
# if not forTest and lineParts[3] != '_':
if lineParts[3] != '_':
vMWEids = lineParts[3].split(';')
for vMWEid in vMWEids:
id = int(vMWEid.split(':')[0])
# New MWE captured
if id not in sent.getWMWEIds():
type = str(vMWEid.split(':')[1])
vMWE = VMWE(id, token, type)
mweNum += 1
sent.vMWEs.append(vMWE)
# Another token of an under-processing MWE
else:
vMWE = sent.getVMWE(id)
if vMWE is not None:
vMWE.addToken(token)
# associate the token with the MWE
token.setParent(vMWE)
# Associate the token with the sentence
sent.tokens.append(token)
sentNum = len(sentences)
return sentences
@staticmethod
def getMWEDic(sents):
mweDictionary, mweTokenDictionary, mwtDictionary = {}, {}, {}
for sent in sents:
for mwe in sent.vMWEs:
lemmaString = mwe.getLemmaString()
if len(mwe.tokens) == 1:
if lemmaString not in mwtDictionary:
mwtDictionary[lemmaString] = mwe.type
if lemmaString not in Corpus.mwtDictionaryWithSent:
Corpus.mwtDictionaryWithSent[lemmaString] = [sent]
elif lemmaString in Corpus.mwtDictionaryWithSent and Corpus.mwtDictionaryWithSent[
lemmaString] is not None:
Corpus.mwtDictionaryWithSent[lemmaString] = Corpus.mwtDictionaryWithSent[lemmaString].append(
sent)
if lemmaString in mweDictionary:
mweDictionary[lemmaString] += 1
for token in mwe.tokens:
if token.lemma.strip() != '':
mweTokenDictionary[token.lemma] = 1
else:
mweTokenDictionary[token.text] = 1
else:
mweDictionary[lemmaString] = 1
for token in mwe.tokens:
if token.lemma.strip() != '':
mweTokenDictionary[token.lemma] = 1
else:
mweTokenDictionary[token.text] = 1
if FeatParams.usePreciseDictionary:
for key1 in mweDictionary.keys():
for key2 in mweDictionary.keys():
if key1 != key2:
if key1 in key2:
mweDictionary.pop(key1, None)
elif key2 in key1:
mweDictionary.pop(key2, None)
return mweDictionary, mweTokenDictionary, mwtDictionary
def initializeSents(self, training=True):
# Erasing each effect of the previous iteration
sents = self.trainingSents
if not training:
sents = self.testingSents
for sent in sents:
sent.identifiedVMWEs = []
sent.initialTransition = None
sent.featuresInfo = []
sent.blackMergeNum = 0
for mwe in sent.vMWEs:
mwe.isInTrainingCorpus = 0
def getTrainAndTestSents(self):
if XPParams.debug:
self.trainingSents = self.trainDataSet[:500]
self.testingSents = self.testDataSet[:200]
elif XPParams.realExper:
self.trainingSents = self.trainDataSet
self.testingSents = self.testDataSet
elif XPParams.useCrossValidation:
self.trainDataSet = self.trainDataSet
self.testDataSet = []
return [], []
if len(self.trainingSents) <= 0:
idx = 0
self.trainingSents, self.testingSents = [], []
for sent in self.trainDataSet:
if idx % 5 == 0:
self.testingSents.append(sent)
else:
self.trainingSents.append(sent)
idx += 1
return self.trainingSents, self.testingSents
def getRangs(self):
sents = self.trainDataSet
testNum = int(len(sents) * 0.2)
testRanges = [[0, testNum], [testNum, 2 * testNum], [2 * testNum, 3 * testNum], [3 * testNum, 4 * testNum],
[4 * testNum, len(sents)]]
trainRanges = [[testNum, len(sents)], [0, testNum, 2 * testNum, len(sents)],
[0, 2 * testNum, 3 * testNum, len(sents)], [0, 3 * testNum, 4 * testNum, len(sents)],
[0, 4 * testNum]]
return testRanges, trainRanges
    def divideSents(self):
        """Populate training/testing sentences for the current CV fold."""
        # Fold index for the current cross-validation iteration.
        x = XPParams.currentIteration
        if self.testRange is None or self.trainRange is None:
            return
        self.testingSents = self.trainDataSet[self.testRange[x][0]: self.testRange[x][1]]
        if len(self.trainRange[x]) == 2:
            # Contiguous training span.
            self.trainingSents = self.trainDataSet[self.trainRange[x][0]: self.trainRange[x][1]]
        else:
            # Two spans surrounding the test fold.
            self.trainingSents = self.trainDataSet[self.trainRange[x][0]: self.trainRange[x][1]] + \
                                 self.trainDataSet[self.trainRange[x][2]: self.trainRange[x][3]]
    def update(self):
        """Refresh fold selection, per-sentence state and the MWE lexicons."""
        if XPParams.useCrossValidation:
            self.divideSents()
        self.initializeSents()
        # Rebuild the shared class-level lexicons from the training split.
        Corpus.mweDictionary, Corpus.mweTokenDic, Corpus.mwtDictionary = Corpus.getMWEDic(self.trainingSents)
    @staticmethod
    def getNewIdentifiedMWE(testingSents):
        """Return the ratios of correctly identified MWEs that are new
        (resp. rare, seen fewer than 5 times) w.r.t. the training lexicon.

        NOTE(review): raises ZeroDivisionError when every gold MWE of the
        testing sentences already occurs in Corpus.mweDictionary — confirm
        callers never hit that case.
        """
        idenMWEs = 0
        newIdenMWEs = 0
        semiNewIdenMWEs = 0
        for sent in testingSents:
            # Count gold MWEs unseen in the training dictionary.
            for mwe in sent.vMWEs:
                if mwe.getLemmaString() not in Corpus.mweDictionary.keys():
                    idenMWEs += 1
            for mwe in sent.identifiedVMWEs:
                if mwe.getLemmaString() not in Corpus.mweDictionary.keys():
                    # Unseen identification counts only when it matches a
                    # gold MWE of the same sentence.
                    for vmw1 in sent.vMWEs:
                        if vmw1.getLemmaString() == mwe.getLemmaString():
                            newIdenMWEs += 1
                            break
                elif mwe.getLemmaString() in Corpus.mweDictionary.keys() and \
                        Corpus.mweDictionary[mwe.getLemmaString()] < 5:
                    semiNewIdenMWEs += 1
        return float(newIdenMWEs) / idenMWEs, float(semiNewIdenMWEs) / idenMWEs
def __iter__(self):
for sent in self.trainingSents:
yield sent
    def __str__(self):
        """Render the testing sentences in parsemetsv format: one token per
        line (index, text, '_', MWE labels), blank line between sentences."""
        res = ''
        for sent in self.testingSents:
            tokenList = []
            for token in sent.tokens:
                tokenList.append(token.text.strip())
            labels = ['_'] * len(tokenList)
            for mwe in sent.identifiedVMWEs:
                for token in mwe.tokens:
                    # First MWE id on a token is written bare, further ids
                    # are ';'-separated.
                    if labels[token.position - 1] == '_':
                        labels[token.position - 1] = str(mwe.id)
                    else:
                        labels[token.position - 1] += ';' + str(mwe.id)
                    # The MWE type is attached to its first token only.
                    if mwe.tokens[0] == token and mwe.type:
                        labels[token.position - 1] += ':' + mwe.type
            for i in range(len(tokenList)):
                res += '{0}\t{1}\t{2}\t{3}\n'.format(i + 1, tokenList[i], '_', labels[i])
            res += '\n'
        return res
class Sentence:
    """
    a class used to encapsulate all the information of a sentence
    """

    def __init__(self, id, sentid=''):
        """Create an empty sentence; content is filled in by the readers."""
        self.sentid = sentid
        self.id = id
        # Token objects in surface order.
        self.tokens = []
        # Gold VMWEs and the VMWEs identified by the system.
        self.vMWEs = []
        self.identifiedVMWEs = []
        self.text = ''
        self.initialTransition = None
        self.featuresInfo = []
        # Flags describing special VMWE configurations in this sentence.
        self.containsEmbedding = False
        self.containsInterleaving = False
        self.containsDistributedEmbedding = False
        self.withRandomSelection = False
        # Counters matching the flags above.
        self.blackMergeNum, self.interleavingNum, self.embeddedNum, self.distributedEmbeddingNum = 0, 0, 0, 0
    def getWMWEs(self):
        """Return the gold VMWEs of this sentence."""
        return self.vMWEs
def getWMWEIds(self):
result = []
for vMWE in self.vMWEs:
result.append(vMWE.getId())
return result
def getVMWE(self, id):
for vMWE in self.vMWEs:
if vMWE.getId() == int(id):
return vMWE
return None
def setTextandPOS(self):
tokensTextList = []
for token in self.tokens:
self.text += token.text + ' '
tokensTextList.append(token.text)
self.text = self.text.strip()
    def recognizeEmbedded(self, recognizeIdentified=False):
        """Flag VMWEs nested inside longer VMWEs; return how many were found.

        Works on the gold VMWEs by default, or on the identified ones when
        ``recognizeIdentified`` is True (counters/flags on the sentence are
        only updated in the gold case).
        """
        if recognizeIdentified:
            vmws = self.identifiedVMWEs
        else:
            vmws = self.vMWEs
        if len(vmws) <= 1:
            return 0
        result = 0
        # Compare every pair: the shorter MWE may be embedded in the longer.
        for vMwe1 in vmws:
            if vMwe1.isEmbedded:
                continue
            for vMwe2 in vmws:
                if vMwe1 is not vMwe2 and len(vMwe1.tokens) < len(vMwe2.tokens):
                    # Direct embedding: the shorter surface string occurs
                    # inside the longer one.
                    if vMwe1.getString() in vMwe2.getString():
                        vMwe1.isEmbedded = True
                        if not recognizeIdentified:
                            self.embeddedNum += 1
                            self.containsEmbedding = True
                        result += 1
                    else:
                        # Distributed embedding: every lemma of the shorter
                        # MWE occurs somewhere in the longer one's lemmas.
                        isEmbedded = True
                        vMwe2Lemma = vMwe2.getLemmaString()
                        for token in vMwe1.tokens:
                            if token.getLemma() not in vMwe2Lemma:
                                isEmbedded = False
                                break
                        if isEmbedded:
                            vMwe1.isDistributedEmbedding = True
                            vMwe1.isEmbedded = True
                            if not recognizeIdentified:
                                self.containsDistributedEmbedding = True
                                self.embeddedNum += 1
                                self.distributedEmbeddingNum += 1
                                self.containsEmbedding = True
                            result += 1
        if not recognizeIdentified:
            self.getDirectParents()
        return result
    def recognizeContinouosandSingleVMWEs(self):
        """Flag single-word and continuous VMWEs; return both counts."""
        singleWordExp, continousExp = 0, 0
        for mwe in self.vMWEs:
            if len(mwe.tokens) == 1:
                # A one-token MWE is trivially continuous.
                mwe.isSingleWordExp = True
                mwe.isContinousExp = True
                singleWordExp += 1
                continousExp += 1
            else:
                if self.isContinousMwe(mwe):
                    continousExp += 1
        return singleWordExp, continousExp
def isContinousMwe(self, mwe):
idxs = []
for token in mwe.tokens:
idxs.append(self.tokens.index(token))
range = xrange(min(idxs), max(idxs))
mwe.isContinousExp = True
for i in range:
if i not in idxs:
mwe.isContinousExp = False
return mwe.isContinousExp
    def recognizeInterleavingVMWEs(self):
        """Mark interleaving VMWEs (expressions sharing a token without one
        being embedded in the other); return how many were flagged."""
        if len(self.vMWEs) <= 1:
            return 0
        result = 0
        for vmwe in self.vMWEs:
            if vmwe.isEmbedded or vmwe.isInterleaving:
                continue
            for token in vmwe.tokens:
                # A token belonging to several MWEs signals interleaving.
                if len(token.parentMWEs) > 1:
                    for parent in token.parentMWEs:
                        if parent is not vmwe:
                            if parent.isEmbedded or parent.isInterleaving:
                                continue
                            # The smaller expression is the interleaving one.
                            if len(parent.tokens) <= len(vmwe.tokens):
                                parent.isInterleaving = True
                            else:
                                vmwe.isInterleaving = True
                            self.containsInterleaving = True
                            self.interleavingNum += 1
                            result += 1
        return result
    def getCorpusText(self, gold=True):
        """Serialize the sentence in parsemetsv format using either the gold
        (default) or the identified MWEs."""
        if gold:
            mwes = self.vMWEs
        else:
            mwes = self.identifiedVMWEs
        lines = ''
        idx = 1
        for token in self.tokens:
            line = str(idx) + '\t' + token.text + '\t_\t'
            idx += 1
            for mwe in mwes:
                if token in mwe.tokens:
                    # First id on the line is bare, further ids ';'-separated.
                    if line.endswith('\t'):
                        line += str(mwe.id)
                    else:
                        line += ';' + str(mwe.id)
                    # The type is attached to the first token of the MWE.
                    if token == mwe.tokens[0]:
                        line += ':' + str(mwe.type)
            # Tokens outside every MWE get a '_' placeholder.
            if line.endswith('\t'):
                line += '_'
            lines += line + '\n'
        return lines
def getCorpusTextWithPlus(self):
goldMwes = self.vMWEs
predMwes = self.identifiedVMWEs
lines = ''
idx = 1
for token in self.tokens:
line = str(idx) + '\t' + token.text + '\t_\t'
idx += 1
for mwe in goldMwes:
if token in mwe.tokens:
if line.endswith('\t'):
line += '+'
break
if line.endswith('\t'):
line += '_\t'
else:
line += '\t'
for mwe in predMwes:
if token in mwe.tokens:
if line.endswith('\t'):
line += '+'
break
if line.endswith('\t'):
line += '_'
lines += line + '\n'
return lines
def isPrintable(self):
if len(self.vMWEs) > 2:
return True
# if not PrintParams.printSentsWithEmbeddedMWEs:
# return
# for mwe in self.vMWEs:
# if mwe.isEmbedded:
# return True
return False
# if PrintParams.printSentsWithEmbeddedMWEs and len(self.vMWEs) > 2:
# for mwe in self.vMWEs:
# if mwe.isEmbedded:
# return True
# return False
def getDirectParents(self):
for token in self.tokens:
token.getDirectParent()
@staticmethod
def getTokens(elemlist):
if isinstance(elemlist, Token):
return [elemlist]
if isinstance(elemlist, list):
result = []
for elem in elemlist:
if isinstance(elem, Token):
result.append(elem)
elif isinstance(elem, list):
result.extend(Sentence.getTokens(elem))
return result
return [elemlist]
@staticmethod
def getTokenLemmas(tokens):
text = ''
tokens = Sentence.getTokens(tokens)
for token in tokens:
if token.lemma != '':
text += token.lemma + ' '
else:
text += token.text + ' '
return text.strip()
def printSummary(self):
vMWEText = ''
for vMWE in self.vMWEs:
vMWEText += str(vMWE) + '\n'
if len(self.identifiedVMWEs) > 0:
identifiedMWE = '### Identified MWEs: \n'
for mwe in self.identifiedVMWEs:
identifiedMWE += str(mwe) + '\n'
else:
identifiedMWE = ''
return '## Sentence No. ' + str(self.id) + ' - ' + self.sentid + '\n' + self.text + \
'\n### Existing MWEs: \n' + vMWEText + identifiedMWE
def __str__(self):
vMWEText = ''
for vMWE in self.vMWEs:
vMWEText += str(vMWE) + '\n\n'
if len(self.identifiedVMWEs) > 0:
identifiedMWE = '### Identified MWEs: \n'
for mwe in self.identifiedVMWEs:
identifiedMWE += str(mwe) + '\n\n'
else:
identifiedMWE = ''
# featuresInfo = ''
result = ''
transition = self.initialTransition
idx = 0
tab = ' '
while True:
if transition is not None:
if transition.type is not None:
type = transition.type.name
else:
type = tab * 8
configuration = str(transition.configuration)
if type.startswith('MERGE') or type.startswith('WHITE'):
type = '**' + type + '**' + tab * 3
if len(type) == 'SHIFT':
type = type + tab * 3
result += '\n\n' + str(
transition.id) + '- ' + type + tab * 3 + '>' + tab * 3 + configuration + '\n\n'
if transition.next is None:
break
transition = transition.next
idx += 1
else: # result += str(self.featuresInfo[1][idx]) + '\n\n'
break
text = ''
for token in self.tokens:
if token.parentMWEs is not None and len(token.parentMWEs) > 0:
text += '**' + token.text + '**' + ' '
else:
text += token.text + ' '
return '## Sentence No. ' + str(self.id) + ' - ' + self.sentid + '\n' + text + \
'\n### Existing MWEs: \n' + vMWEText + identifiedMWE # + 'black Merge Num : ' + str(self.blackMergeNum) + ' Interleaving Num: ' + str(self.interleavingNum) \
# + '\n' + result #+ str(self.initialTransition) + '\n### Features: \n' + featuresInfo
def __iter__(self):
for vmwe in self.vMWEs:
yield vmwe
class VMWE:
    """
    A class used to encapsulate the information of a verbal multi-word
    expression (VMWE): its tokens, category label, and structural flags
    (single-word, continuous, embedded, distributed-embedding, interleaving).
    """

    def __init__(self, id, token=None, type='', isEmbedded=False, isInterleaving=False, isDistributedEmbedding=False,
                 isInTrainingCorpus=0):
        # Numeric identifier of the expression within its sentence.
        self.id = int(id)
        # 0 when not (yet) matched against the training corpus.
        self.isInTrainingCorpus = isInTrainingCorpus
        self.tokens = []
        self.isSingleWordExp = False
        self.isContinousExp = False
        if token is not None:
            self.tokens.append(token)
        # Category label (e.g. 'ID', 'LVC'); empty when unknown.
        self.type = type
        self.isEmbedded = isEmbedded
        self.isDistributedEmbedding = isDistributedEmbedding
        self.isInterleaving = isInterleaving
        self.isVerbal = True
        self.directParent = None

    def getId(self):
        """Return the numeric id of this expression."""
        return self.id

    def addToken(self, token):
        """Append a token to the expression."""
        self.tokens.append(token)

    @staticmethod
    def getVMWENumber(tokens):
        """Count how many members of ``tokens`` are VMWE instances."""
        result = 0
        for token in tokens:
            if isinstance(token, VMWE):
                result += 1
        return result

    @staticmethod
    def haveSameParents(tokens):
        """Return the VMWEs shared by *all* of ``tokens`` (excluding
        interleaving and distributed-embedding parents), or None when some
        token has no parent at all."""
        # Do they have all a parent?
        for token in tokens:
            if not token.parentMWEs:
                return None
        # Get all parents of tokens
        parents = set()
        for token in tokens:
            for parent in token.parentMWEs:
                parents.add(parent)
        if len(parents) == 1:
            return list(parents)
        selectedParents = list(parents)
        # Keep only parents common to every token.
        for parent in parents:
            for token in tokens:
                if parent not in token.parentMWEs:
                    if parent in selectedParents:
                        selectedParents.remove(parent)
        # Discard structurally problematic parents.
        for parent in list(selectedParents):
            if parent.isInterleaving or parent.isDistributedEmbedding:
                selectedParents.remove(parent)
        return selectedParents

    # @staticmethod
    # def haveSameDirectParents(s0, s1):
    #     if isinstance(s0, Token) and isinstance(s1, Token):
    #         return s0.directParent == s1.directParent
    #     if isinstance(s0, list):

    @staticmethod
    def getParents(tokens, type=None):
        """Return the parent VMWEs spanning exactly ``tokens`` (optionally
        restricted to category ``type``), or None when a token has no
        parent.  NOTE(review): the single-token fast path compares
        ``vmwe.type.lower()`` against ``type.lower()`` while the generic
        path compares against ``type`` as given — callers appear to pass
        ``type`` already lower-cased; confirm before relying on it."""
        if len(tokens) == 1:
            if tokens[0].parentMWEs:
                for vmwe in tokens[0].parentMWEs:
                    if len(vmwe.tokens) == 1:  # and vmwe.type.lower() != type:
                        if type is not None:
                            if vmwe.type.lower() == type.lower():
                                return [vmwe]
                            else:
                                return None
                        else:
                            return [vmwe]
        # Do they have all a parent?
        for token in tokens:
            if len(token.parentMWEs) == 0:
                return None
        # Get all parents of tokens
        parents = set()
        for token in tokens:
            for parent in token.parentMWEs:
                parents.add(parent)
        selectedParents = list(parents)
        for parent in parents:
            # A candidate must span exactly as many tokens as requested...
            if len(parent.tokens) != len(tokens):
                if parent in selectedParents:
                    selectedParents.remove(parent)
                continue
            # ...and be a parent of every requested token.
            for token in tokens:
                if parent not in token.parentMWEs:
                    if parent in selectedParents:
                        selectedParents.remove(parent)
        for parent in list(selectedParents):
            if parent.isInterleaving or parent.isDistributedEmbedding:
                selectedParents.remove(parent)
        if type is not None:
            for parent in list(selectedParents):
                if parent.type.lower() != type:
                    selectedParents.remove(parent)
        return selectedParents

    # def getDirectParent(self):
    #     self.directParent = None
    #     if self.parentMWEs is not None and len(self.parentMWEs)> 0:
    #         if len(self.parentMWEs) == 1:
    #             self.directParent = self.parentMWEs[0]
    #         else:
    #             parents = sorted(self.parentMWEs, key=lambda VMWE : (VMWE.isInterleaving, VMWE.isEmbedded, len(VMWE.tokens) ))
    #             for parent in parents:
    #                 if not parent.isInterleaving:
    #                     self.directParent = parent
    #                     break
    #     return self.directParent

    def __str__(self):
        """Markdown rendering: ``id- **tokens** (type[, corpus-flag])`` plus
        embedding/interleaving markers."""
        tokensStr = ''
        for token in self.tokens:
            tokensStr += token.text + ' '
        tokensStr = tokensStr.strip()
        isInterleaving = ''
        if self.isInterleaving:
            isInterleaving = ', Interleaving '
        isEmbedded = ''
        if self.isEmbedded:
            if self.isDistributedEmbedding:
                isEmbedded = ', DistributedEmbedding '
            else:
                isEmbedded = ', Embedded '
        # isContinousExp =''
        # if self.isContinousExp:
        #     isContinousExp = 'Continous'
        type = ''
        if self.type != '':
            type = '(' + self.type
            if self.isInTrainingCorpus != 0:
                type += ', ' + str(self.isInTrainingCorpus) + ')'
            else:
                type += ')'
        return str(self.id) + '- ' + '**' + tokensStr + '** ' + type + isEmbedded + isInterleaving

    def __iter__(self):
        # Iterating a VMWE yields its tokens.
        for t in self.tokens:
            yield t

    def getString(self):
        """Lower-cased, space-joined surface form of the expression."""
        result = ''
        for token in self.tokens:
            result += token.text + ' '
        return result[:-1].lower()

    def getLemmaString(self):
        """Lower-cased lemma form; falls back to the surface form for
        tokens whose lemma is blank."""
        result = ''
        for token in self.tokens:
            if token.lemma.strip() != '':
                result += token.lemma + ' '
            else:
                result += token.text + ' '
        return result[:-1].lower()

    def In(self, vmwes):
        """True when an expression with the same surface form is in ``vmwes``."""
        for vmwe in vmwes:
            if vmwe.getString() == self.getString():
                return True
        return False

    def __eq__(self, other):
        # Equality is defined on the lemma string; comparing against a
        # non-VMWE is treated as a programming error.
        if not isinstance(other, VMWE):
            raise TypeError()
        if self.getLemmaString() == other.getLemmaString():
            return True
        return False

    def __hash__(self):
        # Must stay consistent with __eq__ (both use the lemma string).
        return hash(self.getLemmaString())

    def __contains__(self, vmwe):
        # ``vmwe in self``: strict containment at the lemma level — a VMWE
        # never contains itself or a lemma-identical expression.
        if not isinstance(vmwe, VMWE):
            raise TypeError()
        if vmwe is self or vmwe.getLemmaString() == self.getLemmaString():
            return False
        if vmwe.getLemmaString() in self.getLemmaString():
            return True
        # Fallback: every token lemma must occur in this expression's lemmas.
        for token in vmwe.tokens:
            if token.getLemma() not in self.getLemmaString():
                return False
        return True
class Token:
    """
    Encapsulates one sentence token: its position, surface form, lemma,
    POS tags, morphological features, dependency link, and the VMWEs it
    belongs to.
    """

    def __init__(self, position, txt, lemma='', posTag='', abstractPosTag='', morphologicalInfo=None,
                 dependencyParent=-1, dependencyLabel=''):
        # 1-based position of the token in its sentence.
        self.position = int(position)
        self.text = txt
        self.lemma = lemma
        self.abstractPosTag = abstractPosTag
        self.posTag = posTag
        # BUG FIX: the default used to be a mutable ``[]`` evaluated once at
        # definition time and shared by every Token created without explicit
        # morphology; each instance now gets its own fresh list.
        self.morphologicalInfo = [] if morphologicalInfo is None else morphologicalInfo
        self.dependencyParent = dependencyParent
        self.dependencyLabel = dependencyLabel
        # VMWEs this token is a member of.
        self.parentMWEs = []
        self.directParent = None

    def setParent(self, vMWE):
        """Register ``vMWE`` as a parent expression of this token."""
        self.parentMWEs.append(vMWE)

    def getLemma(self):
        """Return the lemma when available, the surface form otherwise
        (whitespace-stripped in both cases)."""
        if self.lemma != '':
            return self.lemma.strip()
        return self.text.strip()

    def getDirectParent(self):
        """Pick (and cache on ``self.directParent``) the direct parent VMWE.

        With several candidates they are sorted by
        ``(isInterleaving, isEmbedded, span length)`` in reverse and the
        first non-interleaving one wins; a sole interleaving parent yields
        None.
        """
        self.directParent = None
        if self.parentMWEs is not None and len(self.parentMWEs) > 0:
            if len(self.parentMWEs) == 1:
                if not self.parentMWEs[0].isInterleaving:
                    self.directParent = self.parentMWEs[0]
            else:
                # Lambda parameter renamed: it previously shadowed the VMWE class.
                candidates = sorted(self.parentMWEs,
                                    key=lambda mwe: (mwe.isInterleaving, mwe.isEmbedded, len(mwe.tokens)),
                                    reverse=True)
                for parent in candidates:
                    if not parent.isInterleaving:
                        self.directParent = parent
                        break
        return self.directParent

    def In(self, vmwe):
        """True when ``vmwe`` contains a token with the same text
        (case-insensitive) and the same position as this one."""
        for token in vmwe.tokens:
            if token.text.lower() == self.text.lower() and token.position == self.position:
                return True
        return False

    def isMWT(self):
        """Return a single-token parent VMWE (multi-word token), if any."""
        if self.parentMWEs:
            for vmw in self.parentMWEs:
                if len(vmw.tokens) == 1:
                    return vmw
        return None

    def __str__(self):
        parentTxt = ''
        if len(self.parentMWEs) != 0:
            for parent in self.parentMWEs:
                parentTxt += str(parent) + '\n'
        return str(self.position) + ' : ' + self.text + ' : ' + self.posTag + '\n' + 'parent VMWEs\n' + parentTxt
def getTokens(elemlist):
    """Flatten an arbitrarily nested list structure into a flat list of
    ``corpus.Token`` objects.

    The Token check is a class-name string comparison (rather than
    ``isinstance``) so the helper also matches Tokens coming from a
    differently-imported ``corpus`` module.  List members that are neither
    Tokens nor lists are dropped; a non-list argument is returned wrapped
    in a one-element list.
    """
    if str(elemlist.__class__) == 'corpus.Token':
        return [elemlist]
    if not isinstance(elemlist, list):
        return [elemlist]
    collected = []
    for member in elemlist:
        if str(member.__class__) == 'corpus.Token':
            collected.append(member)
        elif isinstance(member, list) and len(member) == 1 and isinstance(member[0], list):
            # Unwrap a singleton list-of-list one level before recursing.
            collected.extend(getTokens(member[0]))
        elif isinstance(member, list) and len(member):
            collected.extend(getTokens(member))
    return collected
nesdis/djongo | refs/heads/master | tests/django_tests/tests/v21/tests/test_runner/test_debug_sql.py | 76 | import unittest
from io import StringIO
from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from .models import Person
@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
class TestDebugSQL(unittest.TestCase):
    """Verify DiscoverRunner's --debug-sql output.

    Six throwaway inner TestCase classes (pass/fail/error, each also in a
    subTest variant) are run through a runner with ``debug_sql=True``; the
    captured stream must contain the SQL of failing/erroring tests at any
    verbosity, and the SQL of passing tests only at verbosity >= 2.
    """

    class PassingTest(TestCase):
        def runTest(self):
            Person.objects.filter(first_name='pass').count()

    class FailingTest(TestCase):
        def runTest(self):
            Person.objects.filter(first_name='fail').count()
            self.fail()

    class ErrorTest(TestCase):
        def runTest(self):
            Person.objects.filter(first_name='error').count()
            raise Exception

    class PassingSubTest(TestCase):
        def runTest(self):
            with self.subTest():
                Person.objects.filter(first_name='subtest-pass').count()

    class FailingSubTest(TestCase):
        def runTest(self):
            with self.subTest():
                Person.objects.filter(first_name='subtest-fail').count()
                self.fail()

    class ErrorSubTest(TestCase):
        def runTest(self):
            with self.subTest():
                Person.objects.filter(first_name='subtest-error').count()
                raise Exception

    def _test_output(self, verbosity):
        # Run the six inner cases through a debug_sql-enabled runner and
        # return everything that was written to the result stream.
        runner = DiscoverRunner(debug_sql=True, verbosity=0)
        suite = runner.test_suite()
        suite.addTest(self.FailingTest())
        suite.addTest(self.ErrorTest())
        suite.addTest(self.PassingTest())
        suite.addTest(self.PassingSubTest())
        suite.addTest(self.FailingSubTest())
        suite.addTest(self.ErrorSubTest())
        old_config = runner.setup_databases()
        stream = StringIO()
        resultclass = runner.get_resultclass()
        runner.test_runner(
            verbosity=verbosity,
            stream=stream,
            resultclass=resultclass,
        ).run(suite)
        runner.teardown_databases(old_config)
        return stream.getvalue()

    def test_output_normal(self):
        # Default verbosity: only failing/erroring queries are reported.
        full_output = self._test_output(1)
        for output in self.expected_outputs:
            self.assertIn(output, full_output)
        for output in self.verbose_expected_outputs:
            self.assertNotIn(output, full_output)

    def test_output_verbose(self):
        # Verbose mode additionally reports passing queries and statuses.
        full_output = self._test_output(2)
        for output in self.expected_outputs:
            self.assertIn(output, full_output)
        for output in self.verbose_expected_outputs:
            self.assertIn(output, full_output)

    # SQL that must appear at any verbosity (failing/erroring tests).
    expected_outputs = [
        ('''SELECT COUNT(*) AS "__count" '''
         '''FROM "test_runner_person" WHERE '''
         '''"test_runner_person"."first_name" = 'error';'''),
        ('''SELECT COUNT(*) AS "__count" '''
         '''FROM "test_runner_person" WHERE '''
         '''"test_runner_person"."first_name" = 'fail';'''),
        ('''SELECT COUNT(*) AS "__count" '''
         '''FROM "test_runner_person" WHERE '''
         '''"test_runner_person"."first_name" = 'subtest-error';'''),
        ('''SELECT COUNT(*) AS "__count" '''
         '''FROM "test_runner_person" WHERE '''
         '''"test_runner_person"."first_name" = 'subtest-fail';'''),
    ]

    # Output that must appear only in verbose mode.
    verbose_expected_outputs = [
        'runTest (test_runner.test_debug_sql.TestDebugSQL.FailingTest) ... FAIL',
        'runTest (test_runner.test_debug_sql.TestDebugSQL.ErrorTest) ... ERROR',
        'runTest (test_runner.test_debug_sql.TestDebugSQL.PassingTest) ... ok',
        # If there are errors/failures in subtests but not in test itself,
        # the status is not written. That behavior comes from Python.
        'runTest (test_runner.test_debug_sql.TestDebugSQL.FailingSubTest) ...',
        'runTest (test_runner.test_debug_sql.TestDebugSQL.ErrorSubTest) ...',
        ('''SELECT COUNT(*) AS "__count" '''
         '''FROM "test_runner_person" WHERE '''
         '''"test_runner_person"."first_name" = 'pass';'''),
        ('''SELECT COUNT(*) AS "__count" '''
         '''FROM "test_runner_person" WHERE '''
         '''"test_runner_person"."first_name" = 'subtest-pass';'''),
    ]
|
bowang/tensorflow | refs/heads/master | tensorflow/contrib/layers/python/layers/initializers_test.py | 111 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InitializerTest(test.TestCase):
  """Tests for ``xavier_initializer``: dtype validation and the variance of
  the values it produces for dense and conv2d shapes."""

  def test_xavier_wrong_dtype(self):
    # Non-floating dtypes must be rejected at construction time.
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializers.xavier_initializer(dtype=dtypes.int32)
    self.assertIsNone(regularizers.l1_regularizer(0.)(None))

  def _test_xavier(self, initializer, shape, variance, uniform):
    # Create a variable with the initializer under test and compare the
    # empirical variance of its values against the analytic expectation.
    with session.Session() as sess:
      var = variable_scope.get_variable(
          name='test',
          shape=shape,
          dtype=dtypes.float32,
          initializer=initializer(
              uniform=uniform, seed=1))
      sess.run(variables.global_variables_initializer())
      values = var.eval()
      self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)

  def test_xavier_uniform(self):
    # Xavier/Glorot variance: 2 / (fan_in + fan_out).
    self._test_xavier(initializers.xavier_initializer, [100, 40],
                      2. / (100. + 40.), True)

  def test_xavier_normal(self):
    self._test_xavier(initializers.xavier_initializer, [100, 40],
                      2. / (100. + 40.), False)

  def test_xavier_scalar(self):
    # A scalar variable has zero variance by definition.
    self._test_xavier(initializers.xavier_initializer, [], 0.0, True)

  def test_xavier_conv2d_uniform(self):
    # For conv kernels the fans are receptive-field-area * channels.
    self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
                      2. / (100. * 40 * (5 + 7)), True)

  def test_xavier_conv2d_normal(self):
    self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
                      2. / (100. * 40 * (5 + 7)), False)
class VarianceScalingInitializerTest(test.TestCase):
  """Tests for ``variance_scaling_initializer``: dtype validation and the
  variance of produced values for each factor/mode/shape combination."""

  def test_wrong_dtype(self):
    # Non-floating dtypes must be rejected both at construction and at call.
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializers.variance_scaling_initializer(dtype=dtypes.int32)
    initializer = initializers.variance_scaling_initializer()
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializer([], dtype=dtypes.int32)

  def _test_variance(self, initializer, shape, variance, factor, mode, uniform):
    # Create a variable in a fresh graph and compare the empirical variance
    # of its initial values with the analytically expected one.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        var = variable_scope.get_variable(
            name='test',
            shape=shape,
            dtype=dtypes.float32,
            initializer=initializer(
                factor=factor, mode=mode, uniform=uniform, seed=1))
        sess.run(variables.global_variables_initializer())
        values = var.eval()
        self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)

  def test_fan_in(self):
    # FAN_IN: variance = factor / fan_in.
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40],
          variance=2. / 100.,
          factor=2.0,
          mode='FAN_IN',
          uniform=uniform)

  def test_fan_out(self):
    # FAN_OUT: variance = factor / fan_out.
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40],
          variance=2. / 40.,
          factor=2.0,
          mode='FAN_OUT',
          uniform=uniform)

  def test_fan_avg(self):
    # FAN_AVG: variance = factor / ((fan_in + fan_out) / 2).
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40],
          variance=4. / (100. + 40.),
          factor=2.0,
          mode='FAN_AVG',
          uniform=uniform)

  def test_conv2d_fan_in(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40, 5, 7],
          variance=2. / (100. * 40. * 5.),
          factor=2.0,
          mode='FAN_IN',
          uniform=uniform)

  def test_conv2d_fan_out(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40, 5, 7],
          variance=2. / (100. * 40. * 7.),
          factor=2.0,
          mode='FAN_OUT',
          uniform=uniform)

  def test_conv2d_fan_avg(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40, 5, 7],
          variance=2. / (100. * 40. * (5. + 7.)),
          factor=2.0,
          mode='FAN_AVG',
          uniform=uniform)

  def test_xavier_uniform(self):
    # factor=1.0 + FAN_AVG reproduces the Xavier/Glorot scheme.
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40],
        variance=2. / (100. + 40.),
        factor=1.0,
        mode='FAN_AVG',
        uniform=True)

  def test_xavier_normal(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40],
        variance=2. / (100. + 40.),
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)

  def test_xavier_scalar(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[],
        variance=0.0,
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)

  def test_xavier_conv2d_uniform(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40, 5, 7],
        variance=2. / (100. * 40. * (5. + 7.)),
        factor=1.0,
        mode='FAN_AVG',
        uniform=True)

  def test_xavier_conv2d_normal(self):
    # BUG FIX: this previously passed uniform=True, making it an exact
    # duplicate of test_xavier_conv2d_uniform instead of exercising the
    # truncated-normal code path (compare InitializerTest above).
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40, 5, 7],
        variance=2. / (100. * 40. * (5. + 7.)),
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)

  def test_1d_shape_fan_in(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100],
          variance=2. / 100.,
          factor=2.0,
          mode='FAN_IN',
          uniform=uniform)

  def test_1d_shape_fan_out(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100],
          variance=2. / 100.,
          factor=2.0,
          mode='FAN_OUT',
          uniform=uniform)

  def test_1d_shape_fan_avg(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100],
          variance=4. / (100. + 100.),
          factor=2.0,
          mode='FAN_AVG',
          uniform=uniform)
if __name__ == '__main__':
test.main()
|
prakxys/flask | refs/heads/master | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 355 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
    """Coerce *s* to text: pass None and text through unchanged, decode
    byte strings strictly as UTF-8."""
    if s is None or isinstance(s, text_type):
        return s
    return s.decode("utf-8", "strict")
class Root(object):
    """Wrap an lxml ElementTree so it looks like a document node with an
    ordered ``children`` list (internal DTD first, then every top-level
    node: leading comments/PIs, the root element, trailing siblings)."""

    def __init__(self, et):
        self.elementtree = et
        self.children = []
        # Expose an internal DTD, when present, as a Doctype child node.
        if et.docinfo.internalDTD:
            self.children.append(Doctype(self,
                                         ensure_str(et.docinfo.root_name),
                                         ensure_str(et.docinfo.public_id),
                                         ensure_str(et.docinfo.system_url)))
        root = et.getroot()
        node = root

        # Rewind to the first top-level node (comments/PIs may precede the
        # root element), then collect all top-level siblings in order.
        while node.getprevious() is not None:
            node = node.getprevious()
        while node is not None:
            self.children.append(node)
            node = node.getnext()

        # A document node carries no character data.
        self.text = None
        self.tail = None

    def __getitem__(self, key):
        return self.children[key]

    def getnext(self):
        # The document has no siblings.
        return None

    def __len__(self):
        # The document wrapper always counts as exactly one node.
        return 1
class Doctype(object):
    """Stand-in node for an lxml internal DTD, exposing the node interface
    the tree walker expects (``text``/``tail`` attributes and ``getnext``)."""

    def __init__(self, root_node, name, public_id, system_id):
        # A doctype carries no character data of its own.
        self.text = None
        self.tail = None
        self.root_node = root_node
        self.name = name
        self.public_id = public_id
        self.system_id = system_id

    def getnext(self):
        # The doctype is always children[0] of the Root wrapper, so its next
        # sibling is children[1].
        return self.root_node.children[1]
class FragmentRoot(Root):
    """Synthetic root for walking a list of fragments (no document node)."""

    def __init__(self, children):
        # Unlike Root, there is no ElementTree; just wrap each fragment.
        self.text = self.tail = None
        self.children = [FragmentWrapper(self, fragment) for fragment in children]

    def getnext(self):
        return None
class FragmentWrapper(object):
    """Wrap one fragment (element, text, or plain string) so it exposes the
    node interface the walker expects, delegating unknown attributes to the
    wrapped object."""

    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        # ensure_str(None) is None, so missing text/tail stays None.
        self.text = ensure_str(getattr(self.obj, 'text', None))
        self.tail = ensure_str(getattr(self.obj, 'tail', None))
        # Plain strings (bytes under Python 2) are normalised to text.
        self.isstring = isinstance(obj, (str, bytes))
        if self.isstring:
            self.obj = ensure_str(self.obj)

    def __getattr__(self, name):
        return getattr(self.obj, name)

    def getnext(self):
        # The position inside the synthetic root decides the next sibling.
        siblings = self.root_node.children
        idx = siblings.index(self)
        if idx < len(siblings) - 1:
            return siblings[idx + 1]
        return None

    def __getitem__(self, key):
        return self.obj[key]

    def __bool__(self):
        return bool(self.obj)

    def getparent(self):
        return None

    def __str__(self):
        return str(self.obj)

    def __unicode__(self):
        return str(self.obj)

    def __len__(self):
        return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
    """Non-recursive tree walker over lxml (etree) trees and fragment lists.

    lxml stores character data as ``.text``/``.tail`` attributes instead of
    as nodes, so text positions are represented here as ``(element, key)``
    tuples with ``key`` in {"text", "tail"}.
    """

    def __init__(self, tree):
        # Normalise the input: a full ElementTree becomes a Root wrapper, a
        # list of fragments becomes a FragmentRoot.
        if hasattr(tree, "getroot"):
            tree = Root(tree)
        elif isinstance(tree, list):
            tree = FragmentRoot(tree)
        _base.NonRecursiveTreeWalker.__init__(self, tree)
        # Converts escaped lxml/XML names back to their original form.
        self.filter = ihatexml.InfosetFilter()

    def getNodeDetails(self, node):
        # Returns a walker token tuple describing ``node``.
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
            return _base.TEXT, ensure_str(getattr(node, key))
        elif isinstance(node, Root):
            return (_base.DOCUMENT,)
        elif isinstance(node, Doctype):
            return _base.DOCTYPE, node.name, node.public_id, node.system_id
        elif isinstance(node, FragmentWrapper) and node.isstring:
            # A bare string fragment is emitted as text directly.
            return _base.TEXT, node.obj
        elif node.tag == etree.Comment:
            return _base.COMMENT, ensure_str(node.text)
        elif node.tag == etree.Entity:
            return _base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;
        else:
            # This is assumed to be an ordinary element
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            # Split namespaced attribute names into (namespace, localname).
            attrs = {}
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)

    def getFirstChild(self, node):
        assert not isinstance(node, tuple), _("Text nodes have no children")

        assert len(node) or node.text, "Node has no children"
        # Leading text comes before the first child element.
        if node.text:
            return (node, "text")
        else:
            return node[0]

    def getNextSibling(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
            if key == "text":
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else:  # tail
                return node.getnext()

        # After an element comes its tail text, if any, then the next sibling.
        return (node, "tail") if node.tail else node.getnext()

    def getParentNode(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
            if key == "text":
                # Leading text's parent is the element that holds it.
                return node
            # else: fallback to "normal" processing

        return node.getparent()
|
twz915/django-minicms | refs/heads/master | DjangoUeditor/utils.py | 1 | # -*- coding: utf-8 -*-
# 文件大小类
from django.utils import six
# Python 3 removed the ``long`` builtin; alias it to ``int`` so the rest of
# this module can use ``long`` uniformly on both Python versions.
if six.PY3:
    long = int
class FileSize():
    """A byte count parsed from ints or strings like ``"1.5 MB"``, with
    arithmetic, ordering, and a human-friendly rendering."""

    # Multipliers from unit name to bytes.
    SIZE_UNIT = {
        "Byte": 1, "KB": 1024, "MB": 1048576,
        "GB": 1073741824, "TB": 1099511627776
    }

    def __init__(self, size):
        self.size = long(FileSize.Format(size))

    @staticmethod
    def Format(size):
        """Convert ``size`` (an integer or a string such as ``"10 kb"``)
        into a number of bytes; unparsable values yield 0."""
        import re
        if isinstance(size, six.integer_types):
            return size
        if not isinstance(size, six.string_types):
            return 0
        normalized = size.lstrip().upper().replace(" ", "")
        pattern = re.compile(r"(\d*\.?(?=\d)\d*)(byte|kb|mb|gb|tb)", re.I)
        match = pattern.match(normalized)
        if not match:
            return 0
        m_size, m_unit = match.groups()
        # Keep integral values integral; only use float for fractions.
        if m_size.find(".") == -1:
            m_size = long(m_size)
        else:
            m_size = float(m_size)
        if m_unit != "BYTE":
            return m_size * FileSize.SIZE_UNIT[m_unit]
        return m_size

    # Size in bytes.
    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, newsize):
        try:
            self._size = long(newsize)
        except (TypeError, ValueError):
            # FIX: was a bare ``except:`` swallowing every exception type.
            # Preserve the historical "bad input becomes 0" behaviour, but
            # only for actual conversion failures.
            self._size = 0

    # Size rendered with the largest fitting unit, e.g. "1.50MB".
    @property
    def FriendValue(self):
        if self.size < FileSize.SIZE_UNIT["KB"]:
            unit = "Byte"
        elif self.size < FileSize.SIZE_UNIT["MB"]:
            unit = "KB"
        elif self.size < FileSize.SIZE_UNIT["GB"]:
            unit = "MB"
        elif self.size < FileSize.SIZE_UNIT["TB"]:
            unit = "GB"
        else:
            unit = "TB"
        if (self.size % FileSize.SIZE_UNIT[unit]) == 0:
            return "%s%s" % ((self.size / FileSize.SIZE_UNIT[unit]), unit)
        return "%0.2f%s" % (round(float(self.size) /
                                  float(FileSize.SIZE_UNIT[unit]), 2), unit)

    def __str__(self):
        return self.FriendValue

    # Arithmetic and ordering accept either FileSize operands or anything
    # the constructor can parse (ints, "2KB", ...).
    def __add__(self, other):
        if not isinstance(other, FileSize):
            other = FileSize(other)
        return FileSize(other.size + self.size)

    def __sub__(self, other):
        if not isinstance(other, FileSize):
            other = FileSize(other)
        return FileSize(self.size - other.size)

    def __gt__(self, other):
        if not isinstance(other, FileSize):
            other = FileSize(other)
        return self.size > other.size

    def __lt__(self, other):
        if not isinstance(other, FileSize):
            other = FileSize(other)
        return other.size > self.size

    def __ge__(self, other):
        if not isinstance(other, FileSize):
            other = FileSize(other)
        return self.size >= other.size

    def __le__(self, other):
        if not isinstance(other, FileSize):
            other = FileSize(other)
        return other.size >= self.size
|
mSenyor/sl4a | refs/heads/master | python/src/Mac/BuildScript/build-installer.py | 22 | #!/usr/bin/python
"""
This script is used to build the "official unofficial" universal build on
Mac OS X. It requires Mac OS X 10.4, Xcode 2.2 and the 10.4u SDK to do its
work. 64-bit or four-way universal builds require at least OS X 10.5 and
the 10.5 SDK.
Please ensure that this script keeps working with Python 2.3, to avoid
bootstrap issues (/usr/bin/python is Python 2.3 on OSX 10.4)
Usage: see USAGE variable in the script.
"""
import platform, os, sys, getopt, textwrap, shutil, urllib2, stat, time, pwd
import grp
# Build-output knobs: set to 0 to omit timestamps / silence progress output.
INCLUDE_TIMESTAMP = 1
VERBOSE = 1
from plistlib import Plist
import MacOS
try:
    from plistlib import writePlist
except ImportError:
    # We're run using python2.3
    # plistlib on 2.3 has no module-level writePlist; fall back to the
    # Plist instance's own write method.
    def writePlist(plist, path):
        plist.write(path)
def shellQuote(value):
    """
    Wrap ``value`` in single quotes so it can safely be interpolated into
    a shell command, escaping embedded single quotes with the '"'"' idiom.
    """
    escaped = value.replace("'", "'\"'\"'")
    return "'%s'" % (escaped,)
def grepValue(fn, variable):
    """Return the quote-stripped value of the first ``variable=...``
    assignment found in file ``fn``, or None when absent.

    Deliberately avoids ``with`` so the script keeps running on Python 2.3.
    """
    prefix = variable + '='
    for line in open(fn, 'r'):
        if line.startswith(prefix):
            # Drop the surrounding quote characters from the value.
            return line[len(prefix):].strip()[1:-1]
def getVersion():
    """Return the short Python version (e.g. '2.7') recorded as
    PACKAGE_VERSION in the configure script."""
    configure = os.path.join(SRCDIR, 'configure')
    return grepValue(configure, 'PACKAGE_VERSION')
def getFullVersion():
    # Full version (e.g. '2.7.1') taken from PY_VERSION in patchlevel.h.
    fn = os.path.join(SRCDIR, 'Include', 'patchlevel.h')
    for ln in open(fn):
        if 'PY_VERSION' in ln:
            # Line looks like: #define PY_VERSION "2.7.1" — strip the quotes.
            return ln.split()[-1][1:-1]
    # Old-style raise kept: this script must stay runnable on Python 2.3.
    raise RuntimeError, "Cannot find full version??"
# The directory we'll use to create the build (will be erased and recreated)
WORKDIR = "/tmp/_py"

# The directory we'll use to store third-party sources. Set this to something
# else if you don't want to re-fetch required libraries every time.
# NOTE(review): the second assignment deliberately overrides the first — the
# workdir-relative default is kept only as documentation of the fallback.
DEPSRC = os.path.join(WORKDIR, 'third-party')
DEPSRC = os.path.expanduser('~/Universal/other-sources')

# Location of the preferred SDK
SDKPATH = "/Developer/SDKs/MacOSX10.4u.sdk"
#SDKPATH = "/"

# Architectures per universal-build flavour; keys are the values accepted
# by the --universal-archs command-line option.
universal_opts_map = { '32-bit': ('i386', 'ppc',),
                       '64-bit': ('x86_64', 'ppc64',),
                       'all': ('i386', 'ppc', 'x86_64', 'ppc64',) }

UNIVERSALOPTS = tuple(universal_opts_map.keys())

UNIVERSALARCHS = '32-bit'

ARCHLIST = universal_opts_map[UNIVERSALARCHS]

# Source directory (asume we're in Mac/BuildScript)
SRCDIR = os.path.dirname(
        os.path.dirname(
            os.path.dirname(
                os.path.abspath(__file__
        ))))

# $MACOSX_DEPLOYMENT_TARGET -> minimum OS X level
DEPTARGET = '10.3'
USAGE = textwrap.dedent("""\
Usage: build_python [options]
Options:
-? or -h: Show this message
-b DIR
--build-dir=DIR: Create build here (default: %(WORKDIR)r)
--third-party=DIR: Store third-party sources here (default: %(DEPSRC)r)
--sdk-path=DIR: Location of the SDK (default: %(SDKPATH)r)
--src-dir=DIR: Location of the Python sources (default: %(SRCDIR)r)
--dep-target=10.n OS X deployment target (default: %(DEPTARGET)r)
--universal-archs=x universal architectures (options: %(UNIVERSALOPTS)r, default: %(UNIVERSALARCHS)r)
""")% globals()
# Instructions for building a
# batteries included python.
# [The recipes are defined here for convenience but instantiated later after
# command line options have been processed.]
def library_recipes():
    """Return the third-party library build recipes, as dicts for buildRecipe().

    Recognized keys: 'name'/'url'/'checksum' identify the source archive;
    'configure'/'install' override the default configure and install
    commands (None disables configure); 'configure_pre' prepends
    configure arguments; 'patches' lists patch files or URLs and
    'patchlevel' the -p level; 'buildDir' selects a build subdirectory;
    'useLDFlags' controls whether LDFLAGS is passed to configure.
    """
    return [
        dict(
            name="Bzip2 1.0.4",
            url="http://www.bzip.org/1.0.4/bzip2-1.0.4.tar.gz",
            checksum='fc310b254f6ba5fbb5da018f04533688',
            configure=None,
            install='make install PREFIX=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
                shellQuote(os.path.join(WORKDIR, 'libraries')),
                ' -arch '.join(ARCHLIST),
                SDKPATH,
            ),
        ),
        dict(
            name="ZLib 1.2.3",
            url="http://www.gzip.org/zlib/zlib-1.2.3.tar.gz",
            checksum='debc62758716a169df9f62e6ab2bc634',
            configure=None,
            install='make install prefix=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
                shellQuote(os.path.join(WORKDIR, 'libraries')),
                ' -arch '.join(ARCHLIST),
                SDKPATH,
            ),
        ),
        dict(
            # Note that GNU readline is GPL'd software
            name="GNU Readline 5.1.4",
            url="http://ftp.gnu.org/pub/gnu/readline/readline-5.1.tar.gz" ,
            checksum='7ee5a692db88b30ca48927a13fd60e46',
            patchlevel='0',
            patches=[
                # The readline maintainers don't do actual micro releases, but
                # just ship a set of patches.
                'http://ftp.gnu.org/pub/gnu/readline/readline-5.1-patches/readline51-001',
                'http://ftp.gnu.org/pub/gnu/readline/readline-5.1-patches/readline51-002',
                'http://ftp.gnu.org/pub/gnu/readline/readline-5.1-patches/readline51-003',
                'http://ftp.gnu.org/pub/gnu/readline/readline-5.1-patches/readline51-004',
            ]
        ),
        dict(
            name="SQLite 3.6.11",
            url="http://www.sqlite.org/sqlite-3.6.11.tar.gz",
            checksum='7ebb099696ab76cc6ff65dd496d17858',
            configure_pre=[
                '--enable-threadsafe',
                '--enable-tempstore',
                '--enable-shared=no',
                '--enable-static=yes',
                '--disable-tcl',
            ]
        ),
        dict(
            name="NCurses 5.5",
            url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.5.tar.gz",
            checksum='e73c1ac10b4bfc46db43b2ddfd6244ef',
            configure_pre=[
                "--without-cxx",
                "--without-ada",
                "--without-progs",
                "--without-curses-h",
                "--enable-shared",
                "--with-shared",
                "--datadir=/usr/share",
                "--sysconfdir=/etc",
                "--sharedstatedir=/usr/com",
                "--with-terminfo-dirs=/usr/share/terminfo",
                "--with-default-terminfo-dir=/usr/share/terminfo",
                "--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),),
                "--enable-termcap",
            ],
            patches=[
                "ncurses-5.5.patch",
            ],
            useLDFlags=False,
            # Install into the staging area, then symlink the shared
            # libraries into the framework's lib directory.
            install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%(
                shellQuote(os.path.join(WORKDIR, 'libraries')),
                shellQuote(os.path.join(WORKDIR, 'libraries')),
                getVersion(),
            ),
        ),
        dict(
            name="Sleepycat DB 4.7.25",
            url="http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz",
            checksum='ec2b87e833779681a0c3a814aa71359e',
            buildDir="build_unix",
            configure="../dist/configure",
            configure_pre=[
                '--includedir=/usr/local/include/db4',
            ]
        ),
    ]
# Instructions for building packages inside the .mpkg.
PKG_RECIPES = [
dict(
name="PythonFramework",
long_name="Python Framework",
source="/Library/Frameworks/Python.framework",
readme="""\
This package installs Python.framework, that is the python
interpreter and the standard library. This also includes Python
wrappers for lots of Mac OS X API's.
""",
postflight="scripts/postflight.framework",
),
dict(
name="PythonApplications",
long_name="GUI Applications",
source="/Applications/Python %(VER)s",
readme="""\
This package installs IDLE (an interactive Python IDE),
Python Launcher and Build Applet (create application bundles
from python scripts).
It also installs a number of examples and demos.
""",
required=False,
),
dict(
name="PythonUnixTools",
long_name="UNIX command-line tools",
source="/usr/local/bin",
readme="""\
This package installs the unix tools in /usr/local/bin for
compatibility with older releases of Python. This package
is not necessary to use Python.
""",
required=False,
),
dict(
name="PythonDocumentation",
long_name="Python Documentation",
topdir="/Library/Frameworks/Python.framework/Versions/%(VER)s/Resources/English.lproj/Documentation",
source="/pydocs",
readme="""\
This package installs the python documentation at a location
that is useable for pydoc and IDLE. If you have installed Xcode
it will also install a link to the documentation in
/Developer/Documentation/Python
""",
postflight="scripts/postflight.documentation",
required=False,
),
dict(
name="PythonProfileChanges",
long_name="Shell profile updater",
readme="""\
This packages updates your shell profile to make sure that
the Python tools are found by your shell in preference of
the system provided Python tools.
If you don't install this package you'll have to add
"/Library/Frameworks/Python.framework/Versions/%(VER)s/bin"
to your PATH by hand.
""",
postflight="scripts/postflight.patch-profile",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
),
dict(
name="PythonSystemFixes",
long_name="Fix system Python",
readme="""\
This package updates the system python installation on
Mac OS X 10.3 to ensure that you can build new python extensions
using that copy of python after installing this version.
""",
postflight="../Tools/fixapplepython23.py",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
)
]
def fatal(msg):
    """Report *msg* as a fatal error on stderr and terminate with status 1."""
    for piece in ('FATAL: ', msg, '\n'):
        sys.stderr.write(piece)
    sys.exit(1)
def fileContents(fn):
    """
    Return the contents of the named file (binary mode).

    The descriptor is closed explicitly instead of relying on garbage
    collection, so repeated calls can't leak file handles.
    """
    fp = open(fn, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
def runCommand(commandline):
    """
    Run a command and raise RuntimeError if it fails. Output is suppressed
    unless the command fails (or the global VERBOSE flag is set).
    """
    fd = os.popen(commandline, 'r')
    data = fd.read()
    xit = fd.close()
    if xit is not None:
        # A non-None close() status means the command exited with an
        # error; show its (otherwise suppressed) output before bailing.
        sys.stdout.write(data)
        # Call-form raise: works on both Python 2 and Python 3.
        raise RuntimeError("command failed: %s" % (commandline,))

    if VERBOSE:
        sys.stdout.write(data)
        sys.stdout.flush()
def captureCommand(commandline):
    """Run a command and return its stdout; raise RuntimeError if it fails."""
    fd = os.popen(commandline, 'r')
    data = fd.read()
    xit = fd.close()
    if xit is not None:
        # Show the failing command's output, then bail out.
        # Call-form raise: works on both Python 2 and Python 3.
        sys.stdout.write(data)
        raise RuntimeError("command failed: %s" % (commandline,))

    return data
def checkEnvironment():
    """
    Check that we're running on a supported system: Darwin release 8
    (Mac OS X 10.4) or later, with the configured SDK installed.
    """
    if platform.system() != 'Darwin':
        fatal("This script should be run on a Mac OS X 10.4 system")

    # Compare the major Darwin release numerically: the previous
    # lexicographic comparison (release() <= '8.') wrongly rejected
    # Darwin 10 and later because '10.x' sorts before '8.'.
    if int(platform.release().split('.')[0]) < 8:
        fatal("This script should be run on a Mac OS X 10.4 system")

    if not os.path.exists(SDKPATH):
        fatal("Please install the latest version of Xcode and the %s SDK"%(
            os.path.basename(SDKPATH[:-4])))
def parseOptions(args=None):
"""
Parse arguments and update global settings.
"""
global WORKDIR, DEPSRC, SDKPATH, SRCDIR, DEPTARGET
global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST
if args is None:
args = sys.argv[1:]
try:
options, args = getopt.getopt(args, '?hb',
[ 'build-dir=', 'third-party=', 'sdk-path=' , 'src-dir=',
'dep-target=', 'universal-archs=', 'help' ])
except getopt.error, msg:
print msg
sys.exit(1)
if args:
print "Additional arguments"
sys.exit(1)
for k, v in options:
if k in ('-h', '-?', '--help'):
print USAGE
sys.exit(0)
elif k in ('-d', '--build-dir'):
WORKDIR=v
elif k in ('--third-party',):
DEPSRC=v
elif k in ('--sdk-path',):
SDKPATH=v
elif k in ('--src-dir',):
SRCDIR=v
elif k in ('--dep-target', ):
DEPTARGET=v
elif k in ('--universal-archs', ):
if v in UNIVERSALOPTS:
UNIVERSALARCHS = v
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
else:
raise NotImplementedError, v
else:
raise NotImplementedError, k
SRCDIR=os.path.abspath(SRCDIR)
WORKDIR=os.path.abspath(WORKDIR)
SDKPATH=os.path.abspath(SDKPATH)
DEPSRC=os.path.abspath(DEPSRC)
print "Settings:"
print " * Source directory:", SRCDIR
print " * Build directory: ", WORKDIR
print " * SDK location: ", SDKPATH
print " * Third-party source:", DEPSRC
print " * Deployment target:", DEPTARGET
print " * Universal architectures:", ARCHLIST
print ""
def extractArchive(builddir, archiveName):
    """
    Extract a source archive into 'builddir'. Returns the path of the
    extracted archive.

    XXX: This function assumes that archives contain a toplevel directory
    that has the same name as the basename of the archive. This is
    safe enough for anything we use.
    """
    curdir = os.getcwd()
    try:
        os.chdir(builddir)

        # Map the archive suffix to the command used to unpack it; the
        # result directory is the archive basename minus that suffix.
        if archiveName.endswith('.tar.gz'):
            retval = os.path.basename(archiveName[:-7])
            cmd = "tar zxf %s 2>&1"

        elif archiveName.endswith('.tar.bz2'):
            retval = os.path.basename(archiveName[:-8])
            cmd = "tar jxf %s 2>&1"

        elif archiveName.endswith('.tar'):
            retval = os.path.basename(archiveName[:-4])
            cmd = "tar xf %s 2>&1"

        elif archiveName.endswith('.zip'):
            retval = os.path.basename(archiveName[:-4])
            cmd = "unzip %s 2>&1"

        else:
            # Previously an unknown suffix fell through and crashed with
            # a NameError on 'retval'; fail with a real message instead.
            raise RuntimeError("Don't know how to extract %s" % (archiveName,))

        # Remove any stale result of a previous extraction.
        if os.path.exists(retval):
            shutil.rmtree(retval)
        fp = os.popen(cmd % (shellQuote(archiveName),), 'r')

        data = fp.read()
        xit = fp.close()
        if xit is not None:
            sys.stdout.write(data)
            raise RuntimeError("Cannot extract %s" % (archiveName,))

        return os.path.join(builddir, retval)

    finally:
        os.chdir(curdir)
# Byte sizes of known downloads; downloadURL() reuses an already
# downloaded file when its size matches the entry here.
KNOWNSIZES = {
    "http://ftp.gnu.org/pub/gnu/readline/readline-5.1.tar.gz": 7952742,
    "http://downloads.sleepycat.com/db-4.4.20.tar.gz": 2030276,
}
def downloadURL(url, fname):
"""
Download the contents of the url into the file.
"""
try:
size = os.path.getsize(fname)
except OSError:
pass
else:
if KNOWNSIZES.get(url) == size:
print "Using existing file for", url
return
fpIn = urllib2.urlopen(url)
fpOut = open(fname, 'wb')
block = fpIn.read(10240)
try:
while block:
fpOut.write(block)
block = fpIn.read(10240)
fpIn.close()
fpOut.close()
except:
try:
os.unlink(fname)
except:
pass
def buildRecipe(recipe, basedir, archList):
    """
    Build software using a recipe. This function does the
    'configure;make;make install' dance for C software, with a possibility
    to customize this process, basically a poor-mans DarwinPorts.

    recipe: dict as produced by library_recipes().
    basedir: staging prefix passed as DESTDIR to the install command.
    archList: architectures to build (added as -arch compiler flags).
    """
    curdir = os.getcwd()

    name = recipe['name']
    url = recipe['url']
    configure = recipe.get('configure', './configure')
    install = recipe.get('install', 'make && make install DESTDIR=%s'%(
        shellQuote(basedir)))

    archiveName = os.path.split(url)[-1]
    sourceArchive = os.path.join(DEPSRC, archiveName)

    if not os.path.exists(DEPSRC):
        os.mkdir(DEPSRC)

    # Fetch the source archive unless a local copy is already cached.
    if os.path.exists(sourceArchive):
        print "Using local copy of %s"%(name,)
    else:
        print "Did not find local copy of %s"%(name,)
        print "Downloading %s"%(name,)
        downloadURL(url, sourceArchive)
        print "Archive for %s stored as %s"%(name, sourceArchive)

    print "Extracting archive for %s"%(name,)
    buildDir=os.path.join(WORKDIR, '_bld')
    if not os.path.exists(buildDir):
        os.mkdir(buildDir)

    workDir = extractArchive(buildDir, sourceArchive)
    os.chdir(workDir)
    if 'buildDir' in recipe:
        os.chdir(recipe['buildDir'])

    for fn in recipe.get('patches', ()):
        if fn.startswith('http://'):
            # Download the patch before applying it.
            path = os.path.join(DEPSRC, os.path.basename(fn))
            downloadURL(fn, path)
            fn = path

        fn = os.path.join(curdir, fn)
        runCommand('patch -p%s < %s'%(recipe.get('patchlevel', 1),
            shellQuote(fn),))

    if configure is not None:
        # Default to a static build installed under /usr/local; the
        # recipe's configure_pre arguments may override these defaults.
        configure_args = [
            "--prefix=/usr/local",
            "--enable-static",
            "--disable-shared",
            #"CPP=gcc -arch %s -E"%(' -arch '.join(archList,),),
        ]

        if 'configure_pre' in recipe:
            args = list(recipe['configure_pre'])
            if '--disable-static' in args:
                configure_args.remove('--enable-static')
            if '--enable-shared' in args:
                configure_args.remove('--disable-shared')
            configure_args.extend(args)

        if recipe.get('useLDFlags', 1):
            configure_args.extend([
                "CFLAGS=-arch %s -isysroot %s -I%s/usr/local/include"%(
                    ' -arch '.join(archList),
                    shellQuote(SDKPATH)[1:-1],
                    shellQuote(basedir)[1:-1],),
                "LDFLAGS=-syslibroot,%s -L%s/usr/local/lib -arch %s"%(
                    shellQuote(SDKPATH)[1:-1],
                    shellQuote(basedir)[1:-1],
                    ' -arch '.join(archList)),
            ])
        else:
            configure_args.extend([
                "CFLAGS=-arch %s -isysroot %s -I%s/usr/local/include"%(
                    ' -arch '.join(archList),
                    shellQuote(SDKPATH)[1:-1],
                    shellQuote(basedir)[1:-1],),
            ])

        if 'configure_post' in recipe:
            # NOTE(review): this *replaces* all arguments built so far
            # (and the doubled assignment looks accidental) -- confirm
            # whether 'configure_post' was meant to extend instead.
            configure_args = configure_args = list(recipe['configure_post'])

        configure_args.insert(0, configure)
        configure_args = [ shellQuote(a) for a in configure_args ]

        print "Running configure for %s"%(name,)
        runCommand(' '.join(configure_args) + ' 2>&1')

    print "Running install for %s"%(name,)
    runCommand('{ ' + install + ' ;} 2>&1')

    print "Done %s"%(name,)
    print ""

    os.chdir(curdir)
def buildLibraries():
    """
    Build our dependencies into $WORKDIR/libraries/usr/local
    """
    print ""
    print "Building required libraries"
    print ""
    universal = os.path.join(WORKDIR, 'libraries')
    os.mkdir(universal)
    # Pre-create the usual prefix layout so recipes can install into it.
    os.makedirs(os.path.join(universal, 'usr', 'local', 'lib'))
    os.makedirs(os.path.join(universal, 'usr', 'local', 'include'))

    for recipe in library_recipes():
        buildRecipe(recipe, universal, ARCHLIST)
def buildPythonDocs():
    # This stores the documentation as Resources/English.lproj/Documentation
    # inside the framework. pydoc and IDLE will pick it up there.
    print "Install python documentation"
    rootDir = os.path.join(WORKDIR, '_root')
    buildDir = os.path.join('../../Doc')
    docdir = os.path.join(rootDir, 'pydocs')
    curDir = os.getcwd()
    os.chdir(buildDir)
    # Build the HTML documentation via the Doc directory's makefile.
    runCommand('make update')
    runCommand('make html')
    os.chdir(curDir)
    if not os.path.exists(docdir):
        os.mkdir(docdir)
    # Move the generated HTML tree into the package root.
    os.rename(os.path.join(buildDir, 'build', 'html'),
            os.path.join(docdir, 'python-docs-html'))
def buildPython():
print "Building a universal python for %s architectures" % UNIVERSALARCHS
buildDir = os.path.join(WORKDIR, '_bld', 'python')
rootDir = os.path.join(WORKDIR, '_root')
if os.path.exists(buildDir):
shutil.rmtree(buildDir)
if os.path.exists(rootDir):
shutil.rmtree(rootDir)
os.mkdir(buildDir)
os.mkdir(rootDir)
os.mkdir(os.path.join(rootDir, 'empty-dir'))
curdir = os.getcwd()
os.chdir(buildDir)
# Not sure if this is still needed, the original build script
# claims that parts of the install assume python.exe exists.
os.symlink('python', os.path.join(buildDir, 'python.exe'))
# Extract the version from the configure file, needed to calculate
# several paths.
version = getVersion()
# Since the extra libs are not in their installed framework location
# during the build, augment the library path so that the interpreter
# will find them during its extension import sanity checks.
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(WORKDIR,
'libraries', 'usr', 'local', 'lib')
print "Running configure..."
runCommand("%s -C --enable-framework --enable-universalsdk=%s "
"--with-universal-archs=%s "
"LDFLAGS='-g -L%s/libraries/usr/local/lib' "
"OPT='-g -O3 -I%s/libraries/usr/local/include' 2>&1"%(
shellQuote(os.path.join(SRCDIR, 'configure')), shellQuote(SDKPATH),
UNIVERSALARCHS,
shellQuote(WORKDIR)[1:-1],
shellQuote(WORKDIR)[1:-1]))
print "Running make"
runCommand("make")
print "Running make frameworkinstall"
runCommand("make frameworkinstall DESTDIR=%s"%(
shellQuote(rootDir)))
print "Running make frameworkinstallextras"
runCommand("make frameworkinstallextras DESTDIR=%s"%(
shellQuote(rootDir)))
del os.environ['DYLD_LIBRARY_PATH']
print "Copying required shared libraries"
if os.path.exists(os.path.join(WORKDIR, 'libraries', 'Library')):
runCommand("mv %s/* %s"%(
shellQuote(os.path.join(
WORKDIR, 'libraries', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(),
'lib')),
shellQuote(os.path.join(WORKDIR, '_root', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(),
'lib'))))
print "Fix file modes"
frmDir = os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework')
gid = grp.getgrnam('admin').gr_gid
for dirpath, dirnames, filenames in os.walk(frmDir):
for dn in dirnames:
os.chmod(os.path.join(dirpath, dn), 0775)
os.chown(os.path.join(dirpath, dn), -1, gid)
for fn in filenames:
if os.path.islink(fn):
continue
# "chmod g+w $fn"
p = os.path.join(dirpath, fn)
st = os.stat(p)
os.chmod(p, stat.S_IMODE(st.st_mode) | stat.S_IWGRP)
os.chown(p, -1, gid)
# We added some directories to the search path during the configure
# phase. Remove those because those directories won't be there on
# the end-users system.
path =os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework',
'Versions', version, 'lib', 'python%s'%(version,),
'config', 'Makefile')
fp = open(path, 'r')
data = fp.read()
fp.close()
data = data.replace('-L%s/libraries/usr/local/lib'%(WORKDIR,), '')
data = data.replace('-I%s/libraries/usr/local/include'%(WORKDIR,), '')
fp = open(path, 'w')
fp.write(data)
fp.close()
# Add symlinks in /usr/local/bin, using relative links
usr_local_bin = os.path.join(rootDir, 'usr', 'local', 'bin')
to_framework = os.path.join('..', '..', '..', 'Library', 'Frameworks',
'Python.framework', 'Versions', version, 'bin')
if os.path.exists(usr_local_bin):
shutil.rmtree(usr_local_bin)
os.makedirs(usr_local_bin)
for fn in os.listdir(
os.path.join(frmDir, 'Versions', version, 'bin')):
os.symlink(os.path.join(to_framework, fn),
os.path.join(usr_local_bin, fn))
os.chdir(curdir)
def patchFile(inPath, outPath):
    """Copy inPath to outPath, expanding the installer template variables."""
    substitutions = (
        ('$FULL_VERSION', getFullVersion()),
        ('$VERSION', getVersion()),
        ('$MACOSX_DEPLOYMENT_TARGET', ''.join((DEPTARGET, ' or later'))),
        ('$ARCHITECTURES', "i386, ppc"),
        ('$INSTALL_SIZE', installSize()),
        # This one is not handy as a template variable
        ('$PYTHONFRAMEWORKINSTALLDIR', '/Library/Frameworks/Python.framework'),
    )
    text = fileContents(inPath)
    for marker, value in substitutions:
        text = text.replace(marker, value)
    fp = open(outPath, 'wb')
    fp.write(text)
    fp.close()
def patchScript(inPath, outPath):
data = fileContents(inPath)
data = data.replace('@PYVER@', getVersion())
fp = open(outPath, 'wb')
fp.write(data)
fp.close()
os.chmod(outPath, 0755)
def packageFromRecipe(targetDir, recipe):
    """Build one .pkg inside *targetDir* from a PKG_RECIPES entry.

    Archives the payload with pax/mkbom, writes the ReadMe and the two
    Installer.app plists, and instantiates the postflight script.
    """
    curdir = os.getcwd()
    try:
        # The major version (such as 2.5) is included in the package name
        # because having two version of python installed at the same time is
        # common.
        pkgname = '%s-%s'%(recipe['name'], getVersion())
        srcdir = recipe.get('source')
        pkgroot = recipe.get('topdir', srcdir)
        postflight = recipe.get('postflight')
        readme = textwrap.dedent(recipe['readme'])
        isRequired = recipe.get('required', True)

        print "- building package %s"%(pkgname,)

        # Substitute some variables
        textvars = dict(
            VER=getVersion(),
            FULLVER=getFullVersion(),
        )
        readme = readme % textvars

        if pkgroot is not None:
            pkgroot = pkgroot % textvars
        else:
            pkgroot = '/'

        if srcdir is not None:
            srcdir = os.path.join(WORKDIR, '_root', srcdir[1:])
            srcdir = srcdir % textvars

        if postflight is not None:
            postflight = os.path.abspath(postflight)

        packageContents = os.path.join(targetDir, pkgname + '.pkg', 'Contents')
        os.makedirs(packageContents)

        if srcdir is not None:
            # Archive the payload and generate its bill-of-materials.
            os.chdir(srcdir)
            runCommand("pax -wf %s . 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
            runCommand("gzip -9 %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
            runCommand("mkbom . %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.bom')),))

        fn = os.path.join(packageContents, 'PkgInfo')
        fp = open(fn, 'w')
        fp.write('pmkrpkg1')
        fp.close()

        rsrcDir = os.path.join(packageContents, "Resources")
        os.mkdir(rsrcDir)
        fp = open(os.path.join(rsrcDir, 'ReadMe.txt'), 'w')
        fp.write(readme)
        fp.close()

        if postflight is not None:
            patchScript(postflight, os.path.join(rsrcDir, 'postflight'))

        vers = getFullVersion()
        major, minor = map(int, getVersion().split('.', 2))
        pl = Plist(
                CFBundleGetInfoString="Python.%s %s"%(pkgname, vers,),
                CFBundleIdentifier='org.python.Python.%s'%(pkgname,),
                CFBundleName='Python.%s'%(pkgname,),
                CFBundleShortVersionString=vers,
                IFMajorVersion=major,
                IFMinorVersion=minor,
                IFPkgFormatVersion=0.10000000149011612,
                IFPkgFlagAllowBackRev=False,
                IFPkgFlagAuthorizationAction="RootAuthorization",
                IFPkgFlagDefaultLocation=pkgroot,
                IFPkgFlagFollowLinks=True,
                IFPkgFlagInstallFat=True,
                IFPkgFlagIsRequired=isRequired,
                IFPkgFlagOverwritePermissions=False,
                IFPkgFlagRelocatable=False,
                IFPkgFlagRestartAction="NoRestart",
                IFPkgFlagRootVolumeOnly=True,
                IFPkgFlagUpdateInstalledLangauges=False,
            )
        writePlist(pl, os.path.join(packageContents, 'Info.plist'))

        pl = Plist(
                    IFPkgDescriptionDescription=readme,
                    IFPkgDescriptionTitle=recipe.get('long_name', "Python.%s"%(pkgname,)),
                    IFPkgDescriptionVersion=vers,
                )
        writePlist(pl, os.path.join(packageContents, 'Resources', 'Description.plist'))

    finally:
        os.chdir(curdir)
def makeMpkgPlist(path):
    """Write the Info.plist for the enclosing .mpkg, listing all sub-packages."""
    vers = getFullVersion()
    major, minor = map(int, getVersion().split('.', 2))

    pl = Plist(
            CFBundleGetInfoString="Python %s"%(vers,),
            CFBundleIdentifier='org.python.Python',
            CFBundleName='Python',
            CFBundleShortVersionString=vers,
            IFMajorVersion=major,
            IFMinorVersion=minor,
            IFPkgFlagComponentDirectory="Contents/Packages",
            # One entry per sub-package, all pre-selected by default.
            IFPkgFlagPackageList=[
                dict(
                    IFPkgFlagPackageLocation='%s-%s.pkg'%(item['name'], getVersion()),
                    IFPkgFlagPackageSelection='selected'
                )
                for item in PKG_RECIPES
            ],
            IFPkgFormatVersion=0.10000000149011612,
            IFPkgFlagBackgroundScaling="proportional",
            IFPkgFlagBackgroundAlignment="left",
            IFPkgFlagAuthorizationAction="RootAuthorization",
        )

    writePlist(pl, path)
def buildInstaller():
    """Assemble the Python.mpkg installer in $WORKDIR/installer."""

    # Zap all compiled files
    for dirpath, _, filenames in os.walk(os.path.join(WORKDIR, '_root')):
        for fn in filenames:
            if fn.endswith('.pyc') or fn.endswith('.pyo'):
                os.unlink(os.path.join(dirpath, fn))

    outdir = os.path.join(WORKDIR, 'installer')
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.mkdir(outdir)

    pkgroot = os.path.join(outdir, 'Python.mpkg', 'Contents')
    pkgcontents = os.path.join(pkgroot, 'Packages')
    os.makedirs(pkgcontents)
    # Build one .pkg per recipe into the mpkg's Packages directory.
    for recipe in PKG_RECIPES:
        packageFromRecipe(pkgcontents, recipe)

    rsrcDir = os.path.join(pkgroot, 'Resources')

    fn = os.path.join(pkgroot, 'PkgInfo')
    fp = open(fn, 'w')
    fp.write('pmkrpkg1')
    fp.close()

    os.mkdir(rsrcDir)

    makeMpkgPlist(os.path.join(pkgroot, 'Info.plist'))
    pl = Plist(
                IFPkgDescriptionTitle="Python",
                IFPkgDescriptionVersion=getVersion(),
            )

    writePlist(pl, os.path.join(pkgroot, 'Resources', 'Description.plist'))

    # Copy installer artwork as-is; run everything else through the
    # template-variable expansion.
    for fn in os.listdir('resources'):
        if fn == '.svn': continue
        if fn.endswith('.jpg'):
            shutil.copy(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
        else:
            patchFile(os.path.join('resources', fn), os.path.join(rsrcDir, fn))

    shutil.copy("../../LICENSE", os.path.join(rsrcDir, 'License.txt'))
def installSize(clear=False, _saved=[]):
    """Return the installed size in megabytes (as a string), measured via du.

    The mutable default argument is used deliberately as a cross-call
    cache; pass clear=True to force a re-measurement.
    """
    if clear:
        del _saved[:]
    if not _saved:
        data = captureCommand("du -ks %s"%(
                    shellQuote(os.path.join(WORKDIR, '_root'))))
        # du -k reports kilobytes; convert to MB, rounding to nearest.
        _saved.append("%d"%((0.5 + (int(data.split()[0]) / 1024.0)),))
    return _saved[0]
def buildDMG():
    """
    Create DMG containing the rootDir.

    Returns the path of the compressed (UDZO) disk image.
    """
    outdir = os.path.join(WORKDIR, 'diskimage')
    if os.path.exists(outdir):
        shutil.rmtree(outdir)

    imagepath = os.path.join(outdir,
                    'python-%s-macosx'%(getFullVersion(),))
    if INCLUDE_TIMESTAMP:
        imagepath = imagepath + '%04d-%02d-%02d'%(time.localtime()[:3])
    imagepath = imagepath + '.dmg'

    os.mkdir(outdir)
    volname='Python %s'%(getFullVersion())
    # Create a writable image first so the volume icon can be added.
    runCommand("hdiutil create -format UDRW -volname %s -srcfolder %s %s"%(
            shellQuote(volname),
            shellQuote(os.path.join(WORKDIR, 'installer')),
            shellQuote(imagepath + ".tmp.dmg" )))

    if not os.path.exists(os.path.join(WORKDIR, "mnt")):
        os.mkdir(os.path.join(WORKDIR, "mnt"))
    runCommand("hdiutil attach %s -mountroot %s"%(
        shellQuote(imagepath + ".tmp.dmg"), shellQuote(os.path.join(WORKDIR, "mnt"))))

    # Custom icon for the DMG, shown when the DMG is mounted.
    shutil.copy("../Icons/Disk Image.icns",
            os.path.join(WORKDIR, "mnt", volname, ".VolumeIcon.icns"))
    # SetFile -a C marks the volume as having a custom icon.
    runCommand("/Developer/Tools/SetFile -a C %s/"%(
            shellQuote(os.path.join(WORKDIR, "mnt", volname)),))

    runCommand("hdiutil detach %s"%(shellQuote(os.path.join(WORKDIR, "mnt", volname))))

    setIcon(imagepath + ".tmp.dmg", "../Icons/Disk Image.icns")
    # Convert the writable image to the final compressed format.
    runCommand("hdiutil convert %s -format UDZO -o %s"%(
            shellQuote(imagepath + ".tmp.dmg"), shellQuote(imagepath)))
    setIcon(imagepath, "../Icons/Disk Image.icns")

    os.unlink(imagepath + ".tmp.dmg")

    return imagepath
def setIcon(filePath, icnsPath):
    """
    Set the custom icon for the specified file or directory.
    """
    toolPath = os.path.join(os.path.dirname(__file__), "seticon.app/Contents/MacOS/seticon")
    dirPath = os.path.dirname(__file__)
    # (Re)build the helper tool if it is missing or older than its source.
    if not os.path.exists(toolPath) or os.stat(toolPath).st_mtime < os.stat(dirPath + '/seticon.m').st_mtime:
        # NOTE: The tool is created inside an .app bundle, otherwise it won't work due
        # to connections to the window server.
        if not os.path.exists('seticon.app/Contents/MacOS'):
            os.makedirs('seticon.app/Contents/MacOS')
        runCommand("cc -o %s %s/seticon.m -framework Cocoa"%(
            shellQuote(toolPath), shellQuote(dirPath)))

    runCommand("%s %s %s"%(shellQuote(os.path.abspath(toolPath)), shellQuote(icnsPath),
        shellQuote(filePath)))
def main():
    """Drive the whole build: libraries, Python, docs, installer, DMG."""
    # First parse options and check if we can perform our work
    parseOptions()
    checkEnvironment()

    os.environ['MACOSX_DEPLOYMENT_TARGET'] = DEPTARGET

    if os.path.exists(WORKDIR):
        shutil.rmtree(WORKDIR)
    os.mkdir(WORKDIR)

    # Then build third-party libraries such as sleepycat DB4.
    buildLibraries()

    # Now build python itself
    buildPython()

    # And then build the documentation
    # Remove the Deployment Target from the shell
    # environment, it's no longer needed and
    # an unexpected build target can cause problems
    # when Sphinx and its dependencies need to
    # be (re-)installed.
    del os.environ['MACOSX_DEPLOYMENT_TARGET']
    buildPythonDocs()

    # Prepare the applications folder
    fn = os.path.join(WORKDIR, "_root", "Applications",
            "Python %s"%(getVersion(),), "Update Shell Profile.command")
    patchScript("scripts/postflight.patch-profile", fn)

    folder = os.path.join(WORKDIR, "_root", "Applications", "Python %s"%(
        getVersion(),))
    os.chmod(folder, 0755)
    setIcon(folder, "../Icons/Python Folder.icns")

    # Create the installer
    buildInstaller()

    # And copy the readme into the directory containing the installer
    patchFile('resources/ReadMe.txt', os.path.join(WORKDIR, 'installer', 'ReadMe.txt'))

    # Ditto for the license file.
    shutil.copy('../../LICENSE', os.path.join(WORKDIR, 'installer', 'License.txt'))

    # Record when and by whom this installer was built.
    fp = open(os.path.join(WORKDIR, 'installer', 'Build.txt'), 'w')
    print >> fp, "# BUILD INFO"
    print >> fp, "# Date:", time.ctime()
    print >> fp, "# By:", pwd.getpwuid(os.getuid()).pw_gecos
    fp.close()

    # And copy it to a DMG
    buildDMG()
# Standard script entry point.
if __name__ == "__main__":
    main()
|
kubow/HAC | refs/heads/master | System/UI74AJ.py | 1 | #!/usr/bin/python3
from appJar import gui
from OS74 import FileSystemObject
from DB74 import DataBaseObject
from Template import SQL
def opt_changed():
    """Refresh the record list and the field labels for the chosen table."""
    rows = db_obj.return_many('SELECT * FROM {0};'.format(app.getOptionBox("optionbox")))
    app.clearListBox("list", callFunction=False)
    app.addListItems("list", [row[0] for row in rows])
    # Rename labels en1..en9 to the table's column names, then blank the rest.
    slot = 1
    for column in db_obj.object_structure(app.getOptionBox("optionbox")):
        if slot > 9:
            break
        app.setLabel('en' + str(slot), column)
        slot += 1
    while slot <= 9:
        app.setLabel('en' + str(slot), '')
        slot += 1
def lst_changed():
    """Load the values of the selected record into entry fields e1..e9."""
    table = app.getOptionBox("optionbox")
    field_name = app.getLabel('en1')
    if app.getListBox("list"):
        record_value = app.getListBox("list")[0]
        print('retrieving ' + field_name + ': ' + record_value)
        # NOTE(review): the WHERE clause is built by string concatenation;
        # a double quote in record_value breaks the query (SQL injection
        # risk). Prefer parameterized queries if DB74 supports them.
        result_vals = db_obj.return_many(SQL.select_where.format('*', table, field_name + '= "' + record_value + '"'))
        if result_vals:
            i = 1
            # Copy up to nine column values of the first matching row.
            for field in result_vals[0]:
                if i > 9:
                    break
                app.setEntry('e' + str(i), field)
                i += 1
def press(btn):
    """Button callback: close the app on Storno, record the selection on Zapsat."""
    if btn == "Storno":
        app.stop()
        return
    if btn == "Zapsat":
        list_select()
        return
    print('not defined yet')
def list_select():
    # Show which table and record are currently selected.
    app.infoBox("Info", "You selected " + app.getOptionBox("optionbox") + "\nBrowsing " + app.getListBox("list")[0][0])
# Locate the SQLite database two directories above this script.
root = FileSystemObject().dir_up(2)
print(root)
db_file = FileSystemObject(root).append_objects(file="H808E_tab.db")
db_obj = DataBaseObject(db_file)
# First column of each view row is the table/view name shown in the option box.
db_obj_list = [obj[0] for obj in db_obj.view_list]

app = gui("Database Editor", "800x500")
app.setPadding(10, 10)
app.setFont(12)
# app.addHorizontalSeparator(0,0,4, colour="red")
app.addLabel("title", "Hvezdna encyklopedie", 0, 0, 2)
app.addOptionBox("optionbox", db_obj_list, 0, 2)
# Nine label/entry pairs; opt_changed() renames the labels to the
# selected table's column names at runtime.
app.addLabel("en1", "nazev", 1, 0)
app.addEntry("e1", 1, 1)
app.addLabel("en2", "hodnota", 2, 0)
app.addEntry("e2", 2, 1)
app.addLabel("en3", "neco_jineho", 3, 0)
app.addEntry("e3", 3, 1)
app.addLabel("en4", "dalsi", 4, 0)
app.addEntry("e4", 4, 1)
app.addLabel("en5", "jeste_jedna", 5, 0)
app.addEntry("e5", 5, 1)
app.addLabel("en6", "neco_jinakeho", 6, 0)
app.addEntry("e6", 6, 1)
app.addLabel("en7", "blabla", 7, 0)
app.addEntry("e7", 7, 1)
app.addLabel("en8", "hohoho", 8, 0)
app.addEntry("e8", 8, 1)
app.addLabel("en9", "neco_neco", 9, 0)
app.addEntry("e9", 9, 1)
app.addListBox("list", db_obj.return_many('SELECT nazev FROM suroviny;'), 1, 2, 1, 9)
app.addButtons(["Zapsat", "Storno"], press, 10, 0, 2)
app.setOptionBoxChangeFunction("optionbox", opt_changed)
app.setListBoxChangeFunction("list", lst_changed)
# start the GUI
app.go()
|
QinerTech/QinerApps | refs/heads/master | openerp/addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py | 46 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_timesheet_current_open(osv.osv_memory):
    """Wizard that opens the current user's timesheet(s) covering today."""
    _name = 'hr.timesheet.current.open'
    _description = 'hr.timesheet.current.open'

    def open_timesheet(self, cr, uid, ids, context=None):
        """Return an act_window for the user's draft/new sheet(s) of today.

        Opens the single matching sheet directly in form view; with
        several matches, a list view restricted to the user is shown.
        """
        ts = self.pool.get('hr_timesheet_sheet.sheet')
        if context is None:
            context = {}

        view_type = 'form,tree'
        # Draft/new sheets of this user whose period includes today.
        ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)

        if len(ids) > 1:
            view_type = 'tree,form'
            domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
        elif len(ids)==1:
            domain = "[('user_id', '=', uid)]"
        else:
            domain = "[('user_id', '=', uid)]"
        value = {
            'domain': domain,
            'name': _('Open Timesheet'),
            'view_type': 'form',
            'view_mode': view_type,
            'res_model': 'hr_timesheet_sheet.sheet',
            'view_id': False,
            'type': 'ir.actions.act_window'
        }
        # With exactly one match, open that record directly.
        if len(ids) == 1:
            value['res_id'] = ids[0]
        return value
|
richard-willowit/odoo | refs/heads/master | addons/website_twitter/controllers/main.py | 34 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import _
from odoo import http
from odoo.http import request
class Twitter(http.Controller):
    """Website controller exposing the cached favorite tweets."""

    @http.route(['/twitter_reload'], type='json', auth="user", website=True)
    def twitter_reload(self):
        # Re-fetch the favorite tweets for the current website.
        return request.website.fetch_favorite_tweets()

    @http.route(['/get_favorites'], type='json', auth="public", website=True)
    def get_tweets(self, limit=20):
        """Return up to *limit* cached tweets as parsed JSON dicts.

        Website publishers get a descriptive {"error": ...} dict when the
        integration is misconfigured; everyone else gets an empty list.
        """
        key = request.website.twitter_api_key
        secret = request.website.twitter_api_secret
        screen_name = request.website.twitter_screen_name
        # Only publishers see configuration error details.
        debug = request.env['res.users'].has_group('website.group_website_publisher')
        if not key or not secret:
            if debug:
                return {"error": _("Please set the Twitter API Key and Secret in the Website Settings.")}
            return []
        if not screen_name:
            if debug:
                return {"error": _("Please set a Twitter screen name to load favorites from, "
                                   "in the Website Settings (it does not have to be yours)")}
            return []
        TwitterTweets = request.env['website.twitter.tweet']
        tweets = TwitterTweets.search(
                [('website_id', '=', request.website.id),
                 ('screen_name', '=', screen_name)],
                limit=int(limit), order="tweet_id desc")
        if len(tweets) < 12:
            if debug:
                return {"error": _("Twitter user @%(username)s has less than 12 favorite tweets. "
                                   "Please add more or choose a different screen name.") % \
                                       {'username': screen_name}}
            else:
                return []
        # Each record stores the raw tweet JSON; decode before returning.
        return tweets.mapped(lambda t: json.loads(t.tweet))
|
dalduba/pythonqt | refs/heads/master | examples/PyGettingStarted/GettingStarted.py | 20 | from PythonQt import *
# NOTE(review): 'box' is injected by the embedding PythonQt host
# application (a QGroupBox with child widgets browser/button1/edit);
# it is not defined in this script -- confirm against the C++ side.
# set the title of the group box, accessing the title property
box.title = 'PythonQt Example'

# set the html content of the QTextBrowser
box.browser.html = 'Hello <b>Qt</b>!'

# set the title of the button
box.button1.text = 'Append Text'

# set the text of the line edit
box.edit.text = '42'

# define our own python method that appends the text from the line edit
# to the text browser
def appendLine():
    box.browser.append(box.edit.text)

# connect the button's clicked signal to our python method
box.button1.connect('clicked()', appendLine)

# connect the lineedit's returnPressed signal to our python method
box.edit.connect('returnPressed()', appendLine)

# show the window
box.show()
|
britcey/ansible | refs/heads/devel | lib/ansible/modules/packaging/os/urpmi.py | 71 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Philippe Makowski
# Written by Philippe Makowski <philippem@mageia.org>
# Based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: urpmi
short_description: Urpmi manager
description:
- Manages packages with I(urpmi) (such as for Mageia or Mandriva)
version_added: "1.3.4"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
default: present
choices: [ "absent", "present" ]
update_cache:
description:
- update the package database first C(urpmi.update -a).
required: false
default: no
choices: [ "yes", "no" ]
no-recommends:
description:
- Corresponds to the C(--no-recommends) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
force:
description:
- Assume "yes" is the answer to any question urpmi has to ask.
Corresponds to the C(--force) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
author: "Philippe Makowski (@pmakowski)"
notes: []
'''
EXAMPLES = '''
# install package foo
- urpmi:
pkg: foo
state: present
# remove package foo
- urpmi:
pkg: foo
state: absent
# description: remove packages foo and bar
- urpmi:
pkg: foo,bar
state: absent
# description: update the package database (urpmi.update -a -q) and install bar (bar will be the updated if a newer version exists)
- urpmi:
name: bar
state: present
update_cache: yes
'''
import shlex
import os
import sys
URPMI_PATH = '/usr/sbin/urpmi'
URPME_PATH = '/usr/sbin/urpme'
def query_package(module, name):
    """Return True when the RPM package *name* is installed.

    ``rpm -q`` exits with status 0 for an installed package and
    1 for a missing one.
    """
    rc, stdout, stderr = module.run_command("rpm -q %s" % name, check_rc=False)
    return rc == 0
def query_package_provides(module, name):
    """Return True when *name* resolves to an installed package.

    ``rpm -q --provides`` exits with status 0 only if the package
    is installed.
    """
    command = "rpm -q --provides %s" % name
    rc, stdout, stderr = module.run_command(command, check_rc=False)
    if rc == 0:
        return True
    return False
def update_package_db(module):
    """Refresh all urpmi media via ``urpmi.update -a -q``.

    Fails the Ansible module when the refresh command exits non-zero.
    """
    rc, stdout, stderr = module.run_command("urpmi.update -a -q",
                                            check_rc=False)
    if rc:
        module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
    """Remove every installed package in *packages* and exit the module.

    Packages that are not installed are skipped silently; the first
    failing removal aborts with ``fail_json`` naming the package.
    """
    removed_count = 0
    # Loop one package at a time so a failure can name the culprit.
    for pkg in packages:
        # Nothing to do for packages that are already absent.
        if not query_package(module, pkg):
            continue
        command = "%s --auto %s" % (URPME_PATH, pkg)
        rc, stdout, stderr = module.run_command(command, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (pkg))
        removed_count += 1
    if removed_count:
        module.exit_json(changed=True, msg="removed %s package(s)" % removed_count)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec, force=True, no_recommends=True):
    """Install every package in *pkgspec* that is not already provided.

    :param module: AnsibleModule used to run commands and report results.
    :param pkgspec: iterable of package names to ensure present.
    :param force: pass ``--force`` to urpmi (assume "yes" to questions).
    :param no_recommends: pass ``--no-recommends`` to urpmi.
    """
    packages = ""
    for package in pkgspec:
        if not query_package_provides(module, package):
            packages += "'%s' " % package
    if len(packages) != 0:
        if no_recommends:
            no_recommends_yes = '--no-recommends'
        else:
            no_recommends_yes = ''
        if force:
            force_yes = '--force'
        else:
            force_yes = ''
        cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_recommends_yes, packages))
        rc, out, err = module.run_command(cmd)
        installed = True
        # BUG FIX: the original wrote `for packages in pkgspec:` and then
        # tested the stale `package` variable, so only the last package of
        # the *first* loop was ever re-checked and the `packages` string used
        # in the messages below was clobbered by the loop variable.
        for package in pkgspec:
            if not query_package_provides(module, package):
                installed = False
        # urpmi always exits 0 when --force is used, so also verify that
        # everything really got installed before declaring success.
        if rc or not installed:
            module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
        else:
            module.exit_json(changed=True, msg="%s present(s)" % packages)
    else:
        module.exit_json(changed=False)
def main():
    """Module entry point: parse parameters and install/remove packages."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
            update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
            force = dict(default=True, type='bool'),
            no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
            package = dict(aliases=['pkg', 'name'], required=True)))
    # Fail fast when urpmi itself is not present on the target host.
    if not os.path.exists(URPMI_PATH):
        module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))
    p = module.params
    force_yes = p['force']
    no_recommends_yes = p['no_recommends']
    # Optionally refresh the urpmi media metadata before acting.
    if p['update_cache']:
        update_package_db(module)
    # `package` may hold several comma-separated names.
    packages = p['package'].split(',')
    if p['state'] in [ 'installed', 'present' ]:
        install_packages(module, packages, force_yes, no_recommends_yes)
    elif p['state'] in [ 'removed', 'absent' ]:
        remove_packages(module, packages)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
rgom/Pydev | refs/heads/development | plugins/org.python.pydev.refactoring/tests/python/typeinference/import.py | 9 |
# PyDev type-inference test fixture.
# NOTE(review): the trailing "## type X" comments appear to be assertions
# consumed by the type-inference test harness -- do not edit, reflow or
# remove them; plain single-hash comments like these should be ignored
# by the marker parser, but confirm before relying on that.
import import_package.bar
import import_package.foo
import import_package.baz as baz_alias
from import_package.qux import Qux
from import_package.quux import Quux as Xuuq
from import_package import sub_package
baz_alias ## type baz
import_package ## type import_package
sub_package ## type sub_package
import_package.foo ## type foo
import_package.bar ## type bar
x1 = baz_alias
x2 = import_package.bar
x1 ## type baz
x2 ## type bar
x = baz_alias.Baz()
x ## type Baz
print x.path()
q = Qux()
q ## type Qux
aq = Xuuq()
aq ## type Quux
|
cogeorg/BlackRhino | refs/heads/master | examples/firesales_SA/networkx/readwrite/pajek.py | 22 | """
*****
Pajek
*****
Read graphs in Pajek format.
This implementation handles directed and undirected graphs including
those with self loops and parallel edges.
Format
------
See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
for format information.
"""
# Copyright (C) 2008-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import is_string_like, open_file, make_str
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['read_pajek', 'parse_pajek', 'generate_pajek', 'write_pajek']
def generate_pajek(G):
    """Generate lines in Pajek graph format.

    Parameters
    ----------
    G : graph
       A Networkx graph

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    if G.name=='':
        name='NetworkX'
    else:
        name=G.name
    # Apparently many Pajek format readers can't process this line
    # So we'll leave it out for now.
    # yield '*network %s'%name

    # write nodes with attributes
    yield '*vertices %s'%(G.order())
    nodes = G.nodes()
    # make dictionary mapping nodes to integers
    nodenumber=dict(zip(nodes,range(1,len(nodes)+1)))
    for n in nodes:
        na=G.node.get(n,{})
        x=na.get('x',0.0)
        y=na.get('y',0.0)
        id=int(na.get('id',nodenumber[n]))
        nodenumber[n]=id
        shape=na.get('shape','ellipse')
        s=' '.join(map(make_qstr,(id,n,x,y,shape)))
        # remaining node attributes are appended as quoted key/value pairs
        for k,v in na.items():
            s+=' %s %s'%(make_qstr(k),make_qstr(v))
        yield s

    # write edges with attributes
    if G.is_directed():
        yield '*arcs'
    else:
        yield '*edges'
    for u,v,edgedata in G.edges(data=True):
        d=edgedata.copy()
        value=d.pop('weight',1.0) # use 1 as default edge value
        s=' '.join(map(make_qstr,(nodenumber[u],nodenumber[v],value)))
        for k,v in d.items():
            s+=' %s %s'%(make_qstr(k),make_qstr(v))
        # BUG FIX: a stray duplicate `s += ' %s %s' % (k, v)` used to sit here,
        # outside the loop, re-appending the last attribute pair *unquoted*
        # and corrupting the emitted edge line; it has been removed.
        yield s
@open_file(1,mode='wb')
def write_pajek(G, path, encoding='UTF-8'):
    """Write graph in Pajek format to path.

    Parameters
    ----------
    G : graph
       A Networkx graph
    path : file or string
       File or filename to write.
       Filenames ending in .gz or .bz2 will be compressed.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_pajek(G, "test.net")

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    # Terminate each generated record with a newline and encode it,
    # since the decorator opened the target in binary mode.
    for record in generate_pajek(G):
        path.write((record + '\n').encode(encoding))
@open_file(0,mode='rb')
def read_pajek(path,encoding='UTF-8'):
    """Read graph in Pajek format from path.

    Parameters
    ----------
    path : file or string
       File or filename to write.
       Filenames ending in .gz or .bz2 will be uncompressed.

    Returns
    -------
    G : NetworkX MultiGraph or MultiDiGraph.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_pajek(G, "test.net")
    >>> G=nx.read_pajek("test.net")

    To create a Graph instead of a MultiGraph use

    >>> G1=nx.Graph(G)

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    # The file was opened in binary mode by the decorator; decode lazily.
    decoded = (raw_line.decode(encoding) for raw_line in path)
    return parse_pajek(decoded)
def parse_pajek(lines):
    """Parse Pajek format graph from string or iterable.

    Parameters
    ----------
    lines : string or iterable
       Data in Pajek format.

    Returns
    -------
    G : NetworkX graph

    See Also
    --------
    read_pajek()
    """
    import shlex
    # multigraph=False
    if is_string_like(lines): lines=iter(lines.split('\n'))
    lines = iter([line.rstrip('\n') for line in lines])
    G=nx.MultiDiGraph() # are multiedges allowed in Pajek? assume yes
    # Loop until the iterator is exhausted (exit via the break below).
    while True:
        try:
            l=next(lines)
        except StopIteration: # EOF
            break
        if l.lower().startswith("*network"):
            label,name=l.split()
            G.name=name
        if l.lower().startswith("*vertices"):
            nodelabels={}
            l,nnodes=l.split()
            for i in range(int(nnodes)):
                splitline=shlex.split(str(next(lines)))
                id,label=splitline[0:2]
                G.add_node(label)
                nodelabels[id]=label
                G.node[label]={'id':id}
                try:
                    x,y,shape=splitline[2:5]
                    G.node[label].update({'x':float(x),
                                          'y':float(y),
                                          'shape':shape})
                # BUG FIX: was a bare `except:`; the guarded statements can
                # only raise ValueError (short unpack or bad float), so only
                # that is silenced now -- coordinates/shape are optional.
                except ValueError:
                    pass
                extra_attr=zip(splitline[5::2],splitline[6::2])
                G.node[label].update(extra_attr)
        if l.lower().startswith("*edges") or l.lower().startswith("*arcs"):
            if l.lower().startswith("*edge"):
                # switch from multidigraph to multigraph
                G=nx.MultiGraph(G)
            if l.lower().startswith("*arcs"):
                # switch to directed with multiple arcs for each existing edge
                G=G.to_directed()
            for l in lines:
                splitline=shlex.split(str(l))
                if len(splitline)<2:
                    continue
                ui,vi=splitline[0:2]
                u=nodelabels.get(ui,ui)
                v=nodelabels.get(vi,vi)
                # parse the data attached to this edge and put in a dictionary
                edge_data={}
                try:
                    # there should always be a single value on the edge?
                    w=splitline[2:3]
                    edge_data.update({'weight':float(w[0])})
                # BUG FIX: was a bare `except:`; only a missing third field
                # (IndexError) or a non-numeric one (ValueError) can occur.
                except (IndexError, ValueError):
                    pass
                    # if there isn't, just assign a 1
                    # edge_data.update({'value':1})
                extra_attr=zip(splitline[3::2],splitline[4::2])
                edge_data.update(extra_attr)
                # if G.has_edge(u,v):
                #     multigraph=True
                G.add_edge(u,v,**edge_data)
    return G
def make_qstr(t):
    """Return the string representation of t.
    Add outer double-quotes if the string has a space.
    """
    text = t if is_string_like(t) else str(t)
    if " " in text:
        return '"%s"' % text
    return text
# fixture for nose tests
def teardown_module(module):
import os
os.unlink('test.net')
|
wd5/jangr | refs/heads/master | django/contrib/gis/gdal/__init__.py | 397 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existant file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
    from django.contrib.gis.gdal.driver import Driver
    from django.contrib.gis.gdal.datasource import DataSource
    from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
    from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
    from django.contrib.gis.gdal.geometries import OGRGeometry
    HAS_GDAL = True
except:
    # NOTE(review): a bare `except:` also swallows SystemExit and
    # KeyboardInterrupt; `except Exception:` would be safer -- confirm
    # before changing, since *any* import-time failure is deliberately
    # treated as "GDAL unavailable" here.
    HAS_GDAL, GEOJSON = False, False
try:
    from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
    # No ctypes, but don't raise an exception.
    pass
# Per the module docstring above, these objects do not require the GDAL
# C library, so they are imported unconditionally.
from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
|
Comunitea/l10n-spain | refs/heads/8.0 | l10n_es_aeat/models/aeat_report.py | 7 | # -*- coding: utf-8 -*-
# © 2004-2011 - Pexego Sistemas Informáticos - Luis Manuel Angueira Blanco
# © 2013 - Acysos S.L. - Ignacio Ibeas (Migración a v7)
# © 2014-2016 - Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import fields, models, api, exceptions, SUPERUSER_ID, _
from datetime import datetime
import re
class L10nEsAeatReport(models.AbstractModel):
    """Abstract base model for Spanish AEAT tax reports.

    Concrete report models (mod. 303, 347, ...) inherit from this model and
    must set ``_aeat_number``; it drives the per-company ``ir.sequence`` used
    to build the report identifier (see ``init`` and
    ``_report_identifier_get``).
    """
    _name = "l10n.es.aeat.report"
    _description = "AEAT report base module"
    _rec_name = 'name'
    # Must be overridden by subclasses; used to build the sequence name
    # "aeat<number>-sequence" (init() raises when it is left falsy).
    _aeat_number = False
    # Which period granularities get_period_type_selection() offers.
    _period_quarterly = True
    _period_monthly = True
    _period_yearly = False
    def _default_company(self):
        """Default company: the user's default for this model."""
        company_obj = self.env['res.company']
        return company_obj._company_default_get('l10n.es.aeat.report')
    def _default_journal(self):
        """Default journal: first journal of type 'general'."""
        return self.env['account.journal'].search(
            [('type', '=', 'general')])[:1]
    def get_period_type_selection(self):
        """Build the period_type selection list according to the
        _period_yearly/_period_quarterly/_period_monthly class flags."""
        period_types = []
        if self._period_yearly:
            period_types += [('0A', '0A - Anual')]
        if self._period_quarterly:
            period_types += [('1T', '1T - Primer trimestre'),
                             ('2T', '2T - Segundo trimestre'),
                             ('3T', '3T - Tercer trimestre'),
                             ('4T', '4T - Cuarto trimestre')]
        if self._period_monthly:
            period_types += [('01', '01 - Enero'),
                             ('02', '02 - Febrero'),
                             ('03', '03 - Marzo'),
                             ('04', '04 - Abril'),
                             ('05', '05 - Mayo'),
                             ('06', '06 - Junio'),
                             ('07', '07 - Julio'),
                             ('08', '08 - Agosto'),
                             ('09', '09 - Septiembre'),
                             ('10', '10 - Octubre'),
                             ('11', '11 - Noviembre'),
                             ('12', '12 - Diciembre')]
        return period_types
    def _default_period_type(self):
        """Default period_type: the first available selection entry."""
        selection = self.get_period_type_selection()
        return selection and selection[0][0] or False
    company_id = fields.Many2one(
        'res.company', string='Company', required=True, readonly=True,
        default=_default_company, states={'draft': [('readonly', False)]})
    company_vat = fields.Char(
        string='VAT number', size=9, required=True, readonly=True,
        states={'draft': [('readonly', False)]})
    number = fields.Char(
        string='Declaration number', size=13, required=True, readonly=True)
    previous_number = fields.Char(
        string='Previous declaration number', size=13,
        states={'done': [('readonly', True)]})
    contact_name = fields.Char(
        string="Full Name", size=40, help="Must have name and surname.",
        states={'calculated': [('required', True)],
                'confirmed': [('readonly', True)]})
    contact_phone = fields.Char(
        string="Phone", size=9, states={'calculated': [('required', True)],
                                        'confirmed': [('readonly', True)]})
    representative_vat = fields.Char(
        string='L.R. VAT number', size=9,
        help="Legal Representative VAT number.",
        states={'confirmed': [('readonly', True)]})
    fiscalyear_id = fields.Many2one(
        'account.fiscalyear', string='Fiscal year', required=True,
        readonly=True, states={'draft': [('readonly', False)]})
    type = fields.Selection(
        [('N', 'Normal'), ('C', 'Complementary'), ('S', 'Substitutive')],
        string='Statement Type', default='N', readonly=True, required=True,
        states={'draft': [('readonly', False)]})
    support_type = fields.Selection(
        [('C', 'DVD'), ('T', 'Telematics')], string='Support Type',
        default='T', states={'calculated': [('required', True)],
                             'done': [('readonly', True)]})
    calculation_date = fields.Datetime(string="Calculation date")
    state = fields.Selection(
        [('draft', 'Draft'),
         ('calculated', 'Processed'),
         ('done', 'Done'),
         ('posted', 'Posted'),
         ('cancelled', 'Cancelled')], string='State', readonly=True,
        default='draft')
    name = fields.Char(string="Report identifier", size=13, oldname='sequence')
    model = fields.Many2one(
        comodel_name="ir.model", compute='_compute_report_model')
    export_config = fields.Many2one(
        comodel_name='aeat.model.export.config', string='Export config',
        domain="[('model', '=', model)]")
    period_type = fields.Selection(
        selection="get_period_type_selection", string="Period type",
        required=True, default=_default_period_type,
        readonly=True, states={'draft': [('readonly', False)]})
    periods = fields.Many2many(
        comodel_name='account.period', readonly=True, string="Period(s)",
        states={'draft': [('readonly', False)]})
    allow_posting = fields.Boolean(compute="_compute_allow_posting")
    counterpart_account = fields.Many2one(
        comodel_name="account.account",
        help="This account will be the counterpart for all the journal items "
             "that are regularized when posting the report.")
    journal_id = fields.Many2one(
        comodel_name="account.journal", string="Journal",
        domain=[('type', '=', 'general')], default=_default_journal,
        help="Journal in which post the move.")
    move_id = fields.Many2one(
        comodel_name="account.move", string="Account entry")
    partner_bank_id = fields.Many2one(
        comodel_name='res.partner.bank', string='Bank account',
        help='Company bank account used for the presentation',
        domain="[('state', '=', 'iban'), ('company_id', '=', company_id)]")
    _sql_constraints = [
        ('name_uniq', 'unique(name, company_id)',
         'AEAT report identifier must be unique by company'),
    ]
    @api.one
    def _compute_report_model(self):
        """Expose this report's ir.model record (used in export_config domain)."""
        self.model = self.env['ir.model'].search([('model', '=', self._name)])
    @api.one
    def _compute_allow_posting(self):
        """Posting disabled by default; subclasses may override."""
        self.allow_posting = False
    @api.onchange('company_id')
    def on_change_company_id(self):
        """Loads some company data (the VAT number) when the selected
        company changes.
        """
        if self.company_id.vat:
            # Remove the ES part from spanish vat numbers
            #  (ES12345678Z => 12345678Z)
            self.company_vat = re.match(
                "(ES){0,1}(.*)", self.company_id.vat).groups()[1]
        self.contact_name = self.env.user.name
        self.contact_phone = self.env.user.partner_id.phone
    @api.onchange('period_type', 'fiscalyear_id')
    def onchange_period_type(self):
        """Fill `periods` with the account periods matching the selected
        period type within the selected fiscal year.

        Returns a warning dict when the periods cannot be determined
        automatically (split fiscal years, non-standard period layout).
        """
        period_model = self.env['account.period']
        if not self.fiscalyear_id:
            self.periods = False
        else:
            fy_date_start = fields.Date.from_string(
                self.fiscalyear_id.date_start)
            fy_date_stop = fields.Date.from_string(
                self.fiscalyear_id.date_stop)
            if self.period_type == '0A':
                # Annual
                if fy_date_start.year != fy_date_stop.year:
                    return {
                        'warning': {'title': _('Warning'), 'message': _(
                            'Split fiscal years cannot be automatically '
                            'handled. You should select manually the periods.')
                        }
                    }
                self.periods = self.fiscalyear_id.period_ids.filtered(
                    lambda x: not x.special)
            elif self.period_type in ('1T', '2T', '3T', '4T'):
                # Quarterly
                start_month = (int(self.period_type[:1]) - 1) * 3 + 1
                # Handle fiscal years split across two calendar years
                year = (fy_date_start.year if
                        start_month < fy_date_start.month else
                        fy_date_stop.year)
                period = period_model.find(
                    dt=fields.Date.to_string(
                        datetime(year=year, month=start_month, day=1)))
                period_date_stop = fields.Date.from_string(period.date_stop)
                self.periods = period
                if period_date_stop.month != start_month + 2:
                    # Periods are not defined quarterly: collect the three
                    # monthly periods of the quarter instead
                    for i in range(1, 3):
                        month = start_month + i
                        period = period_model.find(
                            dt=fields.Date.to_string(
                                datetime(year=year, month=month, day=1)))
                        self.periods += period
            elif self.period_type in ('01', '02', '03', '04', '05', '06',
                                      '07', '08', '09', '10', '11', '12'):
                # Monthly
                month = int(self.period_type)
                # Handle fiscal years split across two calendar years
                year = (fy_date_start.year if month < fy_date_start.month else
                        fy_date_stop.year)
                period = period_model.find(
                    dt=fields.Date.to_string(
                        datetime(year=year, month=month, day=1)))
                period_date_start = fields.Date.from_string(period.date_start)
                period_date_stop = fields.Date.from_string(period.date_stop)
                if period_date_start.month != period_date_stop.month:
                    return {
                        'warning': {'title': _('Warning'), 'message': _(
                            'It seems that you have defined quarterly periods '
                            'or periods in the middle of the month. This '
                            'cannot be automatically handled. You should '
                            'select manually the periods.')
                        }
                    }
                self.periods = period
    @api.model
    def _report_identifier_get(self, vals):
        """Return the next identifier from this report's per-company sequence.

        NOTE(review): when no sequence matches, ``seqs.id`` is falsy and
        ``next_by_id`` is called with it anyway -- confirm the sequence is
        guaranteed to exist (it is created in ``init``).
        """
        seq_obj = self.env['ir.sequence']
        seq_name = "aeat%s-sequence" % self._model._aeat_number
        company_id = vals.get('company_id', self.env.user.company_id.id)
        seqs = seq_obj.search([('name', '=', seq_name),
                               ('company_id', '=', company_id)])
        return seq_obj.next_by_id(seqs.id)
    @api.model
    def create(self, vals):
        """Assign the sequence-based identifier when none is provided."""
        if not vals.get('name'):
            vals['name'] = self._report_identifier_get(vals)
        return super(L10nEsAeatReport, self).create(vals)
    @api.multi
    def button_calculate(self):
        """Run calculate() and move the report to the 'calculated' state."""
        res = self.calculate()
        self.write({'state': 'calculated',
                    'calculation_date': fields.Datetime.now()})
        return res
    @api.multi
    def button_recalculate(self):
        """Re-run calculate() updating the calculation date (state unchanged)."""
        self.write({'calculation_date': fields.Datetime.now()})
        return self.calculate()
    @api.multi
    def _get_previous_fiscalyear_reports(self, date):
        """Get the AEAT reports previous to the given date.

        :param date: Date for looking for previous reports.
        :return: Recordset of the previous AEAT reports. None if there is no
            previous reports.
        """
        self.ensure_one()
        prev_periods = self.fiscalyear_id.period_ids.filtered(
            lambda x: not x.special and x.date_start < date)
        prev_reports = None
        for period in prev_periods:
            reports = self.search([('periods', '=', period.id)])
            if not reports:
                raise exceptions.Warning(
                    _("There's a missing previous declaration for the period "
                      "%s.") % period.name)
            if not prev_reports:
                prev_reports = reports
            else:
                prev_reports |= reports
        return prev_reports
    @api.multi
    def calculate(self):
        """Hook for subclasses; here it only validates that periods are set."""
        for report in self:
            if not report.periods:
                raise exceptions.Warning(
                    _('There is no period defined for the report. Please set '
                      'at least one period and try again.'))
        return True
    @api.multi
    def button_confirm(self):
        """Set report status to done."""
        self.write({'state': 'done'})
        return True
    @api.multi
    def _prepare_move_vals(self):
        """Prepare the account.move values dated on the last report period."""
        self.ensure_one()
        last = self.periods.sorted(lambda x: x.date_stop)[-1:]
        return {
            'journal_id': self.journal_id.id,
            'date': last.date_stop,
            'period_id': last.id,
            'ref': self.name,
            'company_id': self.company_id.id,
        }
    @api.multi
    def button_post(self):
        """Create any possible account move and set state to posted."""
        self.create_regularization_move()
        self.write({'state': 'posted'})
        return True
    @api.multi
    def button_cancel(self):
        """Set report status to cancelled."""
        self.write({'state': 'cancelled'})
        return True
    @api.multi
    def button_unpost(self):
        """Remove created account move and set state to cancelled.

        NOTE(review): the original docstring said "state to done" while the
        code writes 'cancelled' -- confirm which behavior is intended.
        """
        self.mapped('move_id').unlink()
        self.write({'state': 'cancelled'})
        return True
    @api.multi
    def button_recover(self):
        """Set report status to draft and reset calculation date."""
        self.write({'state': 'draft', 'calculation_date': None})
        return True
    @api.multi
    def button_export(self):
        """Export each report through its model-specific BOE exporter."""
        for report in self:
            export_obj = self.env[
                "l10n.es.aeat.report.%s.export_to_boe" % report.number]
            export_obj.export_boe_file(report)
        return True
    @api.multi
    def button_open_move(self):
        """Open the form view of the account move linked to this report."""
        self.ensure_one()
        action = self.env.ref('account.action_move_line_form').read()[0]
        action['view_mode'] = 'form'
        action['res_id'] = self.move_id.id
        del action['view_id']
        del action['views']
        return action
    @api.multi
    def unlink(self):
        """Forbid deletion of reports that are not draft/cancelled."""
        if any(item.state not in ['draft', 'cancelled'] for item in self):
            raise exceptions.Warning(_("Only reports in 'draft' or "
                                       "'cancelled' state can be removed"))
        return super(L10nEsAeatReport, self).unlink()
    def init(self, cr):
        """Create the per-model AEAT sequence on module installation."""
        # TODO: Move into _register_hook to avoid clashes on multi-database
        # setups
        if self._name not in ('l10n.es.aeat.report',
                              'l10n.es.aeat.report.tax.mapping'):
            seq_obj = self.pool['ir.sequence']
            try:
                aeat_num = getattr(self, '_aeat_number')
                if not aeat_num:
                    raise Exception()
                sequence = "aeat%s-sequence" % aeat_num
                if not seq_obj.search(cr, SUPERUSER_ID,
                                      [('name', '=', sequence)]):
                    seq_vals = {'name': sequence,
                                'code': 'aeat.sequence.type',
                                'number_increment': 1,
                                'implementation': 'no_gap',
                                'padding': 13 - len(str(aeat_num)),
                                'number_next_actual': 1,
                                'prefix': aeat_num
                                }
                    seq_obj.create(cr, SUPERUSER_ID, seq_vals)
            except:
                raise exceptions.Warning(
                    "Modelo no válido: %s. Debe declarar una variable "
                    "'_aeat_number'" % self._name)
|
FireWRT/OpenWrt-Firefly-Libraries | refs/heads/master | staging_dir/host/lib/python2.7/test/test_epoll.py | 45 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Tests for epoll wrapper.
"""
import socket
import errno
import time
import select
import unittest
from test import test_support
if not hasattr(select, "epoll"):
raise unittest.SkipTest("test works only on Linux 2.6")
try:
select.epoll()
except IOError, e:
if e.errno == errno.ENOSYS:
raise unittest.SkipTest("kernel doesn't support epoll()")
raise
class TestEPoll(unittest.TestCase):
    """Exercise the `select.epoll` wrapper against real local sockets.

    Each test builds a non-blocking client/server TCP pair on localhost
    (see _connected_pair) and registers the sockets with an epoll object.
    The timing assertions only bound how long poll() may *block*, not
    measure performance.
    """
    def setUp(self):
        # Listening socket on an ephemeral localhost port; every socket
        # created during a test is tracked in self.connections for tearDown.
        self.serverSocket = socket.socket()
        self.serverSocket.bind(('127.0.0.1', 0))
        self.serverSocket.listen(1)
        self.connections = [self.serverSocket]
    def tearDown(self):
        # Close every socket opened by the test.
        for skt in self.connections:
            skt.close()
    def _connected_pair(self):
        """Return a (client, server) pair of connected TCP sockets.

        The client is non-blocking, so connect() is expected to raise
        EINPROGRESS; the connection completes via accept().
        """
        client = socket.socket()
        client.setblocking(False)
        try:
            client.connect(('127.0.0.1', self.serverSocket.getsockname()[1]))
        except socket.error, e:
            self.assertEqual(e.args[0], errno.EINPROGRESS)
        else:
            raise AssertionError("Connect should have raised EINPROGRESS")
        server, addr = self.serverSocket.accept()
        self.connections.extend((client, server))
        return client, server
    def test_create(self):
        # A fresh epoll object has a usable fd until close(), after which
        # fileno() must raise ValueError.
        try:
            ep = select.epoll(16)
        except OSError, e:
            raise AssertionError(str(e))
        self.assertTrue(ep.fileno() > 0, ep.fileno())
        self.assertTrue(not ep.closed)
        ep.close()
        self.assertTrue(ep.closed)
        self.assertRaises(ValueError, ep.fileno)
    def test_badcreate(self):
        # The sizehint argument must be a single integer.
        self.assertRaises(TypeError, select.epoll, 1, 2, 3)
        self.assertRaises(TypeError, select.epoll, 'foo')
        self.assertRaises(TypeError, select.epoll, None)
        self.assertRaises(TypeError, select.epoll, ())
        self.assertRaises(TypeError, select.epoll, ['foo'])
        self.assertRaises(TypeError, select.epoll, {})
    def test_add(self):
        server, client = self._connected_pair()
        ep = select.epoll(2)
        try:
            ep.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
            ep.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
        # adding by object w/ fileno works, too.
        ep = select.epoll(2)
        try:
            ep.register(server, select.EPOLLIN | select.EPOLLOUT)
            ep.register(client, select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
        ep = select.epoll(2)
        try:
            # TypeError: argument must be an int, or have a fileno() method.
            self.assertRaises(TypeError, ep.register, object(),
                              select.EPOLLIN | select.EPOLLOUT)
            self.assertRaises(TypeError, ep.register, None,
                              select.EPOLLIN | select.EPOLLOUT)
            # ValueError: file descriptor cannot be a negative integer (-1)
            self.assertRaises(ValueError, ep.register, -1,
                              select.EPOLLIN | select.EPOLLOUT)
            # IOError: [Errno 9] Bad file descriptor
            self.assertRaises(IOError, ep.register, 10000,
                              select.EPOLLIN | select.EPOLLOUT)
            # registering twice also raises an exception
            ep.register(server, select.EPOLLIN | select.EPOLLOUT)
            self.assertRaises(IOError, ep.register, server,
                              select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
    def test_fromfd(self):
        # Two epoll objects built from the same fd share state; closing one
        # invalidates the other (EBADF).
        server, client = self._connected_pair()
        ep = select.epoll(2)
        ep2 = select.epoll.fromfd(ep.fileno())
        ep2.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
        ep2.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        events = ep.poll(1, 4)
        events2 = ep2.poll(0.9, 4)
        self.assertEqual(len(events), 2)
        self.assertEqual(len(events2), 2)
        ep.close()
        try:
            ep2.poll(1, 4)
        except IOError, e:
            self.assertEqual(e.args[0], errno.EBADF, e)
        else:
            self.fail("epoll on closed fd didn't raise EBADF")
    def test_control_and_wait(self):
        # Register both ends edge-triggered (EPOLLET) and walk through
        # writable-only, readable+writable, unregister and modify.
        client, server = self._connected_pair()
        ep = select.epoll(16)
        ep.register(server.fileno(),
                    select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        ep.register(client.fileno(),
                    select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        # Events are already pending, so poll() must not block near the
        # full timeout.
        self.assertFalse(then - now > 0.1, then - now)
        events.sort()
        expected = [(client.fileno(), select.EPOLLOUT),
                    (server.fileno(), select.EPOLLOUT)]
        expected.sort()
        self.assertEqual(events, expected)
        events = ep.poll(timeout=2.1, maxevents=4)
        self.assertFalse(events)
        client.send("Hello!")
        server.send("world!!!")
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)
        events.sort()
        expected = [(client.fileno(), select.EPOLLIN | select.EPOLLOUT),
                    (server.fileno(), select.EPOLLIN | select.EPOLLOUT)]
        expected.sort()
        self.assertEqual(events, expected)
        ep.unregister(client.fileno())
        ep.modify(server.fileno(), select.EPOLLOUT)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)
        expected = [(server.fileno(), select.EPOLLOUT)]
        self.assertEqual(events, expected)
    def test_errors(self):
        self.assertRaises(ValueError, select.epoll, -2)
        self.assertRaises(ValueError, select.epoll().register, -1,
                          select.EPOLLIN)
    def test_unregister_closed(self):
        # Unregistering the fd of an already-closed socket must not raise.
        server, client = self._connected_pair()
        fd = server.fileno()
        ep = select.epoll(16)
        ep.register(server)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)
        server.close()
        ep.unregister(fd)
def test_main():
    """Entry point used by the stdlib regression-test runner."""
    test_support.run_unittest(TestEPoll)
if __name__ == "__main__":
    test_main()
|
fengren/python_koans | refs/heads/master | python3/koans/about_attribute_access.py | 104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMessagePassing in the Ruby Koans
#
from runner.koan import *
class AboutAttributeAccess(Koan):
    """Koan exercises on Python attribute access: __getattribute__,
    __getattr__ and __setattr__.

    NOTE: the ``__``/``___`` placeholders are intentional blanks supplied by
    the koan runner for the student to fill in -- they are not bugs and must
    not be "fixed".
    """

    class TypicalObject:
        # A plain object with no attribute-access customisation at all.
        pass

    def test_calling_undefined_functions_normally_results_in_errors(self):
        typical = self.TypicalObject()

        with self.assertRaises(___): typical.foobar()

    def test_calling_getattribute_causes_an_attribute_error(self):
        typical = self.TypicalObject()

        with self.assertRaises(___): typical.__getattribute__('foobar')

    # THINK ABOUT IT:
    #
    # If the method __getattribute__() causes the AttributeError, then
    # what would happen if we redefine __getattribute__()?

    # ------------------------------------------------------------------

    class CatchAllAttributeReads:
        # Intercepts EVERY attribute read, even for attributes that exist.
        def __getattribute__(self, attr_name):
            return "Someone called '" + attr_name + "' and it could not be found"

    def test_all_attribute_reads_are_caught(self):
        catcher = self.CatchAllAttributeReads()

        self.assertRegexpMatches(catcher.foobar, __)

    def test_intercepting_return_values_can_disrupt_the_call_chain(self):
        catcher = self.CatchAllAttributeReads()

        self.assertRegexpMatches(catcher.foobaz, __) # This is fine

        try:
            catcher.foobaz(1)
        except TypeError as ex:
            err_msg = ex.args[0]

        self.assertRegexpMatches(err_msg, __)

        # foobaz returns a string. What happens to the '(1)' part?
        # Try entering this into a python console to reproduce the issue:
        #
        #     "foobaz"(1)
        #

    def test_changes_to_the_getattribute_implementation_affects_getattr_function(self):
        catcher = self.CatchAllAttributeReads()

        self.assertRegexpMatches(getattr(catcher, 'any_attribute'), __)

    # ------------------------------------------------------------------

    class WellBehavedFooCatcher:
        # Only fakes attributes starting with "foo"; everything else is
        # delegated to the normal lookup machinery.
        def __getattribute__(self, attr_name):
            if attr_name[:3] == "foo":
                return "Foo to you too"
            else:
                return super().__getattribute__(attr_name)

    def test_foo_attributes_are_caught(self):
        catcher = self.WellBehavedFooCatcher()

        self.assertEqual(__, catcher.foo_bar)
        self.assertEqual(__, catcher.foo_baz)

    def test_non_foo_messages_are_treated_normally(self):
        catcher = self.WellBehavedFooCatcher()

        with self.assertRaises(___): catcher.normal_undefined_attribute

    # ------------------------------------------------------------------

    global stack_depth
    stack_depth = 0

    class RecursiveCatcher:
        def __init__(self):
            global stack_depth
            stack_depth = 0
            self.no_of_getattribute_calls = 0

        def __getattribute__(self, attr_name):
            global stack_depth # We need something that is outside the scope of this class
            stack_depth += 1

            if stack_depth<=10: # to prevent a stack overflow
                self.no_of_getattribute_calls += 1
                # Oops! We just accessed an attribute (no_of_getattribute_calls)
                # Guess what happens when self.no_of_getattribute_calls is
                # accessed?

            # Using 'object' directly because using super() here will also
            # trigger a __getattribute__() call.
            return object.__getattribute__(self, attr_name)

        def my_method(self):
            pass

    def test_getattribute_is_a_bit_overzealous_sometimes(self):
        catcher = self.RecursiveCatcher()
        catcher.my_method()
        global stack_depth

        self.assertEqual(__, stack_depth)

    # ------------------------------------------------------------------

    class MinimalCatcher:
        class DuffObject: pass

        def __init__(self):
            self.no_of_getattr_calls = 0

        # __getattr__ (unlike __getattribute__) only fires for attributes
        # that normal lookup could NOT find.
        def __getattr__(self, attr_name):
            self.no_of_getattr_calls += 1
            return self.DuffObject

        def my_method(self):
            pass

    def test_getattr_ignores_known_attributes(self):
        catcher = self.MinimalCatcher()
        catcher.my_method()

        self.assertEqual(__, catcher.no_of_getattr_calls)

    def test_getattr_only_catches_unknown_attributes(self):
        catcher = self.MinimalCatcher()
        catcher.purple_flamingos()
        catcher.free_pie()

        self.assertEqual(__,
            type(catcher.give_me_duff_or_give_me_death()).__name__)

        self.assertEqual(__, catcher.no_of_getattr_calls)

    # ------------------------------------------------------------------

    class PossessiveSetter(object):
        # Rewrites attribute names on assignment depending on their suffix.
        def __setattr__(self, attr_name, value):
            new_attr_name = attr_name

            if attr_name[-5:] == 'comic':
                new_attr_name = "my_" + new_attr_name
            elif attr_name[-3:] == 'pie':
                new_attr_name = "a_" + new_attr_name

            object.__setattr__(self, new_attr_name, value)

    def test_setattr_intercepts_attribute_assignments(self):
        fanboy = self.PossessiveSetter()

        fanboy.comic = 'The Laminator, issue #1'
        fanboy.pie = 'blueberry'

        self.assertEqual(__, fanboy.a_pie)

        #
        # NOTE: Change the prefix to make this next assert pass
        #

        prefix = '__'
        self.assertEqual("The Laminator, issue #1", getattr(fanboy, prefix + '_comic'))

    # ------------------------------------------------------------------

    class ScarySetter:
        def __init__(self):
            # These assignments already run through the custom __setattr__
            # defined below.
            self.num_of_coconuts = 9
            self._num_of_private_coconuts = 2

        def __setattr__(self, attr_name, value):
            new_attr_name = attr_name

            if attr_name[0] != '_':
                new_attr_name = "altered_" + new_attr_name

            object.__setattr__(self, new_attr_name, value)

    def test_it_modifies_external_attribute_as_expected(self):
        setter = self.ScarySetter()
        setter.e = "mc hammer"

        self.assertEqual(__, setter.altered_e)

    def test_it_mangles_some_internal_attributes(self):
        setter = self.ScarySetter()

        try:
            coconuts = setter.num_of_coconuts
        except AttributeError:
            self.assertEqual(__, setter.altered_num_of_coconuts)

    def test_in_this_case_private_attributes_remain_unmangled(self):
        setter = self.ScarySetter()

        self.assertEqual(__, setter._num_of_private_coconuts)
|
LiveZenLK/CeygateERP | refs/heads/master | addons/l10n_be_invoice_bba/partner.py | 47 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
from openerp.osv import fields, osv
import time
from openerp.tools.translate import _
class res_partner(osv.osv):
    """Add fields to select the default 'Communication Type' and the
    structured-communication generation algorithm used on a partner's
    outgoing (customer) invoices."""
    _inherit = 'res.partner'

    def _get_comm_type(self, cr, uid, context=None):
        # Delegate to account.invoice so the selection stays in sync with
        # the reference types declared on the invoice model.
        res = self.pool.get('account.invoice')._get_reference_type(cr, uid,context=context)
        return res

    _columns = {
        'out_inv_comm_type': fields.selection(_get_comm_type, 'Communication Type', change_default=True,
            help='Select Default Communication Type for Outgoing Invoices.' ),
        'out_inv_comm_algorithm': fields.selection([
            ('random','Random'),
            ('date','Date'),
            ('partner_ref','Customer Reference'),
            ], 'Communication Algorithm',
            help='Select Algorithm to generate the Structured Communication on Outgoing Invoices.' ),
        }

    def _commercial_fields(self, cr, uid, context=None):
        # Propagate these settings from the commercial entity to its
        # contacts, like the other commercial fields.
        return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
            ['out_inv_comm_type', 'out_inv_comm_algorithm']

    # Bug fix: OpenERP's ORM only reads default values from the
    # ``_defaults`` attribute; the original ``_default`` (missing the
    # trailing "s") was silently ignored, so 'out_inv_comm_type' never
    # received its intended default.
    _defaults = {
        'out_inv_comm_type': 'none',
    }
|
artemsok/sockeye | refs/heads/master | sockeye_contrib/autopilot/test.py | 1 | #!/usr/bin/env python3
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import glob
import os
import shutil
import subprocess
import sys
import tempfile
from typing import List
# Make sure the version of sockeye being tested is first on the system path
try:
import sockeye_contrib.autopilot.autopilot as autopilot
except ImportError:
SOCKEYE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
PYTHONPATH = "PYTHONPATH"
if os.environ.get(PYTHONPATH, None):
os.environ[PYTHONPATH] += os.pathsep + SOCKEYE_ROOT
else:
os.environ[PYTHONPATH] = SOCKEYE_ROOT
sys.path.append(SOCKEYE_ROOT)
import sockeye_contrib.autopilot.autopilot as autopilot
# Test-specific constants
WNMT_TASK = "wnmt18_en_de"
DATA_ONLY_TASK = "wmt14_fr_en"
WMT_TASK = "wmt14_de_en"
WMT_SRC = "de"
WMT_TRG = "en"
WMT_BPE = 32000
PREFIX_ZERO = "0."
def run_test(command: List[str], workspace: str):
    """Run one autopilot test command against `workspace`.

    On success, trained model directories are deleted (the prepared data is
    kept so later tasks can reuse it as custom inputs).  On failure, the most
    recent log file is dumped to stderr and a RuntimeError is raised.
    """
    failed = False
    try:
        subprocess.check_call(command + ["--workspace={}".format(workspace)])
    except subprocess.CalledProcessError:
        failed = True

    if failed:
        # Dump the newest log file to help diagnose the failure.
        print("Error running command. Final log file:", file=sys.stderr)
        print("==========", file=sys.stderr)
        log_dir = os.path.join(workspace, autopilot.DIR_LOGS)
        by_mtime = sorted(os.listdir(log_dir),
                          key=lambda fname: os.stat(os.path.join(log_dir, fname)).st_mtime)
        with open(os.path.join(log_dir, by_mtime[-1]), "r") as log:
            for line in log:
                print(line, file=sys.stderr, end="")
        print("==========", file=sys.stderr)
        raise RuntimeError("Test failed: %s" % " ".join(command))

    # Cleanup models, leaving data avaiable for use as custom inputs to other
    # tasks
    for model_dir in glob.glob(os.path.join(workspace, autopilot.DIR_SYSTEMS, "*", "model.*")):
        shutil.rmtree(model_dir)
def _wmt_data_path(work_dir, data_dir, file_name):
    """Path to a data file left behind by the WMT test task in `work_dir`."""
    return os.path.join(work_dir, autopilot.DIR_SYSTEMS, WMT_TASK + autopilot.SUFFIX_TEST,
                        autopilot.DIR_DATA, data_dir, file_name)


def _custom_task_command(work_dir, task_name, data_dir, extra_args):
    """Build an autopilot command for a custom task reusing WMT task data.

    :param work_dir: workspace directory containing the WMT test task output.
    :param task_name: value for --custom-task.
    :param data_dir: which processing level to reuse (raw/tok/bpe directory).
    :param extra_args: task-specific arguments inserted before the model args.
    """
    def path(file_name):
        return _wmt_data_path(work_dir, data_dir, file_name)

    return ([sys.executable,
             "-m",
             "sockeye_contrib.autopilot.autopilot",
             "--custom-task={}".format(task_name),
             "--custom-train",
             path(autopilot.PREFIX_TRAIN + autopilot.SUFFIX_SRC_GZ),
             path(autopilot.PREFIX_TRAIN + autopilot.SUFFIX_TRG_GZ),
             "--custom-dev",
             path(autopilot.PREFIX_DEV + autopilot.SUFFIX_SRC_GZ),
             path(autopilot.PREFIX_DEV + autopilot.SUFFIX_TRG_GZ),
             "--custom-test",
             path(autopilot.PREFIX_TEST + PREFIX_ZERO + autopilot.SUFFIX_SRC_GZ),
             path(autopilot.PREFIX_TEST + PREFIX_ZERO + autopilot.SUFFIX_TRG_GZ)]
            + extra_args
            + ["--model=transformer",
               "--gpus=0",
               "--test"])


def main():
    """
    Build test systems with different types of pre-defined data and custom data
    with all levels of pre-processing.
    """
    with tempfile.TemporaryDirectory(prefix="sockeye.autopilot.") as tmp_dir:
        work_dir = os.path.join(tmp_dir, "workspace")

        # WMT task with raw data (Transformer)
        run_test([sys.executable,
                  "-m",
                  "sockeye_contrib.autopilot.autopilot",
                  "--task={}".format(WMT_TASK),
                  "--model=transformer",
                  "--gpus=0",
                  "--test"],
                 workspace=work_dir)

        # WMT task with raw data (GNMT)
        run_test([sys.executable,
                  "-m",
                  "sockeye_contrib.autopilot.autopilot",
                  "--task={}".format(WMT_TASK),
                  "--model=gnmt_like",
                  "--decode-settings=gnmt_like",
                  "--gpus=0",
                  "--test"],
                 workspace=work_dir)

        # TODO: The WNMT task (WNMT_TASK, pre-tokenized data, Transformer) is
        # currently disabled due to periodic outages of nlp.stanford.edu
        # preventing downloading data.

        # WMT task, prepare data only
        run_test([sys.executable,
                  "-m",
                  "sockeye_contrib.autopilot.autopilot",
                  "--task={}".format(DATA_ONLY_TASK),
                  "--model=none",
                  "--gpus=0",
                  "--test"],
                 workspace=work_dir)

        # Custom task (raw data, Transformer): language pair and BPE ops are
        # required because autopilot starts from raw text.
        run_test(_custom_task_command(work_dir, "custom_raw", autopilot.DIR_RAW,
                                      ["--custom-lang",
                                       WMT_SRC,
                                       WMT_TRG,
                                       "--custom-bpe-op={}".format(WMT_BPE)]),
                 workspace=work_dir)

        # Custom task (tokenized data, Transformer)
        run_test(_custom_task_command(work_dir, "custom_tok", autopilot.DIR_TOK,
                                      ["--custom-text-type=tok",
                                       "--custom-bpe-op={}".format(WMT_BPE)]),
                 workspace=work_dir)

        # Custom task (byte-pair encoded data, Transformer)
        run_test(_custom_task_command(work_dir, "custom_bpe", autopilot.DIR_BPE,
                                      ["--custom-text-type=bpe"]),
                 workspace=work_dir)
# Allow running the test suite directly: python -m sockeye_contrib.autopilot.test
if __name__ == "__main__":
    main()
|
hisaharu/ryu | refs/heads/master | ryu/lib/sockopt.py | 38 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import socket
import struct
from ryu.lib import sockaddr
def _set_tcp_md5sig_linux(s, addr, key):
    """Enable TCP-MD5 on a Linux socket: fill a struct tcp_md5sig with the
    peer address and secret key and apply the TCP_MD5SIG socket option."""
    # struct tcp_md5sig {
    #     struct sockaddr_storage addr;
    #     u16 pad1;
    #     u16 keylen;
    #     u32 pad2;
    #     u8 key[80];
    # }
    TCP_MD5SIG = 14
    af = s.family
    if af == socket.AF_INET:
        sa = sockaddr.sa_in4(addr)
    elif af == socket.AF_INET6:
        sa = sockaddr.sa_in6(addr)
    else:
        raise ValueError("unsupported af %s" % (af,))
    ss = sockaddr.sa_to_ss(sa)
    # pad1 (2x), keylen (H), pad2 (4x), key (80s) -- matches the struct above.
    tcp_md5sig = ss + struct.pack("2xH4x80s", len(key), key)
    s.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, tcp_md5sig)
def _set_tcp_md5sig_bsd(s, _addr, _key):
    """Enable TCP-MD5 on a *BSD socket.

    NOTE: On this platform, address and key need to be set using setkey(8),
    so the extra arguments are ignored; the socket option is just an on/off
    flag.
    """
    TCP_MD5SIG = 0x10
    enable = struct.pack("I", 1)
    s.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, enable)
def set_tcp_md5sig(s, addr, key):
    """Enable TCP-MD5 on the given socket.

    :param s: Socket
    :param addr: Associated address. On some platforms, this has no effect.
    :param key: Key. On some platforms, this has no effect.
    """
    system = platform.system()
    if system == 'Linux':
        impl = _set_tcp_md5sig_linux
    elif system in ('FreeBSD', 'NetBSD'):
        impl = _set_tcp_md5sig_bsd
    else:
        raise NotImplementedError("TCP-MD5 unsupported on this platform")
    impl(s, addr, key)
|
controlzee/three.js | refs/heads/master | utils/exporters/blender/addons/io_three/exporter/api/light.py | 195 | from bpy import data, types
from .. import utilities, logger
def _lamp(func):
    """Decorator: let the wrapped function accept either a Blender lamp
    datablock or a lamp name (looked up in bpy.data.lamps).

    :param func: function whose first argument is a lamp datablock
    """
    def inner(name, *args, **kwargs):
        """Resolve `name` to a lamp datablock and forward the call.

        :param name: lamp instance or lamp name
        :param *args:
        :param **kwargs:
        """

        if isinstance(name, types.Lamp):
            lamp = name
        else:
            lamp = data.lamps[name]

        return func(lamp, *args, **kwargs)

    return inner
@_lamp
def angle(lamp_data):
    """Return the spot size (cone angle) of the given lamp.

    :param lamp_data: lamp datablock (or name, resolved by @_lamp)
    :rtype: float
    """
    logger.debug("light.angle(%s)", lamp_data)
    return lamp_data.spot_size
@_lamp
def color(lamp_data):
    """Return the lamp colour packed into a single integer.

    :param lamp_data: lamp datablock (or name, resolved by @_lamp)
    :rtype: int
    """
    logger.debug("light.color(%s)", lamp_data)
    return utilities.rgb2int(
        (lamp_data.color.r, lamp_data.color.g, lamp_data.color.b))
@_lamp
def distance(lamp_data):
    """Return the lamp's falloff distance.

    :param lamp_data: lamp datablock (or name, resolved by @_lamp)
    :rtype: float
    """
    logger.debug("light.distance(%s)", lamp_data)
    return lamp_data.distance
@_lamp
def intensity(lamp_data):
    """Return the lamp energy, rounded to two decimal places.

    :param lamp_data: lamp datablock (or name, resolved by @_lamp)
    :rtype: float
    """
    logger.debug("light.intensity(%s)", lamp_data)
    return round(lamp_data.energy, 2)
|
gandalfcode/gandalf | refs/heads/master | examples/example12.py | 1 | #==============================================================================
# example12.py
# Plot particle quantities in an alternative coordinate system.
#==============================================================================
from gandalf.analysis.facade import *
from matplotlib.colors import LogNorm

# Create simulation object from Boss-Bodenheimer parameters file and shorten
# the run so the example finishes quickly.
sim = newsim("bossbodenheimer.dat")
sim.SetParam("tend",0.02)
setupsim()

# Run simulation and plot x-y positions of SPH particles in the default
# units specified in the `bossbodenheimer.dat' parameters file.
plot("x","y")
addplot("x","y",type="star")
limit("x",-0.007,0.007)
limit("y",-0.007,0.007)

# Second window: rendered density map over the same region.
window()
render("x","y","rho",res=256,#norm=LogNorm(),
       interpolation='bicubic')
limit("x",-0.007,0.007)
limit("y",-0.007,0.007)
run()
block()

# After pressing return, re-plot last snapshot but in new specified units (au).
window(1)
plot("x","y",xunit="au",yunit="au")
window(2)
render("x","y","rho",res=256,#norm=LogNorm(),
       interpolation='bicubic')
limit("x",-0.007,0.007)
limit("y",-0.007,0.007)
block()
|
nikitabiradar/student_registration | refs/heads/master | janastu/lib/python2.7/encodings/shift_jisx0213.py | 816 | #
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# Fetch the C-implemented codec object for this encoding.
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
    # Stateless encode/decode delegated to the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec  # C codec backing the multibyte incremental encoder
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec  # C codec backing the multibyte incremental decoder
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec  # C codec backing the stream reader
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec  # C codec backing the stream writer
def getregentry():
    """Return the CodecInfo used to register this codec with the codecs
    machinery (looked up by the encodings package search function)."""
    return codecs.CodecInfo(
        name='shift_jisx0213',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
mirestrepo/voxels-at-lems | refs/heads/master | registration_eval/perturb/reg3d_main_pert.py | 1 | #!/usr/bin/env python
# encoding: utf-8
"""
Author: Isabel Restrepo
A script that encapsulates all steps to evaluate
3d-registration algorithms in the PVM under camera perturbation errors
September 12, 2012
"""
import os, sys, argparse
# Set up the environment: select the VXL build configuration and add its
# Python bindings to the import path.
CONFIGURATION= "Release";
sys.path.append("/Projects/vxl/bin/" +CONFIGURATION +"/lib");
sys.path.append("/Projects/vxl/src/contrib/brl/bseg/boxm2/pyscripts");
if __name__ == "__main__":

    parser = argparse.ArgumentParser()

    parser.add_argument("--root_dir", action="store", type=str,
                        dest="root_dir",
                        default="/Users/isa/Experiments/reg3d_eval/downtown_dan",
                        help="Path to root directory")
    parser.add_argument("--trial", action="store", type=int,
                        dest="trial", default=0,
                        help="Trial number")
    parser.add_argument("--si", action="store", type=int,
                        dest="si", default=0,
                        help="Sigma index, where sigma = [0.05, 0.1, 0.15] ")
    # NOTE(review): argparse's ``type=bool`` does not parse booleans --
    # bool(s) is True for ANY non-empty string, so e.g. ``--perturb False``
    # still enables the step.  The boolean options below behave as intended
    # only when omitted (default False).  ``action="store_true"`` would be
    # the usual fix, but that would change the CLI, so it is only flagged.
    parser.add_argument("--perturb", action="store", type=bool,
                        dest="perturb", default=False,
                        help="Run initial alignment")
    parser.add_argument("--reg_ia", action="store", type=bool,
                        dest="reg_ia", default=False,
                        help="Run initial alignment")
    parser.add_argument("--reg_icp", action="store", type=bool,
                        dest="reg_icp", default=False,
                        help="Run ICP")
    parser.add_argument("--vis_ia", action="store", type=bool,
                        dest="vis_ia", default=False,
                        help="Visualize initial alignment")
    parser.add_argument("--vis_icp", action="store", type=bool,
                        dest="vis_icp", default=False,
                        help="Visualize ICP")
    parser.add_argument("--plot_Terror", action="store", type=bool,
                        dest="plot_Terror", default=False,
                        help="Plot Transformation errors")
    parser.add_argument("--descriptor", action="store", type=str,
                        dest="descriptor", default="FPFH",
                        help="Trial number")
    parser.add_argument("--rej_normals", action="store", type=bool,
                        dest="rej_normals", default=False,
                        help="Reject normals?")
    parser.add_argument("--verbose", action="store", type=bool,
                        dest="verbose", default=False,
                        help="Print or redirect to log file")
    parser.add_argument("--n_iter", action="store", type=int,
                        dest="n_iter", default=200,
                        help="Number of iterations")
    parser.add_argument("--geo", action="store", type=bool,
                        dest="geo", default=False,
                        help="Use reoregistered clouds?")

    args = parser.parse_args()

    print args

    # Directory layout: ground truth lives in <root>/original, each perturbed
    # trial in <root>/pert_<sigma>_<trial>.
    gt_root_dir = args.root_dir + "/original"
    trial_number = args.trial
    sigma = [0.05, 0.1, 0.15]
    sigma_str = ["005", "01", "015"]
    trial_root_dir = args.root_dir + "/pert_" + sigma_str[args.si] + "_" + str(args.trial)
    descriptor_type = args.descriptor
    radius = 30
    percentile = 99
    verbose = args.verbose

    if args.perturb:
        # Generate perturbed camera sets: ten trials for each sigma value.
        import perturb_cameras
        from bbas_adaptor import *
        rng = initialize_rng(); #init random number generator
        print "Peturbing cameras"
        for si in range(0, len(sigma)):
            for ti in range(0, 10):
                root_in = gt_root_dir
                root_out = args.root_dir + "/pert_" + sigma_str[si] + "_" + str(ti)
                perturb_cameras.perturb_cams(root_in, root_out, sigma[si], rng)

    if args.reg_ia:
        import reg3d
        print "Running IA"
        reg3d.register_ia(gt_root_dir, trial_root_dir, descriptor_type,
                          radius, percentile, args.n_iter, verbose)

    if args.reg_icp:
        import reg3d
        print "Running ICP"
        reg3d.register_icp(gt_root_dir, trial_root_dir, descriptor_type,
                           radius, percentile, args.n_iter, args.rej_normals,
                           verbose, True)

    if args.vis_ia:
        import reg3d
        print "Visualizing IA"
        reg3d.visualize_reg_ia(gt_root_dir, trial_root_dir,
                               descriptor_type, radius,
                               percentile, args.n_iter, args.geo)

    if args.vis_icp:
        import reg3d
        print "Visualizing ICP"
        reg3d.visualize_reg_icp(gt_root_dir, trial_root_dir,
                                descriptor_type, radius,
                                percentile, args.n_iter, args.rej_normals, args.geo,
                                trial_number)
|
mitar/django | refs/heads/master | tests/modeltests/properties/tests.py | 126 | from __future__ import absolute_import
from django.test import TestCase
from .models import Person
class PropertyTests(TestCase):
    """Exercise the read-only and settable Python properties on Person."""

    def setUp(self):
        self.a = Person(first_name='John', last_name='Lennon')
        self.a.save()

    def test_getter(self):
        self.assertEqual(self.a.full_name, 'John Lennon')

    def test_setter(self):
        # The "full_name" property hasn't provided a "set" method.
        with self.assertRaises(AttributeError):
            self.a.full_name = 'Paul McCartney'

        # But "full_name_2" has, and it can be used to initialise the class.
        other = Person(full_name_2 = 'Paul McCartney')
        other.save()
        self.assertEqual(other.first_name, 'Paul')
|
kazukioishi/android_kernel_samsung_klte | refs/heads/cm-12.1 | tools/perf/util/setup.py | 4998 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' extension build output into the directories
    # selected by the perf Makefile (PYTHON_EXTBUILD_*, read at module
    # level below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight from the Makefile-chosen build directory.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags: defaults plus whatever the perf Makefile exported.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build/staging directories chosen by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C source files, skipping blank lines and '#' comments.
# Fixed: use open() instead of the Python-2-only file() builtin, and close
# the file deterministically instead of leaking the handle.
with open('util/python-ext-sources') as sources_file:
    ext_sources = [f.strip() for f in sources_file
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                  sources = ext_sources,
                  include_dirs = ['util/include'],
                  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
mindnervestech/mnrp | refs/heads/master | openerp/__init__.py | 100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
    evented = True

# Is the server running in prefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False

#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os   # keep the package namespace clean; only needed for the TZ hack
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1

def registry(database_name):
    """
    Return the model registry for the given database. If the registry does not
    exist yet, it is created on the fly.

    :param database_name: name of the database whose registry is requested
    """
    return modules.registry.RegistryManager.get(database_name)
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ar4s/django | refs/heads/master | tests/template_tests/test_parser.py | 3 | """
Testing some internals of the template processing. These are *not* examples to be copied in user code.
"""
from __future__ import unicode_literals
from unittest import TestCase
from django.template import (TokenParser, FilterExpression, Parser, Variable,
Template, TemplateSyntaxError, Library)
from django.test.utils import override_settings
from django.utils import six
class ParserTests(TestCase):
    """Internal template-parsing tests: TokenParser, FilterExpression and
    Variable behaviour (quoting, escaping, attribute protection, filter
    argument counting)."""

    def test_token_parsing(self):
        # Tests for TokenParser behavior in the face of quoted strings with
        # spaces.

        p = TokenParser("tag thevar|filter sometag")
        self.assertEqual(p.tagname, "tag")
        self.assertEqual(p.value(), "thevar|filter")
        self.assertTrue(p.more())
        self.assertEqual(p.tag(), "sometag")
        self.assertFalse(p.more())

        p = TokenParser('tag "a value"|filter sometag')
        self.assertEqual(p.tagname, "tag")
        self.assertEqual(p.value(), '"a value"|filter')
        self.assertTrue(p.more())
        self.assertEqual(p.tag(), "sometag")
        self.assertFalse(p.more())

        p = TokenParser("tag 'a value'|filter sometag")
        self.assertEqual(p.tagname, "tag")
        self.assertEqual(p.value(), "'a value'|filter")
        self.assertTrue(p.more())
        self.assertEqual(p.tag(), "sometag")
        self.assertFalse(p.more())

    def test_filter_parsing(self):
        c = {"article": {"section": "News"}}
        p = Parser("")

        def fe_test(s, val):
            # Helper: parse expression `s` and check it resolves to `val`.
            self.assertEqual(FilterExpression(s, p).resolve(c), val)

        fe_test("article.section", "News")
        fe_test("article.section|upper", "NEWS")
        fe_test('"News"', "News")
        fe_test("'News'", "News")
        fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
        fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
        fe_test(r"'Some \'Bad\' News'", "Some 'Bad' News")

        fe = FilterExpression(r'"Some \"Good\" News"', p)
        self.assertEqual(fe.filters, [])
        self.assertEqual(fe.var, 'Some "Good" News')

        # Filtered variables should reject access of attributes beginning with
        # underscores.
        self.assertRaises(TemplateSyntaxError,
            FilterExpression, "article._hidden|upper", p
        )

    def test_variable_parsing(self):
        c = {"article": {"section": "News"}}
        self.assertEqual(Variable("article.section").resolve(c), "News")
        self.assertEqual(Variable('"News"').resolve(c), "News")
        self.assertEqual(Variable("'News'").resolve(c), "News")

        # Translated strings are handled correctly.
        self.assertEqual(Variable("_(article.section)").resolve(c), "News")
        self.assertEqual(Variable('_("Good News")').resolve(c), "Good News")
        self.assertEqual(Variable("_('Better News')").resolve(c), "Better News")

        # Escaped quotes work correctly as well.
        self.assertEqual(
            Variable(r'"Some \"Good\" News"').resolve(c), 'Some "Good" News'
        )
        self.assertEqual(
            Variable(r"'Some \'Better\' News'").resolve(c), "Some 'Better' News"
        )

        # Variables should reject access of attributes beginning with
        # underscores.
        self.assertRaises(TemplateSyntaxError,
            Variable, "article._hidden"
        )

        # Variables should raise on non string type
        with six.assertRaisesRegex(self, TypeError, "Variable must be a string or number, got <(class|type) 'dict'>"):
            Variable({})

    @override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
    def test_compile_filter_error(self):
        # regression test for #19819: the reported source location must point
        # at the bad token.
        msg = "Could not parse the remainder: '@bar' from 'foo@bar'"
        with six.assertRaisesRegex(self, TemplateSyntaxError, msg) as cm:
            Template("{% if 1 %}{{ foo@bar }}{% endif %}")
        self.assertEqual(cm.exception.django_template_source[1], (10, 23))

    def test_filter_args_count(self):
        p = Parser("")
        l = Library()

        @l.filter
        def no_arguments(value):
            pass

        @l.filter
        def one_argument(value, arg):
            pass

        @l.filter
        def one_opt_argument(value, arg=False):
            pass

        @l.filter
        def two_arguments(value, arg, arg2):
            pass

        @l.filter
        def two_one_opt_arg(value, arg, arg2=False):
            pass

        p.add_library(l)

        # Wrong argument counts must be rejected at parse time...
        for expr in (
            '1|no_arguments:"1"',
            '1|two_arguments',
            '1|two_arguments:"1"',
            '1|two_one_opt_arg',
        ):
            with self.assertRaises(TemplateSyntaxError):
                FilterExpression(expr, p)

        # ...while correct (or optional) argument counts must parse cleanly.
        for expr in (
            # Correct number of arguments
            '1|no_arguments',
            '1|one_argument:"1"',
            # One optional
            '1|one_opt_argument',
            '1|one_opt_argument:"1"',
            # Not supplying all
            '1|two_one_opt_arg:"1"',
        ):
            FilterExpression(expr, p)
|
OmarIthawi/edx-platform | refs/heads/master | lms/djangoapps/open_ended_grading/staff_grading.py | 192 | """
LMS part of instructor grading:
- views + ajax handling
- calls the instructor grading service
"""
import logging
log = logging.getLogger(__name__)
class StaffGrading(object):
"""
Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
"""
def __init__(self, course):
self.course = course
def get_html(self):
return "<b>Instructor grading!</b>"
# context = {}
# return render_to_string('courseware/instructor_grading_view.html', context)
|
armstrong/armstrong.esi | refs/heads/master | example/hello/tests.py | 1940 | """
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # Fixed: assertEqual replaces the long-deprecated failUnlessEqual
        # alias (same behaviour, current unittest API).
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.