#!/usr/bin/python
#
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import libxml2
import json
import os
from zope.interface import implementer
from xml.etree.ElementTree import fromstring
from imgfac.Template import Template
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.BuildDispatcher import BuildDispatcher
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.CloudDelegate import CloudDelegate
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close, remove_net_persist, create_cloud_info
from imgfac.FactoryUtils import check_qcow_size, subprocess_check_output, qemu_convert_cmd
try:
from keystoneclient.v2_0 import client
import glanceclient as glance_client
GLANCE_VERSION = 2
except ImportError:
try:
        # Backward compatibility: fall back to the older integrated glance client
from glance import client as glance_client
GLANCE_VERSION = 1
except ImportError:
GLANCE_VERSION = None
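# GLANCE_VERSION selects the upload path used below: 2 means the Grizzly-era
# keystone + glanceclient combination, 1 means the older integrated glance
# client (Folsom and earlier), and None means no client could be imported, in
# which case push_image_to_provider() raises an ImageFactoryException.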
@implementer(CloudDelegate)
class OpenStack(object):
def __init__(self):
        # Note: the constructor signature no longer includes (template, target, config_block=None)
super(OpenStack, self).__init__()
self.app_config = ApplicationConfiguration().configuration
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.version = GLANCE_VERSION
if self.version == 2:
self.credentials_attrs = [ 'auth_url', 'password', 'tenant', 'username']
elif self.version == 1:
self.credentials_attrs = [ 'auth_url', 'password', 'strategy', 'tenant', 'username']
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
# Fail gracefully if the Glance/Keystone clients are not installed.
if self.version is None:
raise ImageFactoryException('Keystone/Glance clients not available - cannot push to provider')
# Our target_image is already a raw KVM image. All we need to do is upload to glance
self.builder = builder
self.active_image = self.builder.provider_image
self.openstack_decode_credentials(credentials)
provider_data = self.get_dynamic_provider_data(provider)
if provider_data is None:
raise ImageFactoryException("OpenStack KVM instance not found in XML or JSON provided")
        # The image is always present here; it is the target_image data file
input_image = self.builder.target_image.data
        # If the template specifies a name, use that, otherwise create a name
# using provider_image.identifier.
template = Template(self.builder.provider_image.template)
if template.name:
image_name = template.name
else:
image_name = 'ImageFactory created image - %s' % (self.builder.provider_image.identifier)
if check_qcow_size(input_image):
self.log.debug("Uploading image to glance, detected qcow format")
disk_format='qcow2'
else:
self.log.debug("Uploading image to glance, assuming raw format")
disk_format='raw'
# Support openstack grizzly keystone authentication and glance upload
if self.version == 2:
if self.credentials_token is None:
self.credentials_token = self.keystone_authenticate(**self.credentials_dict)
provider_data['name'] = image_name
provider_data['disk_format'] = disk_format
image_id = self.glance_upload_v2(input_image, self.credentials_token, **provider_data)
elif self.version == 1:
            # Also maintain backward compatibility with Folsom
image_id = self.glance_upload(input_image, creds = self.credentials_dict, token = self.credentials_token,
host=provider_data['glance-host'], port=provider_data['glance-port'],
name=image_name, disk_format=disk_format)
self.builder.provider_image.identifier_on_provider = image_id
if 'username' in self.credentials_dict:
self.builder.provider_image.provider_account_identifier = self.credentials_dict['username']
self.percent_complete=100
def openstack_decode_credentials(self, credentials):
self.activity("Preparing OpenStack credentials")
# TODO: Validate these - in particular, ensure that if some nodes are missing at least
# a minimal acceptable set of auth is present
doc = libxml2.parseDoc(credentials)
self.credentials_dict = { }
for authprop in self.credentials_attrs:
value = self._get_xml_node(doc, authprop)
if value is not None:
self.credentials_dict[authprop] = value
self.credentials_token = self._get_xml_node(doc, 'token')
def _get_xml_node(self, doc, credtype):
nodes = doc.xpathEval("//provider_credentials/openstack_credentials/%s" % (credtype))
# OpenStack supports multiple auth schemes so not all nodes are required
if len(nodes) < 1:
return None
return nodes[0].content
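    # For reference, the credentials document parsed above is expected to look
    # roughly like the following (values are hypothetical):
    #   <provider_credentials>
    #     <openstack_credentials>
    #       <username>demo</username>
    #       <password>secret</password>
    #       <tenant>demo</tenant>
    #       <auth_url>http://127.0.0.1:5000/v2.0</auth_url>
    #       <token>optional-preauthenticated-token</token>
    #     </openstack_credentials>
    #   </provider_credentials>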
def snapshot_image_on_provider(self, builder, provider, credentials, template, parameters):
# TODO: Implement snapshot builds
raise ImageFactoryException("Snapshot builds not currently supported on OpenStack KVM")
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
return True
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
pass
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
self.target=target
self.builder=builder
self.modify_oz_filesystem()
# OS plugin has already provided the initial file for us to work with
# which we can currently assume is a raw image
input_image = builder.target_image.data
        # Support conversion to an alternate preferred image format.
        # Currently only qcow2 is handled, but the size reduction it provides
        # avoids the performance penalty of uploading (and launching) raw
        # disk images on slow storage.
if self.app_config.get('openstack_image_format', 'raw') == 'qcow2':
            # None of the existing base_image plugins produce compressed qcow2
            # output, so the step below converts either raw or uncompressed
            # qcow2 input into compressed qcow2.
self.log.debug("Converting image to compressed qcow2 format")
tmp_output = input_image + ".tmp.qcow2"
convert_cmd = qemu_convert_cmd(input_image, tmp_output, True)
(stdout, stderr, retcode) = subprocess_check_output(convert_cmd)
os.unlink(input_image)
os.rename(tmp_output, input_image)
def modify_oz_filesystem(self):
self.log.debug("Doing further Factory specific modification of Oz image")
guestfs_handle = launch_inspect_and_mount(self.builder.target_image.data)
remove_net_persist(guestfs_handle)
create_cloud_info(guestfs_handle, self.target)
shutdown_and_close(guestfs_handle)
def get_dynamic_provider_data(self, provider):
try:
xml_et = fromstring(provider)
return xml_et.attrib
except Exception as e:
self.log.debug('Testing provider for XML: %s' % e)
pass
try:
jload = json.loads(provider)
return jload
except ValueError as e:
self.log.debug('Testing provider for JSON: %s' % e)
pass
return None
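    # For reference, the provider definition parsed above may be either a set
    # of XML attributes or a JSON object; a hypothetical JSON form for the
    # glance v1 upload path would be:
    #   {"glance-host": "127.0.0.1", "glance-port": "9292"}
    # glance_upload_v2() falls back to the same defaults when these keys are
    # absent.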
def keystone_authenticate(self, **kwargs):
user = kwargs.get('username')
pwd = kwargs.get('password')
tenant = kwargs.get('tenant')
url = kwargs.get('auth_url', 'http://127.0.0.1:5000/v2.0')
keystone = client.Client(username=user, password=pwd, tenant_name=tenant, auth_url=url)
keystone.authenticate()
return keystone.auth_token
def glance_upload(self, image_filename, creds = {'auth_url': None, 'password': None, 'strategy': 'noauth', 'tenant': None, 'username': None},
host = "0.0.0.0", port = "9292", token = None, name = 'Factory Test Image', disk_format = 'raw'):
image_meta = {'container_format': 'bare',
'disk_format': disk_format,
'is_public': True,
'min_disk': 0,
'min_ram': 0,
'name': name,
'properties': {'distro': 'rhel'}}
c = glance_client.Client(host=host, port=port,
auth_tok=token, creds=creds)
        image_data = open(image_filename, "rb")
image_meta = c.add_image(image_meta, image_data)
image_data.close()
return image_meta['id']
def glance_upload_v2(self, image, auth_token=None, **kwargs):
if image is None:
raise ImageFactoryException("No image is provided")
glance_host = kwargs.setdefault("glance-host", "127.0.0.1")
glance_port = kwargs.setdefault("glance-port", "9292")
glance_url = "http://%s:%s" % (glance_host, glance_port)
        image_data = open(image, "rb")
image_meta = {
'container_format': kwargs.setdefault('container_format', 'bare'),
'disk_format': kwargs.setdefault('disk_format', 'raw'),
'is_public': kwargs.setdefault('is_public', False),
'min_disk': kwargs.setdefault('min_disk', 0),
'min_ram': kwargs.setdefault('min_ram', 0),
'name': kwargs.setdefault('name', 'Factory Test Image'),
'data': image_data,
}
c = glance_client.Client('1', glance_url, token=auth_token)
image_meta = c.images.create(**image_meta)
image_data.close()
return image_meta.id
"""
Illustrates an explicit way to persist an XML document expressed using
ElementTree.
Each DOM node is stored in an individual
table row, with attributes represented in a separate table. The
nodes are associated in a hierarchy using an adjacency list
structure. A query function is introduced which can search for nodes
along any path with a given structure of attributes, basically a
(very narrow) subset of xpath.
This example explicitly marshals/unmarshals the ElementTree document into
mapped entities which have their own tables. Compare to pickle_type.py which
uses PickleType to accomplish the same task. Note that the usage of both
styles of persistence is identical, as is the structure of the main Document
class.
"""
# PART I - Imports/Configuration
from __future__ import print_function
import os
import re
from xml.etree import ElementTree
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy.orm import aliased
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
e = create_engine("sqlite://")
meta = MetaData()
# PART II - Table Metadata
# stores a top level record of an XML document.
documents = Table(
"documents",
meta,
Column("document_id", Integer, primary_key=True),
Column("filename", String(30), unique=True),
Column("element_id", Integer, ForeignKey("elements.element_id")),
)
# stores XML nodes in an adjacency list model. This corresponds to
# Element and SubElement objects.
elements = Table(
"elements",
meta,
Column("element_id", Integer, primary_key=True),
Column("parent_id", Integer, ForeignKey("elements.element_id")),
Column("tag", Unicode(30), nullable=False),
Column("text", Unicode),
Column("tail", Unicode),
)
# stores attributes. This corresponds to the dictionary of attributes
# stored by an Element or SubElement.
attributes = Table(
"attributes",
meta,
Column(
"element_id",
Integer,
ForeignKey("elements.element_id"),
primary_key=True,
),
Column("name", Unicode(100), nullable=False, primary_key=True),
Column("value", Unicode(255)),
)
meta.create_all(e)
# PART III - Model
# our document class. contains a string name,
# and the ElementTree root element.
class Document(object):
def __init__(self, name, element):
self.filename = name
self.element = element
# PART IV - Persistence Mapping
# Node class. a non-public class which will represent the DB-persisted
# Element/SubElement object. We cannot create mappers for ElementTree elements
# directly because they are at the very least not new-style classes, and also
# may be backed by native implementations. so here we construct an adapter.
class _Node(object):
pass
# Attribute class. also internal, this will represent the key/value attributes
# stored for a particular Node.
class _Attribute(object):
def __init__(self, name, value):
self.name = name
self.value = value
# setup mappers. Document will eagerly load a list of _Node objects.
mapper(
Document,
documents,
properties={"_root": relationship(_Node, lazy="joined", cascade="all")},
)
mapper(
_Node,
elements,
properties={
"children": relationship(_Node, cascade="all"),
# eagerly load attributes
"attributes": relationship(
_Attribute, lazy="joined", cascade="all, delete-orphan"
),
},
)
mapper(_Attribute, attributes)
# define marshalling functions that convert from _Node/_Attribute to/from
# ElementTree objects. this will set the ElementTree element as
# "document._element", and append the root _Node object to the "_root" mapped
# collection.
class ElementTreeMarshal(object):
def __get__(self, document, owner):
if document is None:
return self
if hasattr(document, "_element"):
return document._element
def traverse(node, parent=None):
if parent is not None:
elem = ElementTree.SubElement(parent, node.tag)
else:
elem = ElementTree.Element(node.tag)
elem.text = node.text
elem.tail = node.tail
for attr in node.attributes:
elem.attrib[attr.name] = attr.value
for child in node.children:
traverse(child, parent=elem)
return elem
document._element = ElementTree.ElementTree(traverse(document._root))
return document._element
def __set__(self, document, element):
def traverse(node):
n = _Node()
n.tag = str(node.tag)
n.text = str(node.text)
n.tail = str(node.tail) if node.tail else None
n.children = [traverse(n2) for n2 in node]
n.attributes = [
_Attribute(str(k), str(v)) for k, v in node.attrib.items()
]
return n
document._root = traverse(element.getroot())
document._element = element
def __delete__(self, document):
del document._element
document._root = []
# override Document's "element" attribute with the marshaller.
Document.element = ElementTreeMarshal()
# PART V - Basic Persistence Example
line = "\n--------------------------------------------------------"
# save to DB
session = Session(e)
# get ElementTree documents
for file in ("test.xml", "test2.xml", "test3.xml"):
filename = os.path.join(os.path.dirname(__file__), file)
doc = ElementTree.parse(filename)
session.add(Document(file, doc))
print("\nSaving three documents...", line)
session.commit()
print("Done.")
print("\nFull text of document 'text.xml':", line)
document = session.query(Document).filter_by(filename="test.xml").first()
ElementTree.dump(document.element)
# PART VI - Searching for Paths
# manually search for a document which contains "/somefile/header/field1:hi"
root = aliased(_Node)
child_node = aliased(_Node)
grandchild_node = aliased(_Node)
d = (
session.query(Document)
.join(Document._root.of_type(root))
.filter(root.tag == "somefile")
.join(root.children.of_type(child_node))
.filter(child_node.tag == "header")
.join(child_node.children.of_type(grandchild_node))
.filter(
and_(grandchild_node.tag == "field1", grandchild_node.text == "hi")
)
.one()
)
ElementTree.dump(d.element)
# generalize the above approach into an extremely impoverished xpath function:
def find_document(path, compareto):
query = session.query(Document)
attribute = Document._root
for i, match in enumerate(
re.finditer(r"/([\w_]+)(?:\[@([\w_]+)(?:=(.*))?\])?", path)
):
(token, attrname, attrvalue) = match.group(1, 2, 3)
target_node = aliased(_Node)
query = query.join(attribute.of_type(target_node)).filter(
target_node.tag == token
)
attribute = target_node.children
if attrname:
attribute_entity = aliased(_Attribute)
if attrvalue:
query = query.join(
target_node.attributes.of_type(attribute_entity)
).filter(
and_(
attribute_entity.name == attrname,
attribute_entity.value == attrvalue,
)
)
else:
query = query.join(
target_node.attributes.of_type(attribute_entity)
).filter(attribute_entity.name == attrname)
return (
query.options(lazyload(Document._root))
.filter(target_node.text == compareto)
.all()
)
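# For reference, the regular expression above splits a path such as
# "/somefile/header/field2[@attr=foo]" into one (tag, attrname, attrvalue)
# triple per segment: ("somefile", None, None), ("header", None, None),
# ("field2", "attr", "foo").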
for path, compareto in (
("/somefile/header/field1", "hi"),
("/somefile/field1", "hi"),
("/somefile/header/field2", "there"),
("/somefile/header/field2[@attr=foo]", "there"),
):
print("\nDocuments containing '%s=%s':" % (path, compareto), line)
print([d.filename for d in find_document(path, compareto)])
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import os
from oslo_utils.fixture import uuidsentinel
from nova.compute import provider_tree
from nova import conf
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.virt import fake
from nova.virt.zvm import driver as zvmdriver
CONF = conf.CONF
class TestZVMDriver(test.NoDBTestCase):
def setUp(self):
super(TestZVMDriver, self).setUp()
self.flags(my_ip='192.168.1.1',
instance_name_template='abc%05d')
self.flags(cloud_connector_url='https://1.1.1.1:1111', group='zvm')
with mock.patch('nova.virt.zvm.utils.ConnectorClient.call') as mcall, \
mock.patch('pwd.getpwuid', return_value=mock.Mock(pw_name='test')):
mcall.return_value = {'hypervisor_hostname': 'TESTHOST',
'ipl_time': 'IPL at 11/14/17 10:47:44 EST'}
self._driver = zvmdriver.ZVMDriver(fake.FakeVirtAPI())
self._hypervisor = self._driver._hypervisor
self._context = context.RequestContext('fake_user', 'fake_project')
self._image_id = uuidsentinel.imag_id
self._instance_values = {
'display_name': 'test',
'uuid': uuidsentinel.inst_id,
'vcpus': 1,
'memory_mb': 1024,
'image_ref': self._image_id,
'root_gb': 0,
}
self._instance = fake_instance.fake_instance_obj(
self._context, **self._instance_values)
self._instance.flavor = objects.Flavor(name='testflavor',
vcpus=1, root_gb=3, ephemeral_gb=10,
swap=0, memory_mb=512, extra_specs={})
self._eph_disks = [{'guest_format': u'ext3',
'device_name': u'/dev/sdb',
'disk_bus': None,
'device_type': None,
'size': 1},
{'guest_format': u'ext4',
'device_name': u'/dev/sdc',
'disk_bus': None,
'device_type': None,
'size': 2}]
self._block_device_info = {'swap': None,
'root_device_name': u'/dev/sda',
'ephemerals': self._eph_disks,
'block_device_mapping': []}
fake_image_meta = {'status': 'active',
'properties': {'os_distro': 'rhel7.2'},
'name': 'rhel72eckdimage',
'deleted': False,
'container_format': 'bare',
'disk_format': 'raw',
'id': self._image_id,
'owner': 'cfc26f9d6af948018621ab00a1675310',
'checksum': 'b026cd083ef8e9610a29eaf71459cc',
'min_disk': 0,
'is_public': False,
'deleted_at': None,
'min_ram': 0,
'size': 465448142}
self._image_meta = objects.ImageMeta.from_dict(fake_image_meta)
subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
dns=[network_model.IP('192.168.0.1')],
gateway=
network_model.IP('192.168.0.1'),
ips=[
network_model.IP('192.168.0.100')],
routes=None)
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_4],
vlan=None,
bridge_interface=None,
injected=True)
self._network_values = {
'id': None,
'address': 'DE:AD:BE:EF:00:00',
'network': network,
'type': network_model.VIF_TYPE_OVS,
'devname': None,
'ovs_interfaceid': None,
'rxtx_cap': 3
}
self._network_info = network_model.NetworkInfo([
network_model.VIF(**self._network_values)
])
self.mock_update_task_state = mock.Mock()
def test_driver_init_no_url(self):
self.flags(cloud_connector_url=None, group='zvm')
self.assertRaises(exception.ZVMDriverException,
zvmdriver.ZVMDriver, 'virtapi')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_get_available_resource_err_case(self, call):
res = {'overallRC': 1, 'errmsg': 'err', 'rc': 0, 'rs': 0}
call.side_effect = exception.ZVMConnectorError(res)
results = self._driver.get_available_resource()
self.assertEqual(0, results['vcpus'])
self.assertEqual(0, results['memory_mb_used'])
self.assertEqual(0, results['disk_available_least'])
self.assertEqual(0, results['hypervisor_version'])
self.assertEqual('TESTHOST', results['hypervisor_hostname'])
def test_driver_template_validation(self):
self.flags(instance_name_template='abc%6d')
self.assertRaises(exception.ZVMDriverException,
self._driver._validate_options)
@mock.patch('nova.virt.zvm.guest.Guest.get_info')
def test_get_info(self, mock_get):
self._driver.get_info(self._instance)
mock_get.assert_called_once_with()
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_get_image_info_err(self, call):
res = {'overallRC': 500, 'errmsg': 'err', 'rc': 0, 'rs': 0}
call.side_effect = exception.ZVMConnectorError(res)
self.assertRaises(exception.ZVMConnectorError,
self._driver._get_image_info,
'context', 'image_meta_id', 'os_distro')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._import_spawn_image')
def test_private_get_image_info(self, image_import, call):
res = {'overallRC': 404, 'errmsg': 'err', 'rc': 0, 'rs': 0}
call_response = []
call_response.append(exception.ZVMConnectorError(results=res))
call_response.append([{'imagename': 'image-info'}])
call.side_effect = call_response
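        # The first image_query call raises the 404-style connector error
        # above; the driver is then expected to import the spawn image and
        # retry, consuming the second canned response.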
self._driver._get_image_info('context', 'image_meta_id', 'os_distro')
image_import.assert_called_once_with('context', 'image_meta_id',
'os_distro')
call.assert_has_calls(
[mock.call('image_query', imagename='image_meta_id')] * 2
)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_get_image_info_exist(self, call):
call.return_value = [{'imagename': 'image-info'}]
res = self._driver._get_image_info('context', 'image_meta_id',
'os_distro')
call.assert_called_once_with('image_query', imagename='image_meta_id')
self.assertEqual('image-info', res)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def _test_set_disk_list(self, call, has_get_root_units=False,
has_eph_disks=False):
disk_list = [{'is_boot_disk': True, 'size': '3g'}]
eph_disk_list = [{'format': u'ext3', 'size': '1g'},
{'format': u'ext3', 'size': '2g'}]
_inst = copy.deepcopy(self._instance)
_bdi = copy.deepcopy(self._block_device_info)
if has_get_root_units:
# overwrite
disk_list = [{'is_boot_disk': True, 'size': '3338'}]
call.return_value = '3338'
_inst['root_gb'] = 0
else:
_inst['root_gb'] = 3
if has_eph_disks:
disk_list += eph_disk_list
else:
_bdi['ephemerals'] = []
eph_disk_list = []
res1, res2 = self._driver._set_disk_list(_inst, self._image_meta.id,
_bdi)
if has_get_root_units:
call.assert_called_once_with('image_get_root_disk_size',
self._image_meta.id)
self.assertEqual(disk_list, res1)
self.assertEqual(eph_disk_list, res2)
def test_private_set_disk_list_simple(self):
self._test_set_disk_list()
def test_private_set_disk_list_with_eph_disks(self):
self._test_set_disk_list(has_eph_disks=True)
def test_private_set_disk_list_with_get_root_units(self):
self._test_set_disk_list(has_get_root_units=True)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_setup_network(self, call):
inst_nets = []
_net = {'ip_addr': '192.168.0.100',
'gateway_addr': '192.168.0.1',
'cidr': '192.168.0.1/24',
'mac_addr': 'DE:AD:BE:EF:00:00',
'nic_id': None}
inst_nets.append(_net)
self._driver._setup_network('vm_name', 'os_distro',
self._network_info,
self._instance)
call.assert_called_once_with('guest_create_network_interface',
'vm_name', 'os_distro', inst_nets)
@mock.patch('nova.virt.images.fetch')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_import_spawn_image(self, call, fetch):
image_name = CONF.zvm.image_tmp_path + '/image_name'
image_url = "file://" + image_name
image_meta = {'os_version': 'os_version'}
with mock.patch('os.path.exists', side_effect=[False]):
self._driver._import_spawn_image(self._context, 'image_name',
'os_version')
fetch.assert_called_once_with(self._context, 'image_name',
image_name)
call.assert_called_once_with('image_import', 'image_name', image_url,
image_meta, remote_host='test@192.168.1.1')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_destroy(self, call, guest_exists):
guest_exists.return_value = True
self._driver.destroy(self._context, self._instance,
network_info=self._network_info)
call.assert_called_once_with('guest_delete', self._instance['name'])
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.compute.manager.ComputeVirtAPI.wait_for_instance_event')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._setup_network')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._set_disk_list')
@mock.patch('nova.virt.zvm.utils.generate_configdrive')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_spawn(self, call, get_image_info, gen_conf_file, set_disk_list,
setup_network, mock_wait, mock_exists):
_bdi = copy.copy(self._block_device_info)
get_image_info.return_value = 'image_name'
gen_conf_file.return_value = 'transportfiles'
set_disk_list.return_value = 'disk_list', 'eph_list'
mock_exists.return_value = False
self._driver.spawn(self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
allocations=None, network_info=self._network_info,
block_device_info=_bdi)
gen_conf_file.assert_called_once_with(self._context, self._instance,
None, self._network_info, None)
get_image_info.assert_called_once_with(self._context,
self._image_meta.id,
self._image_meta.properties.os_distro)
set_disk_list.assert_called_once_with(self._instance, 'image_name',
_bdi)
setup_network.assert_called_once_with(self._instance.name,
self._image_meta.properties.os_distro,
self._network_info, self._instance)
call.assert_has_calls([
mock.call('guest_create', self._instance.name,
1, 1024, disk_list='disk_list'),
mock.call('guest_deploy', self._instance.name, 'image_name',
transportfiles='transportfiles',
remotehost='test@192.168.1.1'),
mock.call('guest_config_minidisks', self._instance.name,
'eph_list'),
mock.call('guest_start', self._instance.name)
])
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
def test_spawn_image_no_distro_empty(self, get_image_info, mock_exists):
meta = {'status': 'active',
'deleted': False,
'properties': {'os_distro': ''},
'id': self._image_id,
'size': 465448142}
self._image_meta = objects.ImageMeta.from_dict(meta)
mock_exists.return_value = False
self.assertRaises(exception.InvalidInput, self._driver.spawn,
self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
allocations=None, network_info=self._network_info,
block_device_info=None)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
def test_spawn_image_no_distro_none(self, get_image_info, mock_exists):
meta = {'status': 'active',
'deleted': False,
'id': self._image_id,
'size': 465448142}
self._image_meta = objects.ImageMeta.from_dict(meta)
mock_exists.return_value = False
self.assertRaises(exception.InvalidInput, self._driver.spawn,
self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
allocations=None, network_info=self._network_info,
block_device_info=None)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_snapshot(self, call, get_image_service, mock_open):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
call_resp = ['', {"os_version": "rhel7.2",
"dest_url": "file:///path/to/target"}, '']
call.side_effect = call_resp
new_image_meta = {
'status': 'active',
'properties': {
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': self._instance['project_id'],
'os_distro': call_resp[1]['os_version'],
'architecture': 's390x',
'hypervisor_type': 'zvm'
},
'disk_format': 'raw',
'container_format': 'bare',
}
image_path = os.path.join(os.path.normpath(
CONF.zvm.image_tmp_path), image_id)
dest_path = "file://" + image_path
self._driver.snapshot(self._context, self._instance, image_id,
self.mock_update_task_state)
get_image_service.assert_called_with(self._context, image_id)
mock_open.assert_called_once_with(image_path, 'r')
ret_file = mock_open.return_value.__enter__.return_value
image_service.update.assert_called_once_with(self._context,
image_id,
new_image_meta,
ret_file,
purge_props=False)
self.mock_update_task_state.assert_has_calls([
mock.call(task_state='image_pending_upload'),
mock.call(expected_state='image_pending_upload',
task_state='image_uploading')
])
call.assert_has_calls([
mock.call('guest_capture', self._instance.name, image_id),
mock.call('image_export', image_id, dest_path,
remote_host=mock.ANY),
mock.call('image_delete', image_id)
])
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_capture')
def test_snapshot_capture_fail(self, mock_capture, get_image_service):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
mock_capture.side_effect = exception.ZVMDriverException(error='error')
self.assertRaises(exception.ZVMDriverException, self._driver.snapshot,
self._context, self._instance, image_id,
self.mock_update_task_state)
self.mock_update_task_state.assert_called_once_with(
task_state='image_pending_upload')
image_service.delete.assert_called_once_with(self._context, image_id)
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_delete')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_export')
def test_snapshot_import_fail(self, mock_import, mock_delete,
call, get_image_service):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
mock_import.side_effect = exception.ZVMDriverException(error='error')
self.assertRaises(exception.ZVMDriverException, self._driver.snapshot,
self._context, self._instance, image_id,
self.mock_update_task_state)
self.mock_update_task_state.assert_called_once_with(
task_state='image_pending_upload')
get_image_service.assert_called_with(self._context, image_id)
call.assert_called_once_with('guest_capture',
self._instance.name, image_id)
mock_delete.assert_called_once_with(image_id)
image_service.delete.assert_called_once_with(self._context, image_id)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.get_remote_image_service')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_delete')
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_export')
def test_snapshot_update_fail(self, mock_import, mock_delete, call,
get_image_service, mock_open):
image_service = mock.Mock()
image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
get_image_service.return_value = (image_service, image_id)
image_service.update.side_effect = exception.ImageNotAuthorized(
image_id='dummy')
image_path = os.path.join(os.path.normpath(
CONF.zvm.image_tmp_path), image_id)
self.assertRaises(exception.ImageNotAuthorized, self._driver.snapshot,
self._context, self._instance, image_id,
self.mock_update_task_state)
mock_open.assert_called_once_with(image_path, 'r')
get_image_service.assert_called_with(self._context, image_id)
mock_delete.assert_called_once_with(image_id)
image_service.delete.assert_called_once_with(self._context, image_id)
self.mock_update_task_state.assert_has_calls([
mock.call(task_state='image_pending_upload'),
mock.call(expected_state='image_pending_upload',
task_state='image_uploading')
])
call.assert_called_once_with('guest_capture', self._instance.name,
image_id)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_start')
def test_guest_start(self, call):
self._driver.power_on(self._context, self._instance, None)
call.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_softstop')
def test_power_off(self, ipa):
self._driver.power_off(self._instance)
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_softstop')
def test_power_off_with_timeout_interval(self, ipa):
self._driver.power_off(self._instance, 60, 10)
ipa.assert_called_once_with(self._instance.name,
timeout=60, retry_interval=10)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_pause')
def test_pause(self, ipa):
self._driver.pause(self._instance)
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_unpause')
def test_unpause(self, ipa):
self._driver.unpause(self._instance)
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_reboot')
def test_reboot_soft(self, ipa):
self._driver.reboot(None, self._instance, None, 'SOFT')
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_reset')
def test_reboot_hard(self, ipa):
self._driver.reboot(None, self._instance, None, 'HARD')
ipa.assert_called_once_with(self._instance.name)
@mock.patch('nova.virt.zvm.hypervisor.Hypervisor.list_names')
def test_instance_exists(self, mock_list):
mock_list.return_value = [self._instance.name.upper()]
        # Create a new server which is not in list_instances' output
another_instance = fake_instance.fake_instance_obj(self._context,
id=10)
self.assertTrue(self._driver.instance_exists(self._instance))
self.assertFalse(self._driver.instance_exists(another_instance))
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_get_console_output(self, call):
call.return_value = 'console output'
outputs = self._driver.get_console_output(None, self._instance)
call.assert_called_once_with('guest_get_console_output', 'abc00001')
self.assertEqual('console output', outputs)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_update_provider_tree(self, call):
host_info = {'vcpus': 84,
'disk_total': 2000,
'memory_mb': 78192}
call.return_value = host_info
expected_inv = {
'VCPU': {
'total': 84,
'min_unit': 1,
'max_unit': 84,
'step_size': 1,
'allocation_ratio': CONF.initial_cpu_allocation_ratio,
'reserved': CONF.reserved_host_cpus,
},
'MEMORY_MB': {
'total': 78192,
'min_unit': 1,
'max_unit': 78192,
'step_size': 1,
'allocation_ratio': CONF.initial_ram_allocation_ratio,
'reserved': CONF.reserved_host_memory_mb,
},
'DISK_GB': {
'total': 2000,
'min_unit': 1,
'max_unit': 2000,
'step_size': 1,
'allocation_ratio': CONF.initial_disk_allocation_ratio,
'reserved': CONF.reserved_host_disk_mb,
},
}
pt = provider_tree.ProviderTree()
nodename = 'fake-node'
pt.new_root(nodename, uuidsentinel.rp_uuid)
self._driver.update_provider_tree(pt, nodename)
inv = pt.data(nodename).inventory
self.assertEqual(expected_inv, inv)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from oslo.config import cfg
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files. Default to a '
'temp directory'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
            except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
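# Illustrative usage (the path shown is hypothetical): the lock is a context
# manager, e.g.
#
#     with InterProcessLock('/var/lock/cinder/mylock'):
#         ...critical section...
#
# Callers normally reach it through the synchronized() decorator below rather
# than instantiating it directly.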
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
'for method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at '
'%(path)s for method '
'"%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
LOG.debug(_('Released file lock "%(lock)s" at '
'%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to
# cleanup the locks left behind by unit
# tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
finally:
local.strong_store.locks_held.remove(name)
return retval
return inner
return wrap
from __future__ import unicode_literals
import boto
import boto3
import botocore.exceptions
from boto.exception import SQSError
from boto.sqs.message import RawMessage, Message
import requests
import sure # noqa
import time
from moto import mock_sqs
from tests.helpers import requires_boto_gte
@mock_sqs
def test_create_queue():
conn = boto.connect_sqs('the_key', 'the_secret')
conn.create_queue("test-queue", visibility_timeout=60)
all_queues = conn.get_all_queues()
all_queues[0].name.should.equal("test-queue")
all_queues[0].get_timeout().should.equal(60)
@mock_sqs
def test_create_queues_in_multiple_region():
west1_conn = boto.sqs.connect_to_region("us-west-1")
west1_conn.create_queue("test-queue")
west2_conn = boto.sqs.connect_to_region("us-west-2")
west2_conn.create_queue("test-queue")
list(west1_conn.get_all_queues()).should.have.length_of(1)
list(west2_conn.get_all_queues()).should.have.length_of(1)
west1_conn.get_all_queues()[0].url.should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/test-queue')
@mock_sqs
def test_get_queue():
conn = boto.connect_sqs('the_key', 'the_secret')
conn.create_queue("test-queue", visibility_timeout=60)
queue = conn.get_queue("test-queue")
queue.name.should.equal("test-queue")
queue.get_timeout().should.equal(60)
nonexisting_queue = conn.get_queue("nonexisting_queue")
nonexisting_queue.should.be.none
@mock_sqs
def test_get_queue_with_prefix():
conn = boto.connect_sqs('the_key', 'the_secret')
conn.create_queue("prefixa-queue")
conn.create_queue("prefixb-queue")
conn.create_queue("test-queue")
conn.get_all_queues().should.have.length_of(3)
queue = conn.get_all_queues("test-")
queue.should.have.length_of(1)
queue[0].name.should.equal("test-queue")
@mock_sqs
def test_delete_queue():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
conn.get_all_queues().should.have.length_of(1)
queue.delete()
conn.get_all_queues().should.have.length_of(0)
queue.delete.when.called_with().should.throw(SQSError)
@mock_sqs
def test_set_queue_attribute():
conn = boto.connect_sqs('the_key', 'the_secret')
conn.create_queue("test-queue", visibility_timeout=60)
queue = conn.get_all_queues()[0]
queue.get_timeout().should.equal(60)
queue.set_attribute("VisibilityTimeout", 45)
queue = conn.get_all_queues()[0]
queue.get_timeout().should.equal(45)
@mock_sqs
def test_send_message():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body_one = 'this is a test message'
body_two = 'this is another test message'
queue.write(queue.new_message(body_one))
queue.write(queue.new_message(body_two))
messages = conn.receive_message(queue, number_messages=2)
messages[0].get_body().should.equal(body_one)
messages[1].get_body().should.equal(body_two)
@mock_sqs
def test_send_message_with_xml_characters():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body_one = '< & >'
queue.write(queue.new_message(body_one))
messages = conn.receive_message(queue, number_messages=1)
messages[0].get_body().should.equal(body_one)
@requires_boto_gte("2.28")
@mock_sqs
def test_send_message_with_attributes():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body = 'this is a test message'
message = queue.new_message(body)
message_attributes = {
'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'},
'test.binary_attribute': {'data_type': 'Binary', 'binary_value': 'binary value'},
'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'}
}
message.message_attributes = message_attributes
queue.write(message)
messages = conn.receive_message(queue)
messages[0].get_body().should.equal(body)
for name, value in message_attributes.items():
dict(messages[0].message_attributes[name]).should.equal(value)
@mock_sqs
def test_send_message_with_delay():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body_one = 'this is a test message'
body_two = 'this is another test message'
queue.write(queue.new_message(body_one), delay_seconds=60)
queue.write(queue.new_message(body_two))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=2)
assert len(messages) == 1
message = messages[0]
    message.get_body().should.equal(body_two)
queue.count().should.equal(0)
@mock_sqs
def test_send_large_message_fails():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body_one = 'test message' * 200000
huge_message = queue.new_message(body_one)
queue.write.when.called_with(huge_message).should.throw(SQSError)
@mock_sqs
def test_message_becomes_inflight_when_received():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=2)
queue.set_message_class(RawMessage)
body_one = 'this is a test message'
queue.write(queue.new_message(body_one))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
queue.count().should.equal(0)
assert len(messages) == 1
# Wait
time.sleep(3)
queue.count().should.equal(1)
@mock_sqs
def test_receive_message_with_explicit_visibility_timeout():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body_one = 'this is another test message'
queue.write(queue.new_message(body_one))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1, visibility_timeout=0)
assert len(messages) == 1
# Message should remain visible
queue.count().should.equal(1)
@mock_sqs
def test_change_message_visibility():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=2)
queue.set_message_class(RawMessage)
body_one = 'this is another test message'
queue.write(queue.new_message(body_one))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
assert len(messages) == 1
queue.count().should.equal(0)
messages[0].change_visibility(2)
# Wait
time.sleep(1)
# Message is not visible
queue.count().should.equal(0)
time.sleep(2)
# Message now becomes visible
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
messages[0].delete()
queue.count().should.equal(0)
@mock_sqs
def test_message_attributes():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=2)
queue.set_message_class(RawMessage)
body_one = 'this is another test message'
queue.write(queue.new_message(body_one))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
queue.count().should.equal(0)
assert len(messages) == 1
message_attributes = messages[0].attributes
assert message_attributes.get('ApproximateFirstReceiveTimestamp')
assert int(message_attributes.get('ApproximateReceiveCount')) == 1
assert message_attributes.get('SentTimestamp')
assert message_attributes.get('SenderId')
@mock_sqs
def test_read_message_from_queue():
conn = boto.connect_sqs()
queue = conn.create_queue('testqueue')
queue.set_message_class(RawMessage)
body = 'foo bar baz'
queue.write(queue.new_message(body))
message = queue.read(1)
message.get_body().should.equal(body)
@mock_sqs
def test_queue_length():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
queue.write(queue.new_message('this is a test message'))
queue.write(queue.new_message('this is another test message'))
queue.count().should.equal(2)
@mock_sqs
def test_delete_message():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
queue.write(queue.new_message('this is a test message'))
queue.write(queue.new_message('this is another test message'))
queue.count().should.equal(2)
messages = conn.receive_message(queue, number_messages=1)
assert len(messages) == 1
messages[0].delete()
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
assert len(messages) == 1
messages[0].delete()
queue.count().should.equal(0)
@mock_sqs
def test_send_batch_operation():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
# See https://github.com/boto/boto/issues/831
queue.set_message_class(RawMessage)
queue.write_batch([
("my_first_message", 'test message 1', 0),
("my_second_message", 'test message 2', 0),
("my_third_message", 'test message 3', 0),
])
messages = queue.get_messages(3)
messages[0].get_body().should.equal("test message 1")
# Test that pulling more messages doesn't break anything
messages = queue.get_messages(2)
@requires_boto_gte("2.28")
@mock_sqs
def test_send_batch_operation_with_message_attributes():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
message_tuple = ("my_first_message", 'test message 1', 0, {'name1': {'data_type': 'String', 'string_value': 'foo'}})
queue.write_batch([message_tuple])
messages = queue.get_messages()
messages[0].get_body().should.equal("test message 1")
for name, value in message_tuple[3].items():
dict(messages[0].message_attributes[name]).should.equal(value)
@mock_sqs
def test_delete_batch_operation():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
conn.send_message_batch(queue, [
("my_first_message", 'test message 1', 0),
("my_second_message", 'test message 2', 0),
("my_third_message", 'test message 3', 0),
])
messages = queue.get_messages(2)
queue.delete_message_batch(messages)
queue.count().should.equal(1)
@mock_sqs
def test_sqs_method_not_implemented():
requests.post.when.called_with("https://sqs.amazonaws.com/?Action=[foobar]").should.throw(NotImplementedError)
@mock_sqs
def test_queue_attributes():
conn = boto.connect_sqs('the_key', 'the_secret')
queue_name = 'test-queue'
visibility_timeout = 60
queue = conn.create_queue(queue_name, visibility_timeout=visibility_timeout)
attributes = queue.get_attributes()
attributes['QueueArn'].should.look_like(
'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name)
attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout))
attribute_names = queue.get_attributes().keys()
attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible')
attribute_names.should.contain('MessageRetentionPeriod')
attribute_names.should.contain('ApproximateNumberOfMessagesDelayed')
attribute_names.should.contain('MaximumMessageSize')
attribute_names.should.contain('CreatedTimestamp')
attribute_names.should.contain('ApproximateNumberOfMessages')
attribute_names.should.contain('ReceiveMessageWaitTimeSeconds')
attribute_names.should.contain('DelaySeconds')
attribute_names.should.contain('VisibilityTimeout')
attribute_names.should.contain('LastModifiedTimestamp')
attribute_names.should.contain('QueueArn')
@mock_sqs
def test_change_message_visibility_on_invalid_receipt():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=1)
queue.set_message_class(RawMessage)
queue.write(queue.new_message('this is another test message'))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
assert len(messages) == 1
original_message = messages[0]
queue.count().should.equal(0)
time.sleep(2)
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
assert len(messages) == 1
original_message.change_visibility.when.called_with(100).should.throw(SQSError)
@mock_sqs
def test_change_message_visibility_on_visible_message():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=1)
queue.set_message_class(RawMessage)
queue.write(queue.new_message('this is another test message'))
queue.count().should.equal(1)
messages = conn.receive_message(queue, number_messages=1)
assert len(messages) == 1
original_message = messages[0]
queue.count().should.equal(0)
time.sleep(2)
queue.count().should.equal(1)
original_message.change_visibility.when.called_with(100).should.throw(SQSError)
@mock_sqs
def test_purge_action():
conn = boto.sqs.connect_to_region("us-east-1")
queue = conn.create_queue('new-queue')
queue.write(queue.new_message('this is another test message'))
queue.count().should.equal(1)
queue.purge()
queue.count().should.equal(0)
@mock_sqs
def test_delete_message_after_visibility_timeout():
VISIBILITY_TIMEOUT = 1
conn = boto.sqs.connect_to_region("us-east-1")
new_queue = conn.create_queue('new-queue', visibility_timeout=VISIBILITY_TIMEOUT)
m1 = Message()
m1.set_body('Message 1!')
new_queue.write(m1)
assert new_queue.count() == 1
m1_retrieved = new_queue.read()
time.sleep(VISIBILITY_TIMEOUT + 1)
m1_retrieved.delete()
assert new_queue.count() == 0
"""
boto3
"""
@mock_sqs
def test_boto3_get_queue():
sqs = boto3.resource('sqs', region_name='us-east-1')
new_queue = sqs.create_queue(QueueName='test-queue')
new_queue.should_not.be.none
new_queue.should.have.property('url').should.contain('test-queue')
queue = sqs.get_queue_by_name(QueueName='test-queue')
queue.attributes.get('QueueArn').should_not.be.none
queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue')
queue.attributes.get('VisibilityTimeout').should_not.be.none
queue.attributes.get('VisibilityTimeout').should.equal('30')
@mock_sqs
def test_boto3_get_inexistent_queue():
sqs = boto3.resource('sqs', region_name='us-east-1')
sqs.get_queue_by_name.when.called_with(QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError)
@mock_sqs
def test_boto3_message_send():
sqs = boto3.resource('sqs', region_name='us-east-1')
queue = sqs.create_queue(QueueName="blah")
msg = queue.send_message(MessageBody="derp")
msg.get('MD5OfMessageBody').should.equal('58fd9edd83341c29f1aebba81c31e257')
msg.get('ResponseMetadata', {}).get('RequestId').should.equal('27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages()
messages.should.have.length_of(1)
@mock_sqs
def test_boto3_set_queue_attributes():
sqs = boto3.resource('sqs', region_name='us-east-1')
queue = sqs.create_queue(QueueName="blah")
queue.attributes['VisibilityTimeout'].should.equal("30")
queue.set_attributes(Attributes={"VisibilityTimeout": "45"})
queue.attributes['VisibilityTimeout'].should.equal("45")
|
|
"""SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py 3897 2009/01/13 06:45:54 scons"
import re
import string
import SCons.Node.FS
import SCons.Util
import SCons.Warnings
class _Null:
pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kw):
"""
Public interface factory function for creating different types
of Scanners based on the different types of "functions" that may
be supplied.
TODO: Deprecate this some day. We've moved the functionality
inside the Base class and really don't need this factory function
any more. It was, however, used by some of our Tool modules, so
the call probably ended up in various people's custom modules
patterned on SCons code.
"""
if SCons.Util.is_Dict(function):
return apply(Selector, (function,) + args, kw)
else:
return apply(Base, (function,) + args, kw)
class FindPathDirs:
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
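# For example (illustrative, not from the original module): FindPathDirs('CPPPATH')
# returns a callable that expands env['CPPPATH'] for a construction environment
# and hands the scanner a tuple of directory nodes to search.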
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
class Base:
"""
The base class for dependency scanners. This implements
straightforward, single-pass scanning of a single file.
"""
def __init__(self,
function,
name = "NONE",
argument = _null,
skeys = _null,
path_function = None,
node_class = SCons.Node.FS.Entry,
node_factory = None,
scan_check = None,
recursive = None):
"""
Construct a new scanner object given a scanner function.
'function' - a scanner function taking two or three
arguments and returning a list of strings.
'name' - a name for identifying this scanner object.
'argument' - an optional argument that, if specified, will be
passed to both the scanner function and the path_function.
'skeys' - an optional list argument that can be used to determine
which scanner should be used for a given Node. In the case of File
nodes, for example, the 'skeys' would be file suffixes.
'path_function' - a function that takes four or five arguments
(a construction environment, Node for the directory containing
the SConscript file that defined the primary target, list of
target nodes, list of source nodes, and optional argument for
this instance) and returns a tuple of the directories that can
be searched for implicit dependency files. May also return a
callable() which is called with no args and returns the tuple
(supporting Bindable class).
'node_class' - the class of Nodes which this scan will return.
If node_class is None, then this scanner will not enforce any
Node conversion and will return the raw results from the
underlying scanner function.
'node_factory' - the factory function to be called to translate
the raw results returned by the scanner function into the
expected node_class objects.
'scan_check' - a function to be called to first check whether
this node really needs to be scanned.
'recursive' - specifies that this scanner should be invoked
recursively on all of the implicit dependencies it returns
(the canonical example being #include lines in C source files).
May be a callable, which will be called to filter the list
of nodes found to select a subset for recursive scanning
(the canonical example being only recursively scanning
subdirectories within a directory).
The scanner function's first argument will be a Node that should
be scanned for dependencies, the second argument will be an
Environment object, the third argument will be the tuple of paths
returned by the path_function, and the fourth argument will be
the value passed into 'argument', and the returned list should
contain the Nodes for all the direct dependencies of the file.
Examples:
s = Scanner(my_scanner_function)
s = Scanner(function = my_scanner_function)
s = Scanner(function = my_scanner_function, argument = 'foo')
"""
# Note: this class could easily work with scanner functions that take
# something other than a filename as an argument (e.g. a database
# node) and a dependencies list that aren't file names. All that
# would need to be changed is the documentation.
self.function = function
self.path_function = path_function
self.name = name
self.argument = argument
if skeys is _null:
if SCons.Util.is_Dict(function):
skeys = function.keys()
else:
skeys = []
self.skeys = skeys
self.node_class = node_class
self.node_factory = node_factory
self.scan_check = scan_check
if callable(recursive):
self.recurse_nodes = recursive
elif recursive:
self.recurse_nodes = self._recurse_all_nodes
else:
self.recurse_nodes = self._recurse_no_nodes
def path(self, env, dir=None, target=None, source=None):
if not self.path_function:
return ()
if not self.argument is _null:
return self.path_function(env, dir, target, source, self.argument)
else:
return self.path_function(env, dir, target, source)
def __call__(self, node, env, path = ()):
"""
This method scans a single object. 'node' is the node
that will be passed to the scanner function, and 'env' is the
environment that will be passed to the scanner function. A list of
direct dependency nodes for the specified node will be returned.
"""
if self.scan_check and not self.scan_check(node, env):
return []
self = self.select(node)
if not self.argument is _null:
list = self.function(node, env, path, self.argument)
else:
list = self.function(node, env, path)
kw = {}
if hasattr(node, 'dir'):
kw['directory'] = node.dir
node_factory = env.get_factory(self.node_factory)
nodes = []
for l in list:
if self.node_class and not isinstance(l, self.node_class):
l = apply(node_factory, (l,), kw)
nodes.append(l)
return nodes
def __cmp__(self, other):
try:
return cmp(self.__dict__, other.__dict__)
except AttributeError:
# other probably doesn't have a __dict__
return cmp(self.__dict__, other)
def __hash__(self):
return id(self)
def __str__(self):
return self.name
def add_skey(self, skey):
"""Add a skey to the list of skeys"""
self.skeys.append(skey)
def get_skeys(self, env=None):
if env and SCons.Util.is_String(self.skeys):
return env.subst_list(self.skeys)[0]
return self.skeys
def select(self, node):
if SCons.Util.is_Dict(self.function):
key = node.scanner_key()
try:
return self.function[key]
except KeyError:
return None
else:
return self
def _recurse_all_nodes(self, nodes):
return nodes
def _recurse_no_nodes(self, nodes):
return []
recurse_nodes = _recurse_no_nodes
def add_scanner(self, skey, scanner):
self.function[skey] = scanner
self.add_skey(skey)
class Selector(Base):
"""
A class for selecting a more specific scanner based on the
scanner_key() (suffix) for a specific Node.
TODO: This functionality has been moved into the inner workings of
the Base class, and this class will be deprecated at some point.
(It was never exposed directly as part of the public interface,
although it is used by the Scanner() factory function that was
used by various Tool modules and therefore was likely a template
for custom modules that may be out there.)
"""
def __init__(self, dict, *args, **kw):
apply(Base.__init__, (self, None,)+args, kw)
self.dict = dict
self.skeys = dict.keys()
def __call__(self, node, env, path = ()):
return self.select(node)(node, env, path)
def select(self, node):
try:
return self.dict[node.scanner_key()]
except KeyError:
return None
def add_scanner(self, skey, scanner):
self.dict[skey] = scanner
self.add_skey(skey)
class Current(Base):
"""
A class for scanning files that are source files (have no builder)
or are derived files and are current (which implies that they exist,
either locally or in a repository).
"""
def __init__(self, *args, **kw):
def current_check(node, env):
return not node.has_builder() or node.is_up_to_date()
kw['scan_check'] = current_check
apply(Base.__init__, (self,) + args, kw)
class Classic(Current):
"""
A Scanner subclass to contain the common logic for classic CPP-style
include scanning, but which can be customized to use different
regular expressions to find the includes.
Note that in order for this to work "out of the box" (without
overriding the find_include() and sort_key() methods), the regular
expression passed to the constructor must return the name of the
include file in group 0.
"""
def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
self.cre = re.compile(regex, re.M)
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, path)
kw['function'] = _scan
kw['path_function'] = FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
apply(Current.__init__, (self,) + args, kw)
def find_include(self, include, source_dir, path):
n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
return n, include
def sort_key(self, include):
return SCons.Node.FS._my_normcase(include)
def find_include_names(self, node):
return self.cre.findall(node.get_text_contents())
def scan(self, node, path=()):
# cache the includes list in node so we only scan it once:
if node.includes != None:
includes = node.includes
else:
includes = self.find_include_names (node)
node.includes = includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the #include line (including the
# " or <, since that may affect what file is found), which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for include in includes:
n, i = self.find_include(include, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(include)
nodes.append((sortkey, n))
nodes.sort()
nodes = map(lambda pair: pair[1], nodes)
return nodes
class ClassicCPP(Classic):
"""
A Classic Scanner subclass which takes into account the type of
bracketing used to include the file, and uses classic CPP rules
for searching for the files based on the bracketing.
Note that in order for this to work, the regular expression passed
to the constructor must return the leading bracket in group 0, and
the contained filename in group 1.
"""
def find_include(self, include, source_dir, path):
if include[0] == '"':
paths = (source_dir,) + tuple(path)
else:
paths = tuple(path) + (source_dir,)
n = SCons.Node.FS.find_file(include[1], paths)
return n, include[1]
def sort_key(self, include):
return SCons.Node.FS._my_normcase(string.join(include))
|
|
#!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from tika import parser
from vector import Vector
from random import randint
import argparse, os, csv, itertools, copy, json, sys
union_features = set()
def filterFiles(inputDir, acceptTypes):
filename_list = []
for root, dirnames, files in os.walk(inputDir):
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in files:
if not filename.startswith('.'):
filename_list.append(os.path.join(root, filename))
filename_list = [filename for filename in filename_list if parser.from_file(filename)]
if acceptTypes:
filename_list = [filename for filename in filename_list if str(parser.from_file(filename)['metadata']['Content-Type'].encode('utf-8')).split('/')[-1] in acceptTypes]
else:
print "Accepting all MIME Types....."
return filename_list
def compute_Mean(list_of_points):
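# For the Euclidean measure the new centroid is the per-feature arithmetic mean
# of the assigned points; for the other measures an actual member point (the
# "clustroid" with the lowest average distance to the rest) is chosen instead.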
if distanceCalc == calcEuclidian:
new_centroid = Vector()
for feature in union_features:
dimension_sum = 0.0
for point in list_of_points:
try:
dimension_sum += point.features[feature]
except KeyError:
continue
new_centroid.features[feature] = float(dimension_sum)/len(list_of_points)
else:
new_centroid = chooseClustroid(list_of_points)
return new_centroid
#select a point which has lowest average distance to other points in the cluster
def chooseClustroid(points):
minDistance = sys.maxint
clustroid = None
for p in points:
sumDistance = 0
for q in points:
sumDistance += calculateDistance(p, q)
sumDistance /= (len(points) - 1)
if sumDistance < minDistance:
clustroid = p
minDistance = sumDistance
#trade-off absolute correctness to speed up
# if minDistance < 0.1:
# break
return clustroid
def cluster_assignment(list_of_points, centroids):
'''
Assign points to nearest centroid
'''
clusters = {}
for point in list_of_points:
distances = []
for centroid in centroids:
distances.append(calculateDistance(point, centroid))
try:
clusters[distances.index(min(distances))].append(point)
except KeyError:
clusters[distances.index(min(distances))] = []
clusters[distances.index(min(distances))].append(point)
return clusters
def move_centroid(clusters):
'''
Shift centroid to mean of assigned points
'''
new_centroids = []
for key in clusters:
new_centroids.append(compute_Mean(clusters[key]))
return new_centroids
def K_Means(list_of_points, no_centroids):
centroids = []
for i in range(no_centroids):
centroids.append(Vector())
for centroid in centroids:
random_point = list_of_points[randint(0, (len(list_of_points)-1) )]
centroid.filename = random_point.filename
centroid.features = copy.deepcopy(random_point.features)
centroid.featuresText = copy.deepcopy(random_point.featuresText)
clusters = cluster_assignment(list_of_points, centroids)
# Random initialisation means repeated runs can generate different clusters
# (same idea as the related "Dongni" compute-clusters.py script).
# TODO: decide whether the previous centroids should be reused between iterations.
for i in range(0, 300):  # iterate a fixed number of times (default 300) toward convergence
new_centroids = move_centroid(clusters)
clusters = cluster_assignment(list_of_points, new_centroids)
# Compute and return the distortion for the final centroids and cluster assignment.
distortion_sum = 0.0
for key in clusters:
for point in clusters[key]:
distortion_sum += calculateDistance(point, new_centroids[key])
distortion = distortion_sum / float(len(list_of_points))
return [distortion, clusters]
def calculateDistance(v1, v2):
if not hasattr(v2, 'filename'):
return distanceCalc(v1, v2)
else:
global distanceCache
if (v1.filename, v2.filename) in distanceCache:
return distanceCache[v1.filename, v2.filename]
elif (v2.filename, v1.filename) in distanceCache:
return distanceCache[v2.filename, v1.filename]
else:
distance = distanceCalc(v1, v2)
distanceCache[v1.filename, v2.filename] = distance
return distance
def calcEuclidian(v1, v2):
return v1.euclidean_dist(v2)
def calcCosine(v1, v2):
return 1 - v1.cosTheta(v2)
def calcEdit(v1, v2):
return v1.edit_dist(v2)
if __name__ == "__main__":
argParser = argparse.ArgumentParser('K-means Clustering of metadata values')
argParser.add_argument('--inputDir', required=True, help='path to directory containing files')
#argParser.add_argument('--outJSON', required=True, help='path to directory for storing the output CSV File, containing k-means cluster assignments')
argParser.add_argument('--accept', nargs='+', type=str, help='Optional: compute similarity only on specified IANA MIME Type(s)')
argParser.add_argument('--measure', type=int, help='Optional: 0 - Euclidean, 1 - Cosine, 2 - Edit (default: 0)')
args = argParser.parse_args()
distanceCache = {}
distanceCalc = calcEuclidian
distanceCalcName = "Euclidean Distance"
if args.measure:
if args.measure == 1:
distanceCalc = calcCosine
distanceCalcName = "Cosine Distance"
elif args.measure == 2:
distanceCalc = calcEdit
distanceCalcName = "Edit Distance"
print "Clustering using " + distanceCalcName
if args.inputDir:# and args.outJSON:
list_of_points = []
for eachFile in filterFiles(args.inputDir, args.accept):
metadata = parser.from_file(eachFile)["metadata"]
if len(metadata) == 0:
continue
list_of_points.append(Vector(eachFile, metadata))
for point in list_of_points:
union_features |= set(point.features.keys())
global_minimas = []
for k in range(2, 5):
global_minima = K_Means(list_of_points, k)
for i in range(0, 50):
iteration = K_Means(list_of_points, k)
print "k= " , k , " iteration ", i
if iteration[0] < global_minima[0]:
global_minima = iteration
global_minimas.append(global_minima)
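# Elbow-style selection: compare distortion for consecutive values of k and
# pick the k after which the largest drop in distortion occurs.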
distortion_diffs = []
for i in range(0, (len(global_minimas)-1) ):
print "k =", (i+2),"distortion value", global_minimas[i][0]
distortion_diffs.append((global_minimas[i][0] - global_minimas[i+1][0]))
print "k =", (i+3),"distortion value", global_minimas[i+1][0]
chosen_k = distortion_diffs.index(max(distortion_diffs)) + 1
true_global_minima = global_minimas[chosen_k]
print "Based on change in distortion value, Chosen k =", (chosen_k+2)
with open("clusters.json", "w") as jsonF:
json_data = {}
clusters = []
for key in true_global_minima[1]: #clusters
cluster_Dict = {}
children = []
for point in true_global_minima[1][key]:
node = {}
node["metadata"] = json.dumps(parser.from_file(point.filename)["metadata"])
node["name"] = point.filename.split('/')[-1]
node["path"] = point.filename
children.append(node)
cluster_Dict["children"] = children
cluster_Dict["name"] = "cluster" + str(key)
clusters.append(cluster_Dict)
json_data["children"] = clusters
json_data["name"] = "clusters"
json.dump(json_data, jsonF)
# TODO notes:
# - compute k-means from k=1 to k=10 and compare the resulting cost (distortion) values
# - get the maximum in each dimension of each vector
# - run k-means multiple times for the same value of k, and across different values of k
# - if k-means produced an empty cluster, remove that cluster id (either at the
#   first iteration or after all iterations complete?)
|
|
import hashlib
import os
import random
import struct
class SMP(object):
def __init__(self, secret=None):
self.p = 2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919
self.modOrder = (self.p - 1) / 2
self.g = 2
self.match = False
if type(secret) is str:
# Encode the string as a hex value
self.secret = int(secret.encode('hex'), 16)
elif type(secret) is int or type(secret) is long:
self.secret = secret
else:
raise TypeError("Secret must be an int or a string. Got type: " + str(type(secret)))
def step1(self):
self.b2 = createRandomExponent()
self.b3 = createRandomExponent()
self.g2 = pow(self.g, self.b2, self.p)
self.g3 = pow(self.g, self.b3, self.p)
(c1, d1) = self.createLogProof('1', self.b2)
(c2, d2) = self.createLogProof('2', self.b3)
# Send g2a, g3a, c1, d1, c2, d2
return packList(self.g2, self.g3, c1, d1, c2, d2)
def step1ForB(self, buffer):
(g2a, g3a, c1, d1, c2, d2) = unpackList(buffer)
if not self.isValidArgument(g2a) or not self.isValidArgument(g3a):
raise ValueError("Invalid g2a/g3a values")
if not self.checkLogProof('1', g2a, c1, d1):
raise ValueError("Proof 1 check failed")
if not self.checkLogProof('2', g3a, c2, d2):
raise ValueError("Proof 2 check failed")
self.g2a = g2a
self.g3a = g3a
self.b2 = createRandomExponent()
self.b3 = createRandomExponent()
b = createRandomExponent()
self.g2 = pow(self.g, self.b2, self.p)
self.g3 = pow(self.g, self.b3, self.p)
(c3, d3) = self.createLogProof('3', self.b2)
(c4, d4) = self.createLogProof('4', self.b3)
self.gb2 = pow(self.g2a, self.b2, self.p)
self.gb3 = pow(self.g3a, self.b3, self.p)
self.pb = pow(self.gb3, b, self.p)
self.qb = mulm(pow(self.g, b, self.p), pow(self.gb2, self.secret, self.p), self.p)
(c5, d5, d6) = self.createCoordsProof('5', self.gb2, self.gb3, b)
# Sends g2b, g3b, pb, qb, all the c's and d's
return packList(self.g2, self.g3, self.pb, self.qb, c3, d3, c4, d4, c5, d5, d6)
def step3(self, buffer):
(g2b, g3b, pb, qb, c3, d3, c4, d4, c5, d5, d6) = unpackList(buffer)
if not self.isValidArgument(g2b) or not self.isValidArgument(g3b) or \
not self.isValidArgument(pb) or not self.isValidArgument(qb):
raise ValueError("Invalid g2b/g3b/pb/qb values")
if not self.checkLogProof('3', g2b, c3, d3):
raise ValueError("Proof 3 check failed")
if not self.checkLogProof('4', g3b, c4, d4):
raise ValueError("Proof 4 check failed")
self.g2b = g2b
self.g3b = g3b
self.ga2 = pow(self.g2b, self.b2, self.p)
self.ga3 = pow(self.g3b, self.b3, self.p)
if not self.checkCoordsProof('5', c5, d5, d6, self.ga2, self.ga3, pb, qb):
raise ValueError("Proof 5 check failed")
s = createRandomExponent()
self.qb = qb
self.pb = pb
self.pa = pow(self.ga3, s, self.p)
self.qa = mulm(pow(self.g, s, self.p), pow(self.ga2, self.secret, self.p), self.p)
(c6, d7, d8) = self.createCoordsProof('6', self.ga2, self.ga3, s)
inv = self.invm(qb)
self.ra = pow(mulm(self.qa, inv, self.p), self.b3, self.p)
(c7, d9) = self.createEqualLogsProof('7', self.qa, inv, self.b3)
# Sends pa, qa, ra, c6, d7, d8, c7, d9
return packList(self.pa, self.qa, self.ra, c6, d7, d8, c7, d9)
def step4(self, buffer):
(pa, qa, ra, c6, d7, d8, c7, d9) = unpackList(buffer)
if not self.isValidArgument(pa) or not self.isValidArgument(qa) or not self.isValidArgument(ra):
raise ValueError("Invalid pa/qa/ra values")
if not self.checkCoordsProof('6', c6, d7, d8, self.gb2, self.gb3, pa, qa):
raise ValueError("Proof 6 check failed")
if not self.checkEqualLogs('7', c7, d9, self.g3a, mulm(qa, self.invm(self.qb), self.p), ra):
raise ValueError("Proof 7 check failed")
inv = self.invm(self.qb)
rb = pow(mulm(qa, inv, self.p), self.b3, self.p)
(c8, d10) = self.createEqualLogsProof('8', qa, inv, self.b3)
rab = pow(ra, self.b3, self.p)
inv = self.invm(self.pb)
if rab == mulm(pa, inv, self.p):
self.match = True
# Send rb, c8, d10
return packList(rb, c8, d10)
def step5(self, buffer):
(rb, c8, d10) = unpackList(buffer)
if not self.isValidArgument(rb):
raise ValueError("Invalid rb values")
if not self.checkEqualLogs('8', c8, d10, self.g3b, mulm(self.qa, self.invm(self.qb), self.p), rb):
raise ValueError("Proof 8 check failed")
rab = pow(rb, self.b3, self.p)
inv = self.invm(self.pb)
if rab == mulm(self.pa, inv, self.p):
self.match = True
def createLogProof(self, version, x):
randExponent = createRandomExponent()
c = sha256(version + str(pow(self.g, randExponent, self.p)))
d = (randExponent - mulm(x, c, self.modOrder)) % self.modOrder
return (c, d)
def checkLogProof(self, version, g, c, d):
gd = pow(self.g, d, self.p)
gc = pow(g, c, self.p)
gdgc = gd * gc % self.p
return (sha256(version + str(gdgc)) == c)
def createCoordsProof(self, version, g2, g3, r):
r1 = createRandomExponent()
r2 = createRandomExponent()
tmp1 = pow(g3, r1, self.p)
tmp2 = mulm(pow(self.g, r1, self.p), pow(g2, r2, self.p), self.p)
c = sha256(version + str(tmp1) + str(tmp2))
# TODO: make a subm function
d1 = (r1 - mulm(r, c, self.modOrder)) % self.modOrder
d2 = (r2 - mulm(self.secret, c, self.modOrder)) % self.modOrder
return (c, d1, d2)
def checkCoordsProof(self, version, c, d1, d2, g2, g3, p, q):
tmp1 = mulm(pow(g3, d1, self.p), pow(p, c, self.p), self.p)
tmp2 = mulm(mulm(pow(self.g, d1, self.p), pow(g2, d2, self.p), self.p), pow(q, c, self.p), self.p)
cprime = sha256(version + str(tmp1) + str(tmp2))
return (c == cprime)
def createEqualLogsProof(self, version, qa, qb, x):
r = createRandomExponent()
tmp1 = pow(self.g, r, self.p)
qab = mulm(qa, qb, self.p)
tmp2 = pow(qab, r, self.p)
c = sha256(version + str(tmp1) + str(tmp2))
tmp1 = mulm(x, c, self.modOrder)
d = (r - tmp1) % self.modOrder
return (c, d)
def checkEqualLogs(self, version, c, d, g3, qab, r):
tmp1 = mulm(pow(self.g, d, self.p), pow(g3, c, self.p), self.p)
tmp2 = mulm(pow(qab, d, self.p), pow(r, c, self.p), self.p)
cprime = sha256(version + str(tmp1) + str(tmp2))
return (c == cprime)
def invm(self, x):
return pow(x, self.p - 2, self.p)
def isValidArgument(self, val):
return (val >= 2 and val <= self.p - 2)
def packList(*items):
buffer = ''
# For each item in the list, convert it to a byte string and add its length as a prefix
for item in items:
bytes = longToBytes(item)
buffer += struct.pack('!I', len(bytes)) + bytes
return buffer
def unpackList(buffer):
items = []
index = 0
while index < len(buffer):
# Get the length of the long (4 byte int before the actual long)
length = struct.unpack('!I', buffer[index:index+4])[0]
index += 4
# Convert the data back to a long and add it to the list
item = bytesToLong(buffer[index:index+length])
items.append(item)
index += length
return items
def bytesToLong(bytes):
length = len(bytes)
string = 0
for i in range(length):
string += byteToLong(bytes[i:i+1]) << 8*(length-i-1)
return string
def longToBytes(long):
bytes = ''
while long != 0:
bytes = longToByte(long & 0xff) + bytes
long >>= 8
return bytes
def byteToLong(byte):
return struct.unpack('B', byte)[0]
def longToByte(long):
return struct.pack('B', long)
def mulm(x, y, mod):
return x * y % mod
def createRandomExponent():
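# 192 * 8 = 1536 bits of randomness; exponents of this size are used throughout
# the proofs and exchanges above.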
return random.getrandbits(192*8)
def sha256(message):
return long(hashlib.sha256(str(message)).hexdigest(), 16)
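# A minimal usage sketch (an illustration, not part of the original module):
# both parties construct an SMP instance from their own secret and relay the
# packed buffers produced by each step; after step4/step5 the 'match' flags
# show whether the two secrets were equal without either side revealing theirs.
def _smp_example(secret_a='shared secret', secret_b='shared secret'):
    alice = SMP(secret_a)
    bob = SMP(secret_b)
    buf1 = alice.step1()        # Alice -> Bob
    buf2 = bob.step1ForB(buf1)  # Bob -> Alice
    buf3 = alice.step3(buf2)    # Alice -> Bob
    buf4 = bob.step4(buf3)      # Bob -> Alice
    alice.step5(buf4)           # Alice finishes; both sides now know the result
    return alice.match and bob.match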
|
|
import warnings
from django import http
from django.test import TestCase
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
def skipUnlessAuthIsInstalled(func):
return skipIf(
'django.contrib.auth' not in settings.INSTALLED_APPS,
"django.contrib.auth isn't installed")(func)
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class BaseTest(TestCase):
storage_class = default_storage
restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS']
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self._remembered_settings = {}
for setting in self.restore_settings:
if hasattr(settings, setting):
self._remembered_settings[setting] = getattr(settings, setting)
delattr(settings._wrapped, setting)
# Backup these manually because we do not want them deleted.
self._middleware_classes = settings.MIDDLEWARE_CLASSES
self._template_context_processors = \
settings.TEMPLATE_CONTEXT_PROCESSORS
self._installed_apps = settings.INSTALLED_APPS
self._message_storage = settings.MESSAGE_STORAGE
settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = ()
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth.models')
def tearDown(self):
for setting in self.restore_settings:
self.restore_setting(setting)
# Restore these manually (see above).
settings.MIDDLEWARE_CLASSES = self._middleware_classes
settings.TEMPLATE_CONTEXT_PROCESSORS = \
self._template_context_processors
settings.INSTALLED_APPS = self._installed_apps
settings.MESSAGE_STORAGE = self._message_storage
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
self.restore_warnings_state()
def restore_setting(self, setting):
if setting in self._remembered_settings:
value = self._remembered_settings.pop(setting)
setattr(settings, setting, value)
elif hasattr(settings, setting):
delattr(settings._wrapped, setting)
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method prevents the storage ``_get`` method from being called so
that other parts of the storage backend can be tested independently of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_with_template_response(self):
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
for level in self.levels.keys():
add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@skipUnlessAuthIsInstalled
def test_middleware_disabled_auth_user(self):
"""
Tests that the messages API successfully falls back to using
user.message_set to store messages directly when the middleware is
disabled.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
user = User.objects.create_user('test', 'test@example.com', 'test')
self.client.login(username='test', password='test')
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
context_messages = list(response.context['messages'])
for msg in data['messages']:
self.assertTrue(msg in context_messages)
self.assertContains(response, msg)
def test_middleware_disabled_anon_user(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is raised when one attempts to store a message.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
def test_middleware_disabled_anon_user_fail_silently(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, no exception is raised if ``fail_silently=True`` is passed.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), [])
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
settings.MESSAGE_LEVEL = 29
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_custom_tags(self):
settings.MESSAGE_TAGS = {
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
base.LEVEL_TAGS = utils.get_level_tags()
try:
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
finally:
# Ensure the level tags constant is put back like we found it.
self.restore_setting('MESSAGE_TAGS')
base.LEVEL_TAGS = utils.get_level_tags()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Token provider interface."""
import base64
import datetime
import uuid
from oslo_log import log
from oslo_utils import timeutils
from keystone.common import cache
from keystone.common import manager
from keystone.common import provider_api
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.federation import constants
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
TOKENS_REGION = cache.create_region(name='tokens')
MEMOIZE_TOKENS = cache.get_memoization_decorator(
group='token',
region=TOKENS_REGION)
# NOTE(morganfainberg): This is for compatibility in case someone was relying
# on the old location of the UnsupportedTokenVersionException for their code.
UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException
# supported token versions
V3 = token_model.V3
VERSIONS = token_model.VERSIONS
# minimum access rules support
ACCESS_RULES_MIN_VERSION = token_model.ACCESS_RULES_MIN_VERSION
def default_expire_time():
"""Determine when a fresh token should expire.
Expiration time varies based on configuration (see ``[token] expiration``).
:returns: a naive UTC datetime.datetime object
"""
expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
expires_at = timeutils.utcnow() + expire_delta
return expires_at.replace(microsecond=0)
def random_urlsafe_str():
"""Generate a random URL-safe string.
:rtype: str
"""
# chop the padding (==) off the end of the encoding to save space
return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2].decode('utf-8')
class Manager(manager.Manager):
"""Default pivot point for the token provider backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.token.provider'
_provides_api = 'token_provider_api'
V3 = V3
VERSIONS = VERSIONS
def __init__(self):
super(Manager, self).__init__(CONF.token.provider)
self._register_callback_listeners()
def _register_callback_listeners(self):
# This is used by the @dependency.provider decorator to register the
# provider (token_provider_api) manager to listen for trust deletions.
callbacks = {
notifications.ACTIONS.deleted: [
['OS-TRUST:trust', self._drop_token_cache],
['user', self._drop_token_cache],
['domain', self._drop_token_cache],
],
notifications.ACTIONS.disabled: [
['user', self._drop_token_cache],
['domain', self._drop_token_cache],
['project', self._drop_token_cache],
],
notifications.ACTIONS.internal: [
[notifications.INVALIDATE_TOKEN_CACHE,
self._drop_token_cache],
]
}
for event, cb_info in callbacks.items():
for resource_type, callback_fns in cb_info:
notifications.register_event_callback(event, resource_type,
callback_fns)
def _drop_token_cache(self, service, resource_type, operation, payload):
"""Invalidate the entire token cache.
This is a handy private utility method that should be used when
consuming notifications that signal invalidating the token cache.
"""
if CONF.token.cache_on_issue or CONF.token.caching:
TOKENS_REGION.invalidate()
def check_revocation_v3(self, token):
token_values = self.revoke_api.model.build_token_values(token)
PROVIDERS.revoke_api.check_token(token_values)
def check_revocation(self, token):
return self.check_revocation_v3(token)
def validate_token(self, token_id, window_seconds=0,
access_rules_support=None):
if not token_id:
raise exception.TokenNotFound(_('No token in the request'))
try:
token = self._validate_token(token_id)
self._is_valid_token(token, window_seconds=window_seconds)
self._validate_token_access_rules(token, access_rules_support)
return token
except exception.Unauthorized as e:
LOG.debug('Unable to validate token: %s', e)
raise exception.TokenNotFound(token_id=token_id)
@MEMOIZE_TOKENS
def _validate_token(self, token_id):
(user_id, methods, audit_ids, system, domain_id,
project_id, trust_id, federated_group_ids, identity_provider_id,
protocol_id, access_token_id, app_cred_id, issued_at,
expires_at) = self.driver.validate_token(token_id)
token = token_model.TokenModel()
token.user_id = user_id
token.methods = methods
if len(audit_ids) > 1:
token.parent_audit_id = audit_ids.pop()
token.audit_id = audit_ids.pop()
token.system = system
token.domain_id = domain_id
token.project_id = project_id
token.trust_id = trust_id
token.access_token_id = access_token_id
token.application_credential_id = app_cred_id
token.expires_at = expires_at
if federated_group_ids is not None:
token.is_federated = True
token.identity_provider_id = identity_provider_id
token.protocol_id = protocol_id
token.federated_groups = federated_group_ids
token.mint(token_id, issued_at)
return token
def _is_valid_token(self, token, window_seconds=0):
"""Verify the token is valid format and has not expired."""
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
expiry = timeutils.parse_isotime(token.expires_at)
expiry = timeutils.normalize_time(expiry)
# add a window in which you can fetch a token beyond expiry
expiry += datetime.timedelta(seconds=window_seconds)
except Exception:
LOG.exception('Unexpected error or malformed token '
'determining token expiry: %s', token)
raise exception.TokenNotFound(_('Failed to validate token'))
if current_time < expiry:
self.check_revocation(token)
# Token has not expired and has not been revoked.
return None
else:
raise exception.TokenNotFound(_('Failed to validate token'))
def _validate_token_access_rules(self, token, access_rules_support=None):
if token.application_credential_id:
app_cred_api = PROVIDERS.application_credential_api
app_cred = app_cred_api.get_application_credential(
token.application_credential_id)
if (app_cred.get('access_rules') is not None and
(not access_rules_support or
(float(access_rules_support) < ACCESS_RULES_MIN_VERSION))):
LOG.exception('Attempted to use application credential'
' access rules with a middleware that does not'
' understand them. You must upgrade'
' keystonemiddleware on all services that'
' accept application credentials as an'
' authentication method.')
raise exception.TokenNotFound(_('Failed to validate token'))
def issue_token(self, user_id, method_names, expires_at=None,
system=None, project_id=None, domain_id=None,
auth_context=None, trust_id=None, app_cred_id=None,
parent_audit_id=None):
# NOTE(lbragstad): Grab a blank token object and use composition to
# build the token according to the authentication and authorization
# context. This cuts down on the amount of logic we have to stuff into
# the TokenModel's __init__() method.
token = token_model.TokenModel()
token.methods = method_names
token.system = system
token.domain_id = domain_id
token.project_id = project_id
token.trust_id = trust_id
token.application_credential_id = app_cred_id
token.audit_id = random_urlsafe_str()
token.parent_audit_id = parent_audit_id
if auth_context:
if constants.IDENTITY_PROVIDER in auth_context:
token.is_federated = True
token.protocol_id = auth_context[constants.PROTOCOL]
idp_id = auth_context[constants.IDENTITY_PROVIDER]
if isinstance(idp_id, bytes):
idp_id = idp_id.decode('utf-8')
token.identity_provider_id = idp_id
token.user_id = auth_context['user_id']
token.federated_groups = [
{'id': group} for group in auth_context['group_ids']
]
if 'access_token_id' in auth_context:
token.access_token_id = auth_context['access_token_id']
if not token.user_id:
token.user_id = user_id
token.user_domain_id = token.user['domain_id']
if isinstance(expires_at, datetime.datetime):
token.expires_at = utils.isotime(expires_at, subsecond=True)
if isinstance(expires_at, str):
token.expires_at = expires_at
elif not expires_at:
token.expires_at = utils.isotime(
default_expire_time(), subsecond=True
)
token_id, issued_at = self.driver.generate_id_and_issued_at(token)
token.mint(token_id, issued_at)
# cache the token object and with ID
if CONF.token.cache_on_issue or CONF.token.caching:
# NOTE(amakarov): here and above TOKENS_REGION is to be passed
# to serve as required positional "self" argument. It's ignored,
# so I've put it here for convenience - any placeholder is fine.
self._validate_token.set(token, self, token.id)
return token
def invalidate_individual_token_cache(self, token_id):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._validate_token.invalidate(self, token_id)
def revoke_token(self, token_id, revoke_chain=False):
token = self.validate_token(token_id)
project_id = token.project_id if token.project_scoped else None
domain_id = token.domain_id if token.domain_scoped else None
if revoke_chain:
PROVIDERS.revoke_api.revoke_by_audit_chain_id(
token.parent_audit_id, project_id=project_id,
domain_id=domain_id
)
else:
PROVIDERS.revoke_api.revoke_by_audit_id(token.audit_id)
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self.invalidate_individual_token_cache(token_id)
|
|
#!/usr/bin/env python
"""This module defines the "%scd" magic command in an IPython session.
The %scd or smart-cd command can find and change to any directory,
without having to know its exact path. In addition, this module updates
the %cd, %popd, %pushd magic commands so that they record the visited
directory in the scd index file.
The scd executable script must be in the system PATH or the
ipy_scd.scd_executable must be set to its full path.
To define the %scd magic for every IPython session, add this module
to the c.TerminalIPythonApp.extensions list in the
IPYTHON/profile_default/ipython_config.py file.
"""
import IPython
# Require IPython version 0.13 or later --------------------------------------
ipyversion = []
for w in IPython.__version__.split('.'):
if not w.isdigit(): break
ipyversion.append(int(w))
assert ipyversion >= [0, 13], "ipy_scd requires IPython 0.13 or later."
# We have a recent IPython here ----------------------------------------------
import os
import subprocess
from IPython.core.magic import Magics, magics_class, line_magic
class _cdcommands:
"""Namespace class for saving original cd-related commands."""
cd = None
pushd = None
popd = None
pass
def whereisexecutable(program):
'''Return a list of files in the system PATH executable by the user.
program -- command name that is looked for in the PATH
Return a list of absolute paths to the program. When program
contains any path components, just check if that file is executable.
'''
isexecutable = lambda f: os.access(f, os.X_OK) and os.path.isfile(f)
ppath, pname = os.path.split(program)
rv = []
if ppath:
rv += [program]
else:
rv += [os.path.join(d, program)
for d in os.environ['PATH'].split(os.pathsep)]
rv = [os.path.abspath(f) for f in rv if isexecutable(f)]
return rv
# full path to the scd_executable or an empty string when not found
scd_executable = (whereisexecutable('scd') + [''])[0]
@magics_class
class SCDMagics(Magics):
@line_magic
def scd(self, arg):
'''scd -- smart change to a recently used directory
usage: scd [options] [pattern1 pattern2 ...]
Go to a directory path that matches all patterns. Prefer recent or
frequently visited directories as found in the directory index.
Display a selection menu in case of multiple matches.
Special patterns:
^PAT match at the path root, "^/home"
PAT$ match paths ending with PAT, "man$"
./ match paths under the current directory
:PAT require PAT to span the tail, ":doc", ":re/doc"
Options:
-a, --add add current or specified directories to the index.
--unindex remove current or specified directories from the index.
-r, --recursive apply options --add or --unindex recursively.
--alias=ALIAS create alias for the current or specified directory and
store it in ~/.scdalias.zsh.
--unalias remove ALIAS definition for the current or specified
directory from ~/.scdalias.zsh.
Use "OLD" to purge aliases to non-existent directories.
-A, --all display all directories even those excluded by patterns
in ~/.scdignore. Disregard unique match for a directory
alias and filtering of less likely paths.
-p, --push use "pushd" to change to the target directory.
--list show matching directories and exit.
-v, --verbose display directory rank in the selection menu.
-h, --help display this message and exit.
'''
import tempfile
import shlex
scdfile = tempfile.NamedTemporaryFile('r')
env = dict(os.environ)
env['SCD_SCRIPT'] = scdfile.name
args = [scd_executable] + shlex.split(str(arg))
retcode = subprocess.call(args, env=env)
cmd = scdfile.read().rstrip()
scdfile.close()
cpth = cmd.split(' ', 1)
if retcode == 0 and cpth[0] in ('cd', 'pushd'):
fcd = getattr(_cdcommands, cpth[0])
fcd(cpth[1])
_scd_record_cwd()
return
from IPython.core.magics import OSMagics
@line_magic
def cd(self, arg):
rv = _cdcommands.cd(arg)
_scd_record_cwd()
return rv
cd.__doc__ = OSMagics.cd.__doc__
@line_magic
def pushd(self, arg):
rv = _cdcommands.pushd(arg)
_scd_record_cwd()
return rv
pushd.__doc__ = OSMagics.pushd.__doc__
@line_magic
def popd(self, arg):
rv = _cdcommands.popd(arg)
_scd_record_cwd()
return rv
popd.__doc__ = OSMagics.popd.__doc__
del OSMagics
# Function for loading the scd magic with the 0.11 or later API
def load_ipython_extension(ipython):
'''Define the scd command and overloads of cd, pushd, popd that record
newly visited paths to the scd history file.
'''
_raiseIfNoExecutable()
if _cdcommands.cd is None:
_cdcommands.cd = ipython.find_magic('cd')
_cdcommands.pushd = ipython.find_magic('pushd')
_cdcommands.popd = ipython.find_magic('popd')
ipython.register_magics(SCDMagics)
global _scd_active
_scd_active = True
return
def unload_ipython_extension(ipython):
global _scd_active
_scd_active = False
ipython.magics_manager.magics['line'].pop('scd', None)
if _cdcommands.cd is not None:
ipython.register_magic_function(_cdcommands.cd)
ipython.register_magic_function(_cdcommands.pushd)
ipython.register_magic_function(_cdcommands.popd)
return
def _scd_record_cwd(cwd=None):
import time
global _scd_last_directory
if not _scd_active:
return
if cwd is None:
cwd = os.getcwd()
if cwd == _scd_last_directory:
return
_scd_last_directory = cwd
scd_histfile = (os.environ.get('SCD_HISTFILE') or
os.path.expanduser('~/.scdhistory'))
is_new_file = not os.path.exists(scd_histfile)
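# Each entry is written as ": <timestamp>:0;<path>", matching zsh's extended-history format.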
fmt = ': {:.0f}:0;{}\n'
with open(scd_histfile, 'a') as fp:
fp.write(fmt.format(time.time(), cwd))
if is_new_file:
os.chmod(scd_histfile, 0o600)
return
_scd_last_directory = ''
def _raiseIfNoExecutable():
emsg = ("scd executable not found. Place it to a directory in the "
"PATH or define the ipy_scd.scd_executable variable.")
if not scd_executable:
raise RuntimeError(emsg)
return
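# A minimal usage sketch, not part of the extension itself: it assumes this
# module is importable as "ipy_scd" and that it runs inside an IPython session;
# the pattern "proj" and the indexed directory are placeholders.
def _example_load_and_use_scd():
    from IPython import get_ipython
    ip = get_ipython()
    # Registers %scd and wraps %cd/%pushd/%popd so every visited directory
    # is appended to the scd history file.
    ip.run_line_magic('load_ext', 'ipy_scd')
    # Add the current directory to the index, then jump to the best match.
    ip.run_line_magic('scd', '--add .')
    ip.run_line_magic('scd', 'proj')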
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> "_models.DdosProtectionPlan":
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> "_models.DdosProtectionPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosProtectionPlan"]:
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
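# A minimal usage sketch, not generated code: it assumes the aio client of this
# package version (azure.mgmt.network.v2018_02_01.aio.NetworkManagementClient),
# which attaches DdosProtectionPlansOperations as ``client.ddos_protection_plans``,
# plus azure-identity for credentials.  Subscription id, resource group, plan
# name and location below are placeholders; the body is passed as a plain dict
# for brevity instead of the DdosProtectionPlan model.
async def _example_ddos_protection_plans():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2018_02_01.aio import NetworkManagementClient
    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, "<subscription-id>") as client:
        # begin_* methods return an AsyncLROPoller; await result() for the model.
        poller = await client.ddos_protection_plans.begin_create_or_update(
            "my-resource-group", "my-ddos-plan", {"location": "westus"})
        plan = await poller.result()
        print(plan.name, plan.provisioning_state)
        # list()/list_by_resource_group() return AsyncItemPaged (``async for``).
        async for p in client.ddos_protection_plans.list():
            print(p.id)
        delete_poller = await client.ddos_protection_plans.begin_delete(
            "my-resource-group", "my-ddos-plan")
        await delete_poller.result()
    await credential.close()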
|
|
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 3.0 or later
Changelog:
2009-05-28, Pilgrim: ported to Python 3
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger",
"Mark Pilgrim"]
__license__ = "MIT"
__version__ = "$Rev: 259 $"
import re
import sys
import email
import email.utils
import email.message
import email.feedparser
import io
import gzip
import zlib
import http.client
import urllib.parse
import base64
import os
import copy
import calendar
import time
import random
from hashlib import sha1 as _sha, md5 as _md5
import hmac
from gettext import gettext as _
import socket
import ssl
_ssl_wrap_socket = ssl.wrap_socket
try:
import socks
except ImportError:
socks = None
from .iri2uri import iri2uri
def has_timeout(timeout):
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in list(response.keys()) if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
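# A small illustrative check, not part of the library: the regex splits a URI
# into the five RFC 3986 components used throughout this module.
def _example_parse_uri():
    assert parse_uri("http://example.com/a/b?x=1#frag") == (
        "http", "example.com", "/a/b", "x=1", "frag")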
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(br'^\w+://')
re_url_scheme_s = re.compile(r'^\w+://')
re_slash = re.compile(br'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme_s.match(filename):
if isinstance(filename,bytes):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,str):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest().encode('utf-8')
filename = re_url_scheme.sub(b"", filename)
filename = re_slash.sub(b",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return b",".join((filename, filemd5)).decode('utf-8')
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    # Lower-case the header names and collapse folded/linear whitespace in each
    # value to a single space before stripping it.
    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.items()])
def _parse_cache_control(headers):
retval = {}
if 'cache-control' in headers:
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
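# A small illustrative check, not part of the library: directive values keep
# their string form, valueless directives map to 1.
def _example_parse_cache_control():
    headers = {'cache-control': 'max-age=3600, no-cache'}
    assert _parse_cache_control(headers) == {'max-age': '3600', 'no-cache': 1}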
# Whether to use a strict mode to parse WWW-Authenticate headers
# Strict parsing might lead to bad results for ill-formed header values,
# so it is disabled by default, falling back to relaxed parsing.
# Set to 1 to turn it on; useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headername in headers:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
return retval
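# A small illustrative check, not part of the library: one dict of auth-params
# per (lower-cased) challenge scheme found in the header.
def _example_parse_www_authenticate():
    headers = {'www-authenticate': 'Digest realm="test", nonce="abc", qop="auth"'}
    assert _parse_www_authenticate(headers) == {
        'digest': {'realm': 'test', 'nonce': 'abc', 'qop': 'auth'}}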
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale document
    as fresh, hence the non-implementation of 'max-stale'.
    This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif 'no-cache' in cc:
retval = "TRANSPARENT"
elif 'no-cache' in cc_response:
retval = "STALE"
elif 'only-if-cached' in cc:
retval = "FRESH"
elif 'date' in response_headers:
date = calendar.timegm(email.utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if 'max-age' in cc_response:
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif 'expires' in response_headers:
expires = email.utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if 'max-age' in cc:
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if 'min-fresh' in cc:
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
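# A small illustrative check, not part of the library: a response dated "now"
# is FRESH while its max-age exceeds its age, otherwise STALE.
def _example_entry_disposition():
    date = email.utils.formatdate(time.time(), usegmt=True)
    assert _entry_disposition({'date': date, 'cache-control': 'max-age=300'}, {}) == "FRESH"
    assert _entry_disposition({'date': date, 'cache-control': 'max-age=0'}, {}) == "STALE"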
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=io.BytesIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if 'no-store' in cc or 'no-store' in cc_response:
cache.delete(cachekey)
else:
info = email.message.Message()
for key, value in response_headers.items():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
            status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = b"".join([status_header.encode('utf-8'), header_str.encode('utf-8'), content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5(("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).encode('utf-8')).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    # b64encode returns bytes; decode so the token can be placed in the X-WSSE header string.
    return base64.b64encode(_sha(("%s%s%s" % (cnonce, iso_now, password)).encode('utf-8')).digest()).strip().decode('utf-8')
# For credentials we need two things. First,
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.).
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
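# A minimal sketch of how the credential pool is meant to be used, not part of
# the library: the host name and URL below are placeholders.  Credentials added
# with an empty domain are offered for every challenged host; a non-empty
# domain restricts them to that host.
def _example_add_credentials():
    import httplib2
    h = httplib2.Http()
    h.add_credentials("name", "password")                       # any host
    h.add_credentials("other", "secret", "protected.example")   # that host only
    resp, content = h.request("https://protected.example/api", "GET")
    return resp.status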
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-rise this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Override this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
def __eq__(self, auth):
return False
def __ne__(self, auth):
return True
def __lt__(self, auth):
return True
def __gt__(self, auth):
return False
def __le__(self, auth):
return True
def __ge__(self, auth):
return False
def __bool__(self):
return True
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode(("%s:%s" % self.credentials).encode('utf-8')).strip().decode('utf-8')
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x.encode('utf-8')).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
self.challenge['nc'] += 1
def response(self, response, content):
if 'authentication-info' not in response:
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if 'nextnonce' in updated_challenge:
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer (t.broyer@ltgt.net)"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key.encode('utf-8'), request_digest.encode('utf-8'), self.hashmod).hexdigest().lower()
headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['Authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib.parse import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # request() returns the body as bytes; decode before splitting into lines.
        lines = content.decode('utf-8').split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = open(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = open(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
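# A small illustrative sketch, not part of the library: FileCache maps a key
# (normally the defragmented request URI) to raw bytes stored in one file.
def _example_filecache():
    import tempfile
    cache = FileCache(tempfile.mkdtemp())
    cache.set("http://example.org/", b"status: 200\r\n\r\nhello")
    assert cache.get("http://example.org/") == b"status: 200\r\n\r\nhello"
    cache.delete("http://example.org/")
    assert cache.get("http://example.org/") is None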
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
self.proxy_user, self.proxy_pass)
def isgood(self):
return socks and (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(http.client.HTTPConnection):
"""HTTPConnection subclass that supports timeouts"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
http.client.HTTPConnection.__init__(self, host, port, strict, timeout)
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print("connect: (%s, %s)" % (self.host, self.port))
self.sock.connect(sa)
except socket.error as msg:
if self.debuglevel > 0:
print('connect fail:', (self.host, self.port))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error(msg)
class HTTPSConnectionWithTimeout(http.client.HTTPSConnection):
"This class allows communication via SSL."
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None):
self.proxy_info = proxy_info
http.client.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict, timeout=timeout)
def connect(self):
"Connect to a host on a given (SSL) port."
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock = _ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
"""The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name
for a disk cache. Otherwise it must be an object that supports
the same interface as FileCache."""
self.proxy_info = proxy_info
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, str):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if scheme in challenges:
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
conn.request(method, request_uri, body, headers)
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except (socket.error, http.client.HTTPException):
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
pass
try:
response = conn.getresponse()
except (socket.error, http.client.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = b""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if 'location' not in response and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if 'location' in response:
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urllib.parse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if 'content-location' not in response:
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if 'if-none-match' in headers:
del headers['if-none-match']
if 'if-modified-since' in headers:
del headers['if-modified-since']
if 'location' in response:
location = response['location']
old_response = copy.deepcopy(response)
if 'content-location' not in old_response:
old_response['content-location'] = absolute_uri
redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
                    raise RedirectLimit( _("Redirected more times than redirection_limit allows."), response, content)
elif response.status in [200, 203] and method == "GET":
# Don't cache 206's since we aren't going to handle byte range requests
if 'content-location' not in response:
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a bytestring that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if 'user-agent' not in headers:
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split(b'\r\n\r\n', 1)
info = info.decode('utf-8')
feedparser = email.feedparser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except IndexError:
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and 'etag' in info and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, '') != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if '-x-permanent-redirect-url' in info:
# Should cached permanent redirects be counted in our redirection count? For now, yes.
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = b""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if 'etag' in info and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if 'last-modified' in info and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
                if 'only-if-cached' in cc:
info['status'] = '504'
response = Response(info)
content = b""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception as e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = b"Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e).encode('utf-8')
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
class Response(dict):
"""An object more like email.message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.message or
# an httplib.HTTPResponse object.
if isinstance(info, http.client.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.message.Message):
for key, value in list(info.items()):
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.items():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError(name)
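# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch (not part of the original module),
# assuming the request() method above belongs to this module's Http class and
# that the module is importable as httplib2. The URL and cache directory are
# placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # A directory-backed cache lets repeated GETs be answered locally when the
    # server's caching headers allow it.
    h = Http(".cache")
    resp, content = h.request("http://example.org/", "GET")
    print(resp.status, resp.fromcache)   # first fetch: fromcache is False
    resp, content = h.request("http://example.org/", "GET")
    print(resp.status, resp.fromcache)   # may now be served from the cache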
|
|
"""
SleekXMPP: The Sleek XMPP Library
Implementation of xeps for Internet of Things
http://wiki.xmpp.org/web/Tech_pages/IoT_systems
Copyright (C) 2013 Sustainable Innovation, Joachim.lindborg@sust.se, bjorn.westrom@consoden.se
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp import Iq, Message
from sleekxmpp.xmlstream import register_stanza_plugin, ElementBase, ET, JID
import re
class Sensordata(ElementBase):
""" Placeholder for the namespace, not used as a stanza """
namespace = 'urn:xmpp:iot:sensordata'
name = 'sensordata'
plugin_attrib = name
interfaces = set(tuple())
class FieldTypes():
"""
All field types are optional booleans that default to False
"""
field_types = set([ 'momentary','peak','status','computed','identity','historicalSecond','historicalMinute','historicalHour', \
'historicalDay','historicalWeek','historicalMonth','historicalQuarter','historicalYear','historicalOther'])
class FieldStatus():
"""
All field statuses are optional booleans that default to False
"""
field_status = set([ 'missing','automaticEstimate','manualEstimate','manualReadout','automaticReadout','timeOffset','warning','error', \
'signed','invoiced','endOfSeries','powerFailure','invoiceConfirmed'])
class Request(ElementBase):
namespace = 'urn:xmpp:iot:sensordata'
name = 'req'
plugin_attrib = name
interfaces = set(['seqnr','nodes','fields','serviceToken','deviceToken','userToken','from','to','when','historical','all'])
interfaces.update(FieldTypes.field_types);
_flags = set(['serviceToken','deviceToken','userToken','from','to','when','historical','all']);
_flags.update(FieldTypes.field_types);
def __init__(self, xml=None, parent=None):
ElementBase.__init__(self, xml, parent);
self._nodes = set()
self._fields = set()
def setup(self, xml=None):
"""
Populate the stanza object using an optional XML object.
Overrides ElementBase.setup
Caches item information.
Arguments:
xml -- Use an existing XML object for the stanza's values.
"""
ElementBase.setup(self, xml)
self._nodes = set([node['nodeId'] for node in self['nodes']])
self._fields = set([field['name'] for field in self['fields']])
def _get_flags(self):
"""
Helper function for getting of flags. Returns all flags in
dictionary format: { "flag name": "flag value" ... }
"""
flags = {};
for f in self._flags:
if not self[f] == "":
flags[f] = self[f];
return flags;
def _set_flags(self, flags):
"""
Helper function for setting of flags.
Arguments:
flags -- Flags in dictionary format: { "flag name": "flag value" ... }
"""
for f in self._flags:
if flags is not None and f in flags:
self[f] = flags[f];
else:
self[f] = None;
def add_node(self, nodeId, sourceId=None, cacheType=None):
"""
Add a new node element. Each item is required to have a
nodeId, but may also specify a sourceId value and cacheType.
Arguments:
nodeId -- The ID for the node.
sourceId -- [optional] identifying the data source controlling the device
cacheType -- [optional] narrowing down the search to a specific kind of node
"""
if nodeId not in self._nodes:
self._nodes.add((nodeId))
node = RequestNode(parent=self)
node['nodeId'] = nodeId
node['sourceId'] = sourceId
node['cacheType'] = cacheType
self.iterables.append(node)
return node
return None
def del_node(self, nodeId):
"""
Remove a single node.
Arguments:
nodeId -- Node ID of the item to remove.
"""
if nodeId in self._nodes:
nodes = [i for i in self.iterables if isinstance(i, RequestNode)]
for node in nodes:
if node['nodeId'] == nodeId:
self.xml.remove(node.xml)
self.iterables.remove(node)
return True
return False
def get_nodes(self):
"""Return all nodes."""
nodes = set()
for node in self['substanzas']:
if isinstance(node, RequestNode):
nodes.add(node)
return nodes
def set_nodes(self, nodes):
"""
Set or replace all nodes. The given nodes must be in a
list or set where each item is a tuple of the form:
(nodeId, sourceId, cacheType)
Arguments:
nodes -- A series of nodes in tuple format.
"""
self.del_nodes()
for node in nodes:
if isinstance(node, RequestNode):
self.add_node(node['nodeId'], node['sourceId'], node['cacheType'])
else:
nodeId, sourceId, cacheType = node
self.add_node(nodeId, sourceId, cacheType)
def del_nodes(self):
"""Remove all nodes."""
self._nodes = set()
nodes = [i for i in self.iterables if isinstance(i, RequestNode)]
for node in nodes:
self.xml.remove(node.xml)
self.iterables.remove(node)
def add_field(self, name):
"""
Add a new field element. Each item is required to have a
name.
Arguments:
name -- The name of the field.
"""
if name not in self._fields:
self._fields.add((name))
field = RequestField(parent=self)
field['name'] = name
self.iterables.append(field)
return field
return None
def del_field(self, name):
"""
Remove a single field.
Arguments:
name -- name of field to remove.
"""
if name in self._fields:
fields = [i for i in self.iterables if isinstance(i, RequestField)]
for field in fields:
if field['name'] == name:
self.xml.remove(field.xml)
self.iterables.remove(field)
return True
return False
def get_fields(self):
"""Return all fields."""
fields = set()
for field in self['substanzas']:
if isinstance(field, RequestField):
fields.add(field)
return fields
def set_fields(self, fields):
"""
Set or replace all fields. The given fields must be in a
list or set where each item is RequestField or string
Arguments:
fields -- A series of fields in RequestField or string format.
"""
self.del_fields()
for field in fields:
if isinstance(field, RequestField):
self.add_field(field['name'])
else:
self.add_field(field)
def del_fields(self):
"""Remove all fields."""
self._fields = set()
fields = [i for i in self.iterables if isinstance(i, RequestField)]
for field in fields:
self.xml.remove(field.xml)
self.iterables.remove(field)
class RequestNode(ElementBase):
""" Node element in a request """
namespace = 'urn:xmpp:iot:sensordata'
name = 'node'
plugin_attrib = name
interfaces = set(['nodeId','sourceId','cacheType'])
class RequestField(ElementBase):
""" Field element in a request """
namespace = 'urn:xmpp:iot:sensordata'
name = 'field'
plugin_attrib = name
interfaces = set(['name'])
class Accepted(ElementBase):
namespace = 'urn:xmpp:iot:sensordata'
name = 'accepted'
plugin_attrib = name
interfaces = set(['seqnr','queued'])
class Started(ElementBase):
namespace = 'urn:xmpp:iot:sensordata'
name = 'started'
plugin_attrib = name
interfaces = set(['seqnr'])
class Failure(ElementBase):
namespace = 'urn:xmpp:iot:sensordata'
name = 'failure'
plugin_attrib = name
interfaces = set(['seqnr','done'])
class Error(ElementBase):
""" Error element in a request failure """
namespace = 'urn:xmpp:iot:sensordata'
name = 'error'
plugin_attrib = name
interfaces = set(['nodeId','timestamp','sourceId','cacheType','text'])
def get_text(self):
"""Return then contents inside the XML tag."""
return self.xml.text
def set_text(self, value):
"""Set then contents inside the XML tag.
:param value: string
"""
self.xml.text = value;
return self
def del_text(self):
"""Remove the contents inside the XML tag."""
self.xml.text = ""
return self
class Rejected(ElementBase):
namespace = 'urn:xmpp:iot:sensordata'
name = 'rejected'
plugin_attrib = name
interfaces = set(['seqnr','error'])
sub_interfaces = set(['error'])
class Fields(ElementBase):
""" Fields element, top level in a response message with data """
namespace = 'urn:xmpp:iot:sensordata'
name = 'fields'
plugin_attrib = name
interfaces = set(['seqnr','done','nodes'])
def __init__(self, xml=None, parent=None):
ElementBase.__init__(self, xml, parent);
self._nodes = set()
def setup(self, xml=None):
"""
Populate the stanza object using an optional XML object.
Overrides ElementBase.setup
Caches item information.
Arguments:
xml -- Use an existing XML object for the stanza's values.
"""
ElementBase.setup(self, xml)
self._nodes = set([node['nodeId'] for node in self['nodes']])
def add_node(self, nodeId, sourceId=None, cacheType=None, substanzas=None):
"""
Add a new node element. Each item is required to have a
nodeId, but may also specify a sourceId value and cacheType.
Arguments:
nodeId -- The ID for the node.
sourceId -- [optional] identifying the data source controlling the device
cacheType -- [optional] narrowing down the search to a specific kind of node
"""
if nodeId not in self._nodes:
self._nodes.add((nodeId))
node = FieldsNode(parent=self)
node['nodeId'] = nodeId
node['sourceId'] = sourceId
node['cacheType'] = cacheType
if substanzas is not None:
node.set_timestamps(substanzas)
self.iterables.append(node)
return node
return None
def del_node(self, nodeId):
"""
Remove a single node.
Arguments:
nodeId -- Node ID of the item to remove.
"""
if nodeId in self._nodes:
nodes = [i for i in self.iterables if isinstance(i, FieldsNode)]
for node in nodes:
if node['nodeId'] == nodeId:
self.xml.remove(node.xml)
self.iterables.remove(node)
return True
return False
def get_nodes(self):
"""Return all nodes."""
nodes = set()
for node in self['substanzas']:
if isinstance(node, FieldsNode):
nodes.add(node)
return nodes
def set_nodes(self, nodes):
"""
Set or replace all nodes. The given nodes must be in a
list or set where each item is a tuple of the form:
(nodeId, sourceId, cacheType)
Arguments:
nodes -- A series of nodes in tuple format.
"""
#print(str(id(self)) + " set_nodes: got " + str(nodes))
self.del_nodes()
for node in nodes:
if isinstance(node, FieldsNode):
self.add_node(node['nodeId'], node['sourceId'], node['cacheType'], substanzas=node['substanzas'])
else:
nodeId, sourceId, cacheType = node
self.add_node(nodeId, sourceId, cacheType)
def del_nodes(self):
"""Remove all nodes."""
self._nodes = set()
nodes = [i for i in self.iterables if isinstance(i, FieldsNode)]
for node in nodes:
self.xml.remove(node.xml)
self.iterables.remove(node)
class FieldsNode(ElementBase):
""" Node element in response fields """
namespace = 'urn:xmpp:iot:sensordata'
name = 'node'
plugin_attrib = name
interfaces = set(['nodeId','sourceId','cacheType','timestamps'])
def __init__(self, xml=None, parent=None):
ElementBase.__init__(self, xml, parent);
self._timestamps = set()
def setup(self, xml=None):
"""
Populate the stanza object using an optional XML object.
Overrides ElementBase.setup
Caches item information.
Arguments:
xml -- Use an existing XML object for the stanza's values.
"""
ElementBase.setup(self, xml)
self._timestamps = set([ts['value'] for ts in self['timestamps']])
def add_timestamp(self, timestamp, substanzas=None):
"""
Add a new timestamp element.
Arguments:
timestamp -- The timestamp in ISO format.
"""
#print(str(id(self)) + " add_timestamp: " + str(timestamp))
if timestamp not in self._timestamps:
self._timestamps.add((timestamp))
ts = Timestamp(parent=self)
ts['value'] = timestamp
            if substanzas is not None:
ts.set_datas(substanzas);
#print("add_timestamp with substanzas: " + str(substanzas))
self.iterables.append(ts)
#print(str(id(self)) + " added_timestamp: " + str(id(ts)))
return ts
return None
def del_timestamp(self, timestamp):
"""
Remove a single timestamp.
Arguments:
timestamp -- timestamp (in ISO format) of the item to remove.
"""
#print("del_timestamp: ")
if timestamp in self._timestamps:
timestamps = [i for i in self.iterables if isinstance(i, Timestamp)]
for ts in timestamps:
if ts['value'] == timestamp:
self.xml.remove(ts.xml)
self.iterables.remove(ts)
return True
return False
def get_timestamps(self):
"""Return all timestamps."""
#print(str(id(self)) + " get_timestamps: ")
timestamps = set()
for timestamp in self['substanzas']:
if isinstance(timestamp, Timestamp):
timestamps.add(timestamp)
return timestamps
def set_timestamps(self, timestamps):
"""
Set or replace all timestamps. The given timestamps must be in a
list or set where each item is a timestamp
Arguments:
timestamps -- A series of timestamps.
"""
#print(str(id(self)) + " set_timestamps: got " + str(timestamps))
self.del_timestamps()
for timestamp in timestamps:
#print("set_timestamps: subset " + str(timestamp))
#print("set_timestamps: subset.substanzas " + str(timestamp['substanzas']))
if isinstance(timestamp, Timestamp):
self.add_timestamp(timestamp['value'], substanzas=timestamp['substanzas'])
else:
#print("set_timestamps: got " + str(timestamp))
self.add_timestamp(timestamp)
def del_timestamps(self):
"""Remove all timestamps."""
#print(str(id(self)) + " del_timestamps: ")
self._timestamps = set()
timestamps = [i for i in self.iterables if isinstance(i, Timestamp)]
for timestamp in timestamps:
self.xml.remove(timestamp.xml)
self.iterables.remove(timestamp)
class Field(ElementBase):
"""
Field element in response Timestamp. This is a base class,
all instances of fields added to Timestamp must be of types:
DataNumeric
DataString
DataBoolean
DataDateTime
DataTimeSpan
DataEnum
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'field'
plugin_attrib = name
interfaces = set(['name','module','stringIds']);
interfaces.update(FieldTypes.field_types);
interfaces.update(FieldStatus.field_status);
_flags = set();
_flags.update(FieldTypes.field_types);
_flags.update(FieldStatus.field_status);
def set_stringIds(self, value):
"""Verifies stringIds according to regexp from specification XMPP-0323.
:param value: string
"""
pattern = re.compile("^\d+([|]\w+([.]\w+)*([|][^,]*)?)?(,\d+([|]\w+([.]\w+)*([|][^,]*)?)?)*$")
if pattern.match(value) is not None:
self.xml.stringIds = value;
else:
# Bad content, add nothing
pass
return self
def _get_flags(self):
"""
Helper function for getting of flags. Returns all flags in
dictionary format: { "flag name": "flag value" ... }
"""
flags = {};
for f in self._flags:
if not self[f] == "":
flags[f] = self[f];
return flags;
def _set_flags(self, flags):
"""
Helper function for setting of flags.
Arguments:
flags -- Flags in dictionary format: { "flag name": "flag value" ... }
"""
for f in self._flags:
if flags is not None and f in flags:
self[f] = flags[f];
else:
self[f] = None;
def _get_typename(self):
return "invalid type, use subclasses!";
class Timestamp(ElementBase):
""" Timestamp element in response Node """
namespace = 'urn:xmpp:iot:sensordata'
name = 'timestamp'
plugin_attrib = name
interfaces = set(['value','datas'])
def __init__(self, xml=None, parent=None):
ElementBase.__init__(self, xml, parent);
self._datas = set()
def setup(self, xml=None):
"""
Populate the stanza object using an optional XML object.
Overrides ElementBase.setup
Caches item information.
Arguments:
xml -- Use an existing XML object for the stanza's values.
"""
ElementBase.setup(self, xml)
self._datas = set([data['name'] for data in self['datas']])
def add_data(self, typename, name, value, module=None, stringIds=None, unit=None, dataType=None, flags=None):
"""
Add a new data element.
Arguments:
typename -- The type of data element (numeric, string, boolean, dateTime, timeSpan or enum)
value -- The value of the data element
module -- [optional] language module to use for the data element
stringIds -- [optional] The stringIds used to find associated text in the language module
unit -- [optional] The unit. Only applicable for type numeric
dataType -- [optional] The dataType. Only applicable for type enum
"""
if name not in self._datas:
dataObj = None;
if typename == "numeric":
dataObj = DataNumeric(parent=self);
dataObj['unit'] = unit;
elif typename == "string":
dataObj = DataString(parent=self);
elif typename == "boolean":
dataObj = DataBoolean(parent=self);
elif typename == "dateTime":
dataObj = DataDateTime(parent=self);
elif typename == "timeSpan":
dataObj = DataTimeSpan(parent=self);
elif typename == "enum":
dataObj = DataEnum(parent=self);
dataObj['dataType'] = dataType;
dataObj['name'] = name;
dataObj['value'] = value;
dataObj['module'] = module;
dataObj['stringIds'] = stringIds;
if flags is not None:
dataObj._set_flags(flags);
self._datas.add(name)
self.iterables.append(dataObj)
return dataObj
return None
def del_data(self, name):
"""
Remove a single data element.
Arguments:
data_name -- The data element name to remove.
"""
if name in self._datas:
datas = [i for i in self.iterables if isinstance(i, Field)]
for data in datas:
if data['name'] == name:
self.xml.remove(data.xml)
self.iterables.remove(data)
return True
return False
def get_datas(self):
""" Return all data elements. """
datas = set()
for data in self['substanzas']:
if isinstance(data, Field):
datas.add(data)
return datas
def set_datas(self, datas):
"""
Set or replace all data elements. The given elements must be in a
list or set where each item is a data element (numeric, string, boolean, dateTime, timeSpan or enum)
Arguments:
datas -- A series of data elements.
"""
self.del_datas()
for data in datas:
self.add_data(typename=data._get_typename(), name=data['name'], value=data['value'], module=data['module'], stringIds=data['stringIds'], unit=data['unit'], dataType=data['dataType'], flags=data._get_flags())
def del_datas(self):
"""Remove all data elements."""
self._datas = set()
datas = [i for i in self.iterables if isinstance(i, Field)]
for data in datas:
self.xml.remove(data.xml)
self.iterables.remove(data)
class DataNumeric(Field):
"""
Field data of type numeric.
Note that the value is expressed as a string.
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'numeric'
plugin_attrib = name
interfaces = set(['value', 'unit']);
interfaces.update(Field.interfaces);
def _get_typename(self):
return "numeric"
class DataString(Field):
"""
Field data of type string
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'string'
plugin_attrib = name
interfaces = set(['value']);
interfaces.update(Field.interfaces);
def _get_typename(self):
return "string"
class DataBoolean(Field):
"""
Field data of type boolean.
Note that the value is expressed as a string.
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'boolean'
plugin_attrib = name
interfaces = set(['value']);
interfaces.update(Field.interfaces);
def _get_typename(self):
return "boolean"
class DataDateTime(Field):
"""
Field data of type dateTime.
Note that the value is expressed as a string.
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'dateTime'
plugin_attrib = name
interfaces = set(['value']);
interfaces.update(Field.interfaces);
def _get_typename(self):
return "dateTime"
class DataTimeSpan(Field):
"""
Field data of type timeSpan.
Note that the value is expressed as a string.
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'timeSpan'
plugin_attrib = name
interfaces = set(['value']);
interfaces.update(Field.interfaces);
def _get_typename(self):
return "timeSpan"
class DataEnum(Field):
"""
Field data of type enum.
Note that the value is expressed as a string.
"""
namespace = 'urn:xmpp:iot:sensordata'
name = 'enum'
plugin_attrib = name
interfaces = set(['value', 'dataType']);
interfaces.update(Field.interfaces);
def _get_typename(self):
return "enum"
class Done(ElementBase):
""" Done element used to signal that all data has been transferred """
namespace = 'urn:xmpp:iot:sensordata'
name = 'done'
plugin_attrib = name
interfaces = set(['seqnr'])
class Cancel(ElementBase):
""" Cancel element used to signal that a request shall be cancelled """
namespace = 'urn:xmpp:iot:sensordata'
name = 'cancel'
plugin_attrib = name
interfaces = set(['seqnr'])
class Cancelled(ElementBase):
""" Cancelled element used to signal that cancellation is confirmed """
namespace = 'urn:xmpp:iot:sensordata'
name = 'cancelled'
plugin_attrib = name
interfaces = set(['seqnr'])
register_stanza_plugin(Iq, Request)
register_stanza_plugin(Request, RequestNode, iterable=True)
register_stanza_plugin(Request, RequestField, iterable=True)
register_stanza_plugin(Iq, Accepted)
register_stanza_plugin(Message, Failure)
register_stanza_plugin(Failure, Error)
register_stanza_plugin(Iq, Rejected)
register_stanza_plugin(Message, Fields)
register_stanza_plugin(Fields, FieldsNode, iterable=True)
register_stanza_plugin(FieldsNode, Timestamp, iterable=True)
register_stanza_plugin(Timestamp, Field, iterable=True)
register_stanza_plugin(Timestamp, DataNumeric, iterable=True)
register_stanza_plugin(Timestamp, DataString, iterable=True)
register_stanza_plugin(Timestamp, DataBoolean, iterable=True)
register_stanza_plugin(Timestamp, DataDateTime, iterable=True)
register_stanza_plugin(Timestamp, DataTimeSpan, iterable=True)
register_stanza_plugin(Timestamp, DataEnum, iterable=True)
register_stanza_plugin(Message, Started)
register_stanza_plugin(Iq, Cancel)
register_stanza_plugin(Iq, Cancelled)
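# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the original module) of
# building an XEP-0323 sensor-data request with the stanza classes above. The
# JID, node and field names are placeholders, and the Iq stanza is constructed
# standalone instead of through a connected SleekXMPP client.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    iq = Iq()
    iq['type'] = 'get'
    iq['to'] = 'device@example.org/sensor'
    req = iq['req']                    # Request plugin registered above
    req['seqnr'] = '1'
    req.add_node('Device01')           # limit the readout to one node ...
    req.add_field('Temperature')       # ... and to one field
    print(iq)                          # dumps the generated <iq><req/></iq> XML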
|
|
from django.shortcuts import render as render_to_response, redirect
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404
from django.contrib.auth.forms import UserCreationForm
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Count
from djangodash.forms import *
from djangodash.models import *
import settings
import json
def render(template, data, request):
"""
Wrapper for rendering a response.
"""
return render_to_response(request, template, data)
def home(request):
"""
Home page.
"""
user = request.user
is_logged_in = user.is_authenticated()
sort = request.GET.get("sort_options")
if sort: sort = sort.strip()
if request.method == "POST":
if not user.is_authenticated():
return redirect(reverse("home"))
form = ThreadForm(request.POST)
if form.is_valid():
# Create a new thread
thread_content = form.cleaned_data["content"]
new_thread = Thread(content=thread_content,
creator=user)
new_thread.save()
return redirect(reverse("home"))
else:
form = ThreadForm()
# Get all threads
num_threads = settings.THREADS_PER_PAGE
sort_by = "-date"
if sort == "numcomments":
sort_by = "-comment_count"
if sort != "personal":
threads = Thread.objects.all() \
.annotate(comment_count=Count('comment')) \
.order_by(sort_by)
else:
# Personalize results:
# Return the threads created by the users who the current
# user is following.
if not is_logged_in:
threads = []
else:
following_ids = user.get_profile().following.values_list("id", flat=True)
sort_by = "-date"
threads = Thread.objects.all() \
.filter(creator__pk__in=following_ids) \
.annotate(comment_count=Count('comment')) \
.order_by(sort_by)
# Default to showing most recent results
if sort is None:
sort = "recent"
return render("home.html",
{"user":user,
"is_logged_in":user.is_authenticated(),
"threads":threads,
"num_threads":num_threads,
"form":form,
"selected":sort},
request)
def thread(request, thread_id):
"""
Page for a thread. This page will contain all the comments
    associated with the given thread.
"""
user = request.user
# Get the thread
try:
thread = Thread.objects.get(id=thread_id)
except Thread.DoesNotExist:
raise Http404()
top_comments = Comment.objects.filter(thread=thread, parent=None)\
.order_by("-votes")
structure = [t.get_tree(t) for t in top_comments]
user_voting_data = {"positive":[], "negative":[]}
if user.is_authenticated():
# Get all the ids of the comments that the current user
# voted on. This is needed to highlight the arrows
# of those votes appropriately.
positive_ids = Vote.objects.filter(user=user,
comment__thread=thread,
vote_type=Vote.VOTE_UP) \
.values_list("comment_id", flat=True)
# Get ids of all negative votes
negative_ids = Vote.objects.filter(user=user,
comment__thread=thread,
vote_type=Vote.VOTE_DOWN) \
.values_list("comment_id", flat=True)
user_voting_data = {
"positive":positive_ids,
"negative":negative_ids,
}
return render("thread.html",
{"thread":thread,
"structure":structure,
"user_voting_data":user_voting_data,
"is_logged_in":user.is_authenticated(),
"user":user},
request)
# make this require a POST request
@login_required
@csrf_exempt
def add_comment(request):
"""
Add a new comment.
"""
user = request.user
thread_id = request.POST.get("thread_id")
thread = Thread.objects.get(id=thread_id)
comment_id = request.POST.get("comment_id")
comment = None
if comment_id:
comment = Comment.objects.get(id=comment_id)
form = CommentForm(request.POST)
if form.is_valid():
content = form.cleaned_data["content"]
new_comment = Comment(author=user,
content=content,
parent=comment,
thread=thread)
new_comment.save()
# Redirect back to the thread
return redirect(reverse("thread", kwargs={"thread_id": int(thread_id)}) +
"#first_%s" % comment_id)
@login_required
@csrf_exempt
# require post
def delete(request):
"""
Delete a comment or a thread, based on the request.
"""
user = request.user
obj_type = request.POST.get("type")
obj_id = request.POST.get("_id")
if obj_type == "thread":
# Get the thread
try:
thread = Thread.objects.get(id=int(obj_id))
except Thread.DoesNotExist:
return redirect(reverse("home"))
# We must be the creator of the thread in order to delete it
if thread.creator != user:
return redirect(reverse('home'))
thread.delete()
# Create redirect url
scroll_id = int(obj_id) - 1
append_to_url = "#" + str(scroll_id) if scroll_id > 0 else ""
return redirect(reverse("home") + append_to_url)
return redirect(reverse("home"))
# post required
@login_required
@csrf_exempt
def vote(request):
"""
Register a vote for a comment.
"""
user = request.user
comment_id = request.POST.get("comment_id")
action = request.POST.get("action")
# Get the comment
try:
comment = Comment.objects.get(id=int(comment_id))
except Comment.DoesNotExist:
data = json.dumps({"error":True})
return HttpResponse(data)
# Does a Vote object exist?
try:
vote = Vote.objects.get(user=user,
comment=comment)
except Vote.DoesNotExist:
# We are voting, essentially, for the first time on
# this comment.
vote_type = Vote.VOTE_UP
vote = Vote(user=user,
comment=comment,
vote_type=vote_type)
# Modify the comment's vote count
if action == "up":
comment.votes += 1
else:
comment.votes -= 1
comment.save()
        vote.vote_type = Vote.VOTE_UP if action == "up" else Vote.VOTE_DOWN
vote.save()
# Return a success response
data = json.dumps({"error":False,
"score":comment.votes,
"color_up":1 if action=="up" else 0,
"color_down":0 if action=="up" else 1})
return HttpResponse(data)
# At this point, a vote exists
vote_type = vote.vote_type
# Up and we Up, after this we are neutral
if vote_type == Vote.VOTE_UP and action == "up":
# This means we want to take back the vote
comment.votes -= 1
comment.save()
# Back to neutral state, delete Vote object
vote.delete()
data = json.dumps({"error":False, "score":comment.votes,
"color_up":0, "color_down":0})
return HttpResponse(data)
# Up and we Down, after this we are Down
if vote_type == Vote.VOTE_UP and action=="down":
comment.votes -= 2
comment.save()
vote.vote_type = Vote.VOTE_DOWN
vote.save()
data = json.dumps({"error":False, "score":comment.votes,
"color_up":0, "color_down":1})
return HttpResponse(data)
# Down and we Down, after this we are neutral
if vote_type == Vote.VOTE_DOWN and action == "down":
# Take back the down vote
comment.votes += 1
comment.save()
vote.delete()
data = json.dumps({"error":False, "score":comment.votes,
"color_up":0, "color_down":0})
return HttpResponse(data)
# Down and we Up, after this we are Up
if vote_type == Vote.VOTE_DOWN and action == "up":
comment.votes += 2
comment.save()
vote.vote_type = Vote.VOTE_UP
vote.save()
data = json.dumps({"error":False, "score":comment.votes,
"color_up":1, "color_down":0})
return HttpResponse(data)
data = json.dumps({"error":False})
return HttpResponse(data)
def user_profile(request, username):
"""
Show the profile page for a user.
"""
user = request.user
# Get the user whose profile we are looking at
try:
profile_user = User.objects.get(username=username)
except User.DoesNotExist:
raise Http404()
is_logged_in = user.is_authenticated()
is_following = False
if is_logged_in:
# Is the logged in user following the user whose
# profile we are looking at?
if user.get_profile().is_following(profile_user):
is_following = True
num_comments = settings.COMMENTS_PER_PROFILE_PAGE
comments = Comment.objects.filter(author=profile_user)
return render("profile.html", {
"user":user,
"profile_user":profile_user,
"comments":comments,
"num_comments":num_comments,
"is_logged_in":is_logged_in,
"is_following":is_following,
"my_profile":user==profile_user,
}, request)
@login_required
def follow(request):
"""
Follow or unfollow a user, based on the request.
"""
user = request.user
profile = user.get_profile()
profile_user_id = request.POST.get("profile_user_id")
action = request.POST.get("action")
# Get the profile user
try:
profile_user = User.objects.get(id=int(profile_user_id))
except User.DoesNotExist:
return redirect(reverse("home"))
if action == "follow":
# Follow user
if not profile.is_following(profile_user):
profile.add_follower(profile_user)
else:
# Unfollow user
if profile.is_following(profile_user):
profile.remove_follower(profile_user)
return redirect(reverse("user", kwargs={"username":profile_user.username}))
def login(request):
"""
Display the login page and log the user in.
"""
next = request.GET.get("next")
# Make absolute url
if next is not None and not next.startswith("/"):
next = "/" + next
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
user = authenticate(username=username,
password=password)
if user is not None:
auth_login(request, user)
if next is not None:
return redirect(next)
else:
return redirect(reverse("home"))
# Incorrect username/password
return render("login.html", {"form":form,
"login_error":True}, request)
# Invalid form
return render("login.html",{"form":form}, request)
else:
assert request.method == "GET"
form = LoginForm()
return render("login.html",
{"form":form},
request)
def register(request):
"""
User registration.
"""
user = request.user
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
new_user = form.save()
# Login as the new user
if new_user.is_active:
username = form.cleaned_data["username"]
password = form.cleaned_data["password1"]
user = authenticate(username=username,
password=password)
auth_login(request, user)
return redirect(reverse("home"))
else:
form = UserCreationForm()
return render("register.html",
{"form":form,
"is_logged_in":user.is_authenticated()},
request)
def logout(request):
"""
Log the user out.
"""
auth_logout(request)
return redirect(reverse("home"))
def about_comminator(request):
"""
About Comminator: help & info page
"""
return render("about_comminator.html", {}, request)
|
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Expansion utility methods
"""
import os
import socket
import pexpect
import time
import datetime
import shutil
import tinctest
from tinctest.lib import run_shell_command
from tinctest.lib import local_path
from tinctest.lib import Gpdiff
from mpp.models import MPPTestCase
from mpp.models import MPPDUT
from mpp.lib.PgHba import PgHba
from gppylib.commands.base import Command, REMOTE
from gppylib.db import dbconn
from gppylib.db.dbconn import UnexpectedRowsError
from gppylib.gparray import GpArray
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.lib.gpfilespace import Gpfilespace
@tinctest.skipLoading('scenario')
class GpExpandTests(MPPTestCase):
def log_and_test_gp_segment_config(self, message="logging gpsc"):
tinctest.logger.info( "[gpsc_out] :" + message)
seg_config = PSQL.run_sql_command("select * from gp_segment_configuration order by dbid", flags ='-q -t')
tinctest.logger.info( seg_config)
if not self.test_gpsc():
self.fail("primary and mirror are on the same host")
def test_gpsc(self):
"""
check if primary and mirror are on the same host
"""
max_segs = self.get_value_from_query("select max(content) from gp_segment_configuration ;")
tinctest.logger.info( max_segs)
ret_flag = True
for i in (range(int(max_segs)+1)):
prim_host = self.get_value_from_query("select hostname from gp_segment_configuration where role='p' and content ='%s';" %(i))
mirr_host = self.get_value_from_query("select hostname from gp_segment_configuration where role='m' and content ='%s';" %(i))
tinctest.logger.info( prim_host)
tinctest.logger.info( mirr_host)
if prim_host == mirr_host:
ret_flag = False
tinctest.logger.info( "mirror and primary are on the same host %s for content id %s" %(prim_host, i))
return ret_flag
def run_expansion(self, mapfile, dbname, mdd=os.environ.get("MASTER_DATA_DIRECTORY"), output_dir=os.environ.get("MASTER_DATA_DIRECTORY"), interview = False, validate=True, validate_gpseg_conf=True,
validate_pghba=True):
"""
Run an expansion test based on the mapping file
"""
outfile = output_dir + "/run_expansion.out"
errfile = output_dir + "/run_expansion.err"
self.log_and_test_gp_segment_config(message="before running epxnasion")
cmd = Command(name='run gpexpand', cmdStr="export MASTER_DATA_DIRECTORY=%s; echo -e \"y\\n\" | gpexpand -i %s -D %s" % (mdd, mapfile, dbname))
tinctest.logger.info("Running expansion setup: %s" %cmd)
try:
cmd.run(validateAfter=validate)
except Exception, e:
self.fail("gpexpand failed. \n %s" %e)
if validate_gpseg_conf:
self._validate_gpseg_conf(mapfile)
if validate_pghba:
# Validate the new entries that will be added after expansion in pg_hba
self._validate_pghba_entries()
results = cmd.get_results()
str_uni_idx ="Tables with unique indexes exist. Until these tables are successfully"
found_uni_idx_msg = False
for line in results.stdout.splitlines():
if line.find(str_uni_idx) != -1:
found_uni_idx_msg = True
if found_uni_idx_msg == False:
tinctest.logger.error("stdout from failed index in expand command: %s" % results.stdout)
tinctest.logger.error("stderr from failed index in expand command: %s" % results.stderr)
self.fail("Message for unique indexes not printed during gpexpand")
with open(outfile, 'w') as output_file:
output_file.write(results.stdout)
with open(errfile, 'w') as output_file:
output_file.write(results.stderr)
self.log_and_test_gp_segment_config(message="after running expansion")
return results
def _validate_gpseg_conf(self, mapfile):
"""
Validate if the new hosts are added to gp_segment_configuration table
Parse the expansion map and populate the datafields into a list
"""
tinctest.logger.info("Verifying expanded segments in gp_segment_configuration table ...")
with open(mapfile) as fp:
for line in fp:
tinctest.logger.info("Checking for segment: %s" %line)
fields = line.split(':')
if len(fields) == 8:
cmd = """select count(*)
from gp_segment_configuration
where hostname = '%s'
and address = '%s'
and port = %s
and content = %s
and role = '%s'
and replication_port = %s""" % (fields[0], fields[1], fields[2],fields[5], fields[6], fields[7])
else:
cmd = """select count(*)
from gp_segment_configuration
where hostname = '%s'
and address = '%s'
and port = %s
and content = %s
and role = '%s'""" % (fields[0], fields[1], fields[2], fields[5], fields[6])
# CHECK FOR DBID ONCE MPP-24082 is RESOLVED
with dbconn.connect(dbconn.DbURL()) as conn:
row = dbconn.execSQLForSingleton(conn, cmd)
if row != 1:
self.log_and_test_gp_segment_config(message="failed gpexpand validation")
self.fail("Expected segment not found in gp_segment_configuration: %s" %line)
def _validate_pghba_entries(self):
"""
Validate if new entries for all the hosts are added to pg_hba.conf files in all the segments
"""
tinctest.logger.info("Verifying pg_hba entries for all segment hosts")
segment_dirs_sql = """select distinct hostname, fselocation, content from gp_segment_configuration, pg_filespace_entry
where content > -1 and fsedbid = dbid"""
dburl = dbconn.DbURL()
pg_hba_files = []
hosts = set()
with dbconn.connect(dburl) as conn:
cursor = dbconn.execSQL(conn, segment_dirs_sql)
try:
for row in cursor.fetchall():
host = row[0]
segment_dir = row[1]
segment_no = row[2]
pg_hba = os.path.join(segment_dir, 'pg_hba.conf')
pg_hba_temp = '/tmp/pg_hba_%s_%s' %(host, segment_no)
if not "cdbfast_fs" in segment_dir and not "filespace" in segment_dir:
                        # We don't want to do this for filespace entries in pg_filespace_entry.
                        # The filespace names have the prefix cdbfast_fs.
                        # So if the keyword cdbfast_fs appears in the dirname, we skip it.
if os.path.exists(pg_hba_temp):
os.remove(pg_hba_temp)
cmdstr = 'scp %s:%s %s' %(host, pg_hba, pg_hba_temp)
if not run_shell_command(cmdstr=cmdstr, cmdname='copy over pg_hba'):
raise Exception("Failure while executing command: %s" %cmdstr)
self.assertTrue(os.path.exists(pg_hba_temp))
pg_hba_files.append(pg_hba_temp)
hosts.add(host)
finally:
cursor.close()
for f in pg_hba_files:
tinctest.logger.info("Verifying pg_hba entries for file: %s" %f)
self._verify_host_entries_in_pg_hba(f, hosts)
tinctest.logger.info("Completed verifying pg_hba entries for all segment hosts successfully")
def _verify_host_entries_in_pg_hba(self, filename, hosts):
"""
Verify that a valid trust entry is there in pg_hba for every host in hosts
@param filename: Complete path to the pg_hba file
@type filename: string
@param hosts: A list of hostnames whose entries are expected to be present in pg_hba
@type hosts: set
"""
pg_hba = PgHba(filename)
for host in hosts:
matches = pg_hba.search(type='host', user='all', database='all', address=socket.gethostbyname(host),
authmethod='trust')
self.assertTrue(len(matches) >= 1)
def run_redistribution(self, dbname, output_dir=os.environ.get("MASTER_DATA_DIRECTORY"),mdd=os.environ.get("MASTER_DATA_DIRECTORY"), use_parallel_expansion=False, number_of_parallel_table_redistributed=4, validate=True, validate_redistribution=True):
"""
Run data redistribution
"""
outfile = output_dir +"/run_redistribution.out"
errfile = output_dir +"/run_redistribution.err"
self.log_and_test_gp_segment_config(message="beforer running redistribution")
if use_parallel_expansion:
cmd = Command(name='run gpexpand redistribute', cmdStr="export MASTER_DATA_DIRECTORY=%s; gpexpand -n %s -D %s" %(mdd, number_of_parallel_table_redistributed, dbname))
tinctest.logger.info("Running data redistribution with parallel expansion: %s" %cmd)
else:
cmd = Command(name='run gpexpand redistribute', cmdStr="export MASTER_DATA_DIRECTORY=%s; gpexpand -D %s" %(mdd, dbname))
tinctest.logger.info("Running data redistribution: %s" %cmd)
cmd.run(validateAfter=validate)
if validate_redistribution:
self._validate_redistribution()
results = cmd.get_results()
with open(outfile, 'w') as output_file:
output_file.write(results.stdout)
with open(errfile, 'w') as output_file:
output_file.write(results.stderr)
self.log_and_test_gp_segment_config(message="after running redistribution")
return results
def interview(self, mdd, primary_data_dir, mirror_data_dir, new_hosts, use_host_file, num_new_segs=0, filespace_data_dir="", mapfile="/tmp/gpexpand_input"):
'''
@param new_hosts comma separated list of hostnames
NOTE: The current interview process uses pexpect. It assumes that number_of_expansion_segments is exactly 1 and we are using filespaces
'''
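        # The pexpect dialog below drives gpexpand's interactive interview:
        #   1. confirm starting a new system expansion,
        #   2. supply the new host list (skipped when a host file is passed with -f),
        #   3. pick the 'grouped' mirroring strategy,
        #   4. give the number of new primary segments per host,
        #   5. answer the primary/mirror data-directory and filespace prompts,
        # and finally copy the generated gpexpand_inputfile (plus its .fs
        # companion, if one was produced) to the requested mapfile path.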
self.log_and_test_gp_segment_config(message="Before interview")
tinctest.logger.info("Expansion host list for interview: %s" %new_hosts)
if use_host_file:
segment_host_file = local_path("new_host_file")
with open(segment_host_file, 'w') as f:
f.write(new_hosts.replace(",", "\n"))
shell_cmd = 'rm -fv gpexpand_inputfile*;export MASTER_DATA_DIRECTORY=%s;gpexpand -f %s -D %s' %(mdd, segment_host_file, os.environ.get('PGDATABASE'))
else:
shell_cmd = 'rm -fv gpexpand_inputfile*;export MASTER_DATA_DIRECTORY=%s;gpexpand -D %s' %(mdd, os.environ.get('PGDATABASE'))
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect ('Would you like to initiate a new System Expansion Yy|Nn (default=N):')
tinctest.logger.info("pexpect 1: %s" %child.before)
child.sendline ('y')
if not use_host_file:
res = child.expect(['Are you sure you want to continue with this gpexpand session',
'Enter a blank line to only add segments to existing hosts'])
# In case some of the cluster is configured with 'localhost',
# the script finds it is not the "standard" configuration.
# We don't care this case because the test is only to verify the
# generated file.
if res == 0:
child.sendline('y')
child.expect('Enter a blank line to only add segments to existing hosts')
tinctest.logger.info("pexpect 2: %s" %child.before)
child.sendline (new_hosts)
child.expect ('What type of mirroring strategy would you like?')
else:
res = child.expect(['Are you sure you want to continue with this gpexpand session',
'What type of mirroring strategy would you like?'])
# In case some of the cluster is configured with 'localhost',
# the script finds it is not the "standard" configuration.
# We don't care this case because the test is only to verify the
# generated file.
if res == 0:
child.sendline('y')
child.expect('What type of mirroring strategy would you like?')
tinctest.logger.info("pexpect 3: %s" %child.before)
child.sendline ('grouped')
child.expect ('How many new primary segments per host do you want to add')
tinctest.logger.info("pexpect 4: %s" %child.before)
child.sendline (str(num_new_segs))
count_filespaces = self.get_value_from_query("select distinct count (*) from gp_persistent_filespace_node;");
for i in range(int(num_new_segs)):
child.expect('Enter new primary data directory')
tinctest.logger.info("pexpect 5: %s" %child.before)
child.sendline (primary_data_dir)
for j in range(int(count_filespaces)):
child.expect('Enter new file space location for file space name')
tinctest.logger.info("pexpect 5: %s" %child.before)
child.sendline (filespace_data_dir+"/filespace_pri_"+str(j))
for i in range(int(num_new_segs)):
child.expect('Enter new mirror data directory')
tinctest.logger.info("pexpect 6: %s" %child.before)
child.sendline (mirror_data_dir)
for j in range(int(count_filespaces)):
child.expect('Enter new file space location for file space name')
tinctest.logger.info("pexpect 5: %s" %child.before)
child.sendline (filespace_data_dir+"/filesapce_mir_"+str(j))
child.expect('Please review the file')
mapfile_interview = ""
mapfile_interview_fs = ""
cur_dir=os.getcwd()
for f in os.listdir(cur_dir):
if f.startswith('gpexpand_inputfile'):
if not f.endswith('.fs'):
mapfile_interview=os.getcwd()+"/"+f
if f.endswith('.fs'):
mapfile_interview_fs=os.getcwd()+"/"+f
tinctest.logger.info("Mapfile generated by interview: %s" %mapfile_interview)
shutil.copyfile(mapfile_interview, mapfile)
if mapfile_interview_fs != "":
shutil.copyfile(mapfile_interview_fs, mapfile+".fs")
def create_filespace_dirs(self, primary_data_dir, mirror_data_dir, filespace_data_dir):
"""
        creates necessary directories for expansion
"""
cmd = "select fsname from pg_filespace where fsname!='pg_system';"
list_of_filespaces = PSQL.run_sql_command(cmd, flags ='-q -t').strip().split("\n ")
segment_host_file = '/tmp/segment_hosts'
for filespaces in list_of_filespaces:
fs_path_pri= os.path.join(primary_data_dir, filespaces ,"primary/")
fs_path_mir= os.path.join(mirror_data_dir, filespaces , "mirror/")
self.create_segment_dirs(fs_path_pri, fs_path_mir,"make filespace prim and mirr dirs" )
count_filespaces = self.get_value_from_query("select distinct count (*) from gp_persistent_filespace_node;");
for j in range(int(count_filespaces)):
fs_path_pri= filespace_data_dir+"/filespace_pri_"+str(j)
fs_path_mir= filespace_data_dir+"/filesapce_mir_"+str(j)
self.create_segment_dirs(fs_path_pri, fs_path_mir,"make filespace dirs" )
def create_segment_dirs(self, fs_path_pri, fs_path_mir, cmd_name):
""" helper method to create dirs """
        # The test creates the host file in the tmp directory during gpinitsystem; we assume it still exists.
segment_host_file = '/tmp/segment_hosts'
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpssh -f %s -e 'mkdir -p %s'" %(segment_host_file, fs_path_pri), cmd_name, res)
if res['rc'] > 0:
raise Exception("Failed to create directories on segments")
run_shell_command("gpssh -f %s -e 'mkdir -p %s'" %(segment_host_file, fs_path_mir), cmd_name, res)
if res['rc'] > 0:
raise Exception("Failed to create directories on segments")
def check_help_file(self):
# since gpexpand --help output differs for 4.2.x & 4.3.x we have to use different ans files
# we invoke the MPPDUT object to get product & version information
dut = MPPDUT()
dut._get_product_version()
ans_file_ver = ''
if dut.product == 'gpdb' and dut.version_string.find("4.2") > -1:
ans_file_ver = '_4.2'
ans_file=local_path("helptext_expected%s" %ans_file_ver)
out_file=local_path("helptext_output_"+datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')+".out")
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command('gpexpand --help > %s 2>&1' %out_file, 'gpexpand help', res)
diff_res = Gpdiff.are_files_equal(out_file,ans_file)
if not diff_res:
self.fail("differences encountered in help files : %s %s" %(ans_file, out_file))
def mirror_and_catalog_validation(self):
'''
        @summary: run gpcheckcat and gpcheckmirrorintegrity
'''
###psql.run_shell_command("CHECKPOINT; CHECKPOINT; CHECKPOINT;CHECKPOINT; CHECKPOINT;")
###sleep(30) # sleep for some time for the segments to be in sync before validation
self.dbstate = DbStateClass('run_validation')
tinctest.logger.info("running gpcheckcat and gpcheckmirrorintegrity")
outfile = local_path("gpcheckcat_"+datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')+".out")
self.dbstate.check_catalog(outputFile=outfile)
self.dbstate.check_mirrorintegrity()
def get_value_from_query(self, sql_cmd):
res = PSQL.run_sql_command(sql_cmd, flags ='-q -t')
res = res.replace("\n", "")
res = res.rstrip()
res = res.lstrip()
#cannot use res.strip because we are using this to get timestamp also and we dont want to remove the space between date and time
return res
def run_redistribution_with_duration(self, dbname, output_dir=os.environ.get("MASTER_DATA_DIRECTORY"), mdd=os.environ.get("MASTER_DATA_DIRECTORY"), use_end_time = False,validate=True, validate_redistribution=True):
"""
        Run data redistribution with a duration limit.
        There are two aspects:
        1. The utility redistributes tables until the last table in the schema is successfully marked completed,
        2. or until the specified duration or end time is reached.
        Two gpexpand invocations are used: the first redistributes only part of the data (validating the second point),
        and the second redistributes the remainder with the maximum duration of 60 hours (validating the first point).
"""
bytes_left = " "
outfile = output_dir +"/run_redistribution_with_duration_1.out"
errfile = output_dir +"/run_redistribution_with_duration_1.err"
if use_end_time:
end_time = self.get_value_from_query("select LOCALTIMESTAMP(0) + interval '4 seconds';");
cmd = Command(name='run gpexpand redistribute', cmdStr="export MASTER_DATA_DIRECTORY=%s; gpexpand -e '%s' -D %s" %(mdd, end_time, dbname))
tinctest.logger.info("Running data redistribution with end_time: %s" %cmd)
else:
cmd = Command(name='run gpexpand redistribute', cmdStr="export MASTER_DATA_DIRECTORY=%s; gpexpand -d 00:00:03 -D %s" %(mdd, dbname))
tinctest.logger.info("Running data redistribution with duration: %s" %cmd)
cmd.run(validateAfter=validate)
results = cmd.get_results()
with open(outfile, 'w') as output_file:
output_file.write(results.stdout)
with open(errfile, 'w') as output_file:
output_file.write(results.stderr)
sql_cmd ="select value from gpexpand.expansion_progress where name='Bytes Left';"
bytes_left = PSQL.run_sql_command(sql_cmd, flags ='-q -t')
        tables_completed = PSQL.run_sql_command("select count(*) from gpexpand.status_detail where status='COMPLETED';", flags ='-q -t')
        if bytes_left == " " or tables_completed == 0:
            self.fail("Either all or none of the tables were redistributed in the first stage of redistribution_with_duration. Bytes left: %s, tables completed: %s" %(bytes_left, tables_completed))
        # Make sure that the workload does not contain large tables, to avoid the redistribution starting
        # with a table too big to complete within the 3-second duration.
        # TODO: add a check here to count the rows in all tables.
outfile = output_dir +"/run_redistribution_with_duration_2.out"
errfile = output_dir +"/run_redistribution_with_duration_2.err"
cmd = Command(name='run gpexpand redistribute', cmdStr="export MASTER_DATA_DIRECTORY=%s; gpexpand -d 60:00:00 -D %s" %(mdd, dbname))
tinctest.logger.info("Running data redistribution with duration 60:00:00 to redistrubute remainign data: %s" %cmd)
cmd.run(validateAfter=validate)
if validate_redistribution:
self._validate_redistribution()
results = cmd.get_results()
with open(outfile, 'w') as output_file:
output_file.write(results.stdout)
with open(errfile, 'w') as output_file:
output_file.write(results.stderr)
self.log_and_test_gp_segment_config(message="after running redistribution")
return results
def check_number_of_parallel_tables_expanded(self, number_of_parallel_table_redistributed):
tinctest.logger.info("in check_number_of_parallel_tables_expanded")
tables_in_progress = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='IN PROGRESS';")
tables_not_started = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='NOT STARTED';")
tables_completed = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='COMPLETED';")
count = 0
max_parallel = -1
prev_count = int(number_of_parallel_table_redistributed)
while int(tables_in_progress) != int(number_of_parallel_table_redistributed) and int(tables_completed)<int(number_of_parallel_table_redistributed):
tables_in_progress = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='IN PROGRESS';")
tables_completed = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='COMPLETED';")
tinctest.logger.info("waiting to reach desired number of parallel redistributions \ntables_completed : " + tables_completed)
tinctest.logger.info("tables_in_progress :"+ tables_in_progress)
if int(tables_in_progress) > max_parallel:
max_parallel = int(tables_in_progress)
if max_parallel < int(number_of_parallel_table_redistributed):
self.fail("The specified value was never reached.")
while True :
tables_in_progress = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='IN PROGRESS';")
tables_completed = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='COMPLETED';")
tinctest.logger.info("Redistributing in parallel - tables_completed : " + tables_completed)
tinctest.logger.info("Redistributing in parallel - tables_in_progress :"+ tables_in_progress)
if int(tables_in_progress) > prev_count:
self.fail("The number of parallel tables being redistributed was not stable")
count = count +1
prev_count = int(tables_in_progress)
if int(tables_in_progress) == 0 and int(tables_completed) == int(number_of_parallel_table_redistributed):
break
tables_in_progress = self.get_value_from_query("select count(*) from gpexpand.status_detail where status='IN PROGRESS';")
sql_cmd = "select * from gpexpand.status_detail"
res = PSQL.run_sql_command(sql_cmd, out_file = "/data/gpexpand_psql.out", flags ='-q -t')
if int(tables_in_progress) != 0:
self.fail("Tables currently being redistributed in parallel is not as specified: In progress tables found %s" %(tables_in_progress))
def _validate_redistribution(self):
"""
This validates whether or not all the tables in all the databases are distributed across all the segments.
Assumption: Assumes that there are at least enough rows in each table to get them distributed across all the segments.
"""
return
def cleanup_expansion(self, dbname, output_dir=os.environ.get("MASTER_DATA_DIRECTORY"), mdd=os.environ.get("MASTER_DATA_DIRECTORY")):
"""
Run gpexpand to cleanup the expansion catalog
"""
outfile = output_dir +"/cleanup_expansion.out"
errfile = output_dir +"/cleanup_expansion.err"
self.log_and_test_gp_segment_config(message="after running redistribution")
cmd = Command(name='run gpexpand cleanup',
cmdStr='export MASTER_DATA_DIRECTORY=%s; echo -e \"y\\n\" | gpexpand -c -D %s' % (mdd, dbname))
tinctest.logger.info("Running expansion cleanup ...: %s" %cmd)
cmd.run(validateAfter=True)
results = cmd.get_results()
with open(outfile, 'w') as output_file:
output_file.write(results.stdout)
with open(errfile, 'w') as output_file:
output_file.write(results.stderr)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
try:
query = "SELECT count(*) FROM information_schema.schemata where schema_name='gpexpand';"
tinctest.logger.info("Executing query %s" %query)
row = dbconn.execSQLForSingleton(conn, query)
except UnexpectedRowsError, e:
tinctest.logger.exception(e)
raise Exception("Exception while executing query: %s" %query)
self.assertEquals(row, 0, "Cleanup of expansion failed")
self.log_and_test_gp_segment_config(message="after cleanup")
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.tasks_v2beta2.proto import (
cloudtasks_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
queue_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
task_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class CloudTasksStub(object):
"""Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListQueues = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ListQueues",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.FromString,
)
self.GetQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.CreateQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CreateQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.UpdateQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/UpdateQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.DeleteQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/DeleteQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.PurgeQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/PurgeQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.PauseQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/PauseQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.ResumeQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ResumeQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.GetIamPolicy = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.SetIamPolicy = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/SetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/TestIamPermissions",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
self.ListTasks = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ListTasks",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.FromString,
)
self.GetTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.CreateTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CreateTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.DeleteTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/DeleteTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.LeaseTasks = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/LeaseTasks",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.FromString,
)
self.AcknowledgeTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/AcknowledgeTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.RenewLease = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/RenewLease",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.CancelLease = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CancelLease",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.RunTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/RunTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
class CloudTasksServicer(object):
"""Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
def ListQueues(self, request, context):
"""Lists queues.
Queues are returned in lexicographical order.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetQueue(self, request, context):
"""Gets a queue.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateQueue(self, request, context):
"""Creates a queue.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless of whether
it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateQueue(self, request, context):
"""Updates a queue.
This method creates the queue if it does not exist and updates
the queue if it does exist.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless of whether
it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteQueue(self, request, context):
"""Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be created
for 7 days.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PurgeQueue(self, request, context):
"""Purges a queue by deleting all of its tasks.
All tasks created before this method is called are permanently deleted.
Purge operations can take up to one minute to take effect. Tasks
might be dispatched before the purge takes effect. A purge is irreversible.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PauseQueue(self, request, context):
"""Pauses the queue.
If a queue is paused then the system will stop dispatching tasks
until the queue is resumed via
[ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
when the queue is paused. A queue is paused if its
[state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ResumeQueue(self, request, context):
"""Resume a queue.
This method resumes a queue after it has been
[PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
[DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
WARNING: Resuming many high-QPS queues at the same time can
lead to target overloading. If you are resuming high-QPS
queues, follow the 500/50/5 pattern described in
[Managing Cloud Tasks Scaling
Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetIamPolicy(self, request, context):
"""Gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue].
Returns an empty policy if the resource exists and does not have a policy
set.
Authorization requires the following
[Google IAM](https://cloud.google.com/iam) permission on the specified
resource parent:
* `cloudtasks.queues.getIamPolicy`
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetIamPolicy(self, request, context):
"""Sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM permissions yet.
Project-level permissions are required to use the Cloud Console.
Authorization requires the following
[Google IAM](https://cloud.google.com/iam) permission on the specified
resource parent:
* `cloudtasks.queues.setIamPolicy`
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def TestIamPermissions(self, request, context):
"""Returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
If the resource does not exist, this will return an empty set of
permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
Note: This operation is designed to be used for building permission-aware
UIs and command-line tools, not for authorization checking. This operation
may "fail open" without warning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTasks(self, request, context):
"""Lists the tasks in a queue.
By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
due to performance considerations;
[response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
subset of information which is returned.
The tasks may be returned in any order. The ordering may change at any
time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetTask(self, request, context):
"""Gets a task.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateTask(self, request, context):
"""Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask command.
* For [App Engine queues][google.cloud.tasks.v2beta2.AppEngineHttpTarget], the maximum task size is
100KB.
* For [pull queues][google.cloud.tasks.v2beta2.PullTarget], the maximum task size is 1MB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteTask(self, request, context):
"""Deletes a task.
A task can be deleted if it is scheduled or dispatched. A task
cannot be deleted if it has completed successfully or permanently
failed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LeaseTasks(self, request, context):
"""Leases tasks from a pull queue for
[lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration].
This method is invoked by the worker to obtain a lease. The
worker must acknowledge the task via
[AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after they have
performed the work associated with the task.
The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
the worker needs to perform the work associated with the task. To
return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
[response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
[FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
requests are allowed per
queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
is returned when this limit is
exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
is also returned when
[max_tasks_dispatched_per_second][google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second]
is exceeded.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AcknowledgeTask(self, request, context):
"""Acknowledges a pull task.
The worker, that is, the entity that
[leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must call this method
to indicate that the work associated with the task has finished.
The worker must acknowledge a task within the
[lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
will expire and the task will become available to be leased
again. After the task is acknowledged, it will not be returned
by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
[GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
[ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RenewLease(self, request, context):
"""Renew the current lease of a pull task.
The worker can use this method to extend the lease by a new
duration, starting from now. The new task lease will be
returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CancelLease(self, request, context):
"""Cancel a pull task's lease.
The worker can use this method to cancel a task's lease by
setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
make the task available to be leased to the next caller of
[LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RunTask(self, request, context):
"""Forces a task to run now.
When this method is called, Cloud Tasks will dispatch the task, even if
the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
This command is meant to be used for manual debugging. For
example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
task after a fix has been made or to manually force a task to be
dispatched now.
The dispatched task is returned. That is, the task that is returned
contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
before the task is received by its target.
If Cloud Tasks receives a successful response from the task's
target, then the task will be deleted; otherwise the task's
[schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
[NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
task that has already succeeded or permanently failed.
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
[pull task][google.cloud.tasks.v2beta2.PullMessage].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_CloudTasksServicer_to_server(servicer, server):
rpc_method_handlers = {
"ListQueues": grpc.unary_unary_rpc_method_handler(
servicer.ListQueues,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.SerializeToString,
),
"GetQueue": grpc.unary_unary_rpc_method_handler(
servicer.GetQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"CreateQueue": grpc.unary_unary_rpc_method_handler(
servicer.CreateQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"UpdateQueue": grpc.unary_unary_rpc_method_handler(
servicer.UpdateQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"DeleteQueue": grpc.unary_unary_rpc_method_handler(
servicer.DeleteQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"PurgeQueue": grpc.unary_unary_rpc_method_handler(
servicer.PurgeQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"PauseQueue": grpc.unary_unary_rpc_method_handler(
servicer.PauseQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"ResumeQueue": grpc.unary_unary_rpc_method_handler(
servicer.ResumeQueue,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
),
"GetIamPolicy": grpc.unary_unary_rpc_method_handler(
servicer.GetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
"SetIamPolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetIamPolicy,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
),
"TestIamPermissions": grpc.unary_unary_rpc_method_handler(
servicer.TestIamPermissions,
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
),
"ListTasks": grpc.unary_unary_rpc_method_handler(
servicer.ListTasks,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.SerializeToString,
),
"GetTask": grpc.unary_unary_rpc_method_handler(
servicer.GetTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"CreateTask": grpc.unary_unary_rpc_method_handler(
servicer.CreateTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"DeleteTask": grpc.unary_unary_rpc_method_handler(
servicer.DeleteTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"LeaseTasks": grpc.unary_unary_rpc_method_handler(
servicer.LeaseTasks,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.SerializeToString,
),
"AcknowledgeTask": grpc.unary_unary_rpc_method_handler(
servicer.AcknowledgeTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"RenewLease": grpc.unary_unary_rpc_method_handler(
servicer.RenewLease,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"CancelLease": grpc.unary_unary_rpc_method_handler(
servicer.CancelLease,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
"RunTask": grpc.unary_unary_rpc_method_handler(
servicer.RunTask,
request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.FromString,
response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.tasks.v2beta2.CloudTasks", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
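# Hedged usage sketch (not part of the generated stubs above): one way to call the
# service through CloudTasksStub. The endpoint and the parent resource name are
# placeholders; a real Cloud Tasks deployment would normally use a secure channel
# with credentials instead of a plain insecure channel.
if __name__ == "__main__":
    channel = grpc.insecure_channel("localhost:8123")  # placeholder endpoint
    stub = CloudTasksStub(channel)
    request = google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest(
        parent="projects/example-project/locations/us-central1"  # placeholder parent
    )
    # List the queues under the placeholder parent and print their resource names.
    for queue in stub.ListQueues(request).queues:
        print(queue.name)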
|
|
# -*- coding: utf-8 -*-
import sys
from getpass import getpass
import argparse
import pkg_resources
import pyperclip
from .generator import *
from .utils import *
from .constants import *
def show_password_entropy(passwd, word_list):
"""Displays the password entropy calculation results."""
entropy = calculate_entropy(passwd, dict_set=word_list)
print("\nPassword length: %d characters" % len(passwd))
print("\nEntropy")
print("-------")
for charset, charset_name in list(PASSWORD_CHARSET_NAMES):
print(("{:<%d}" % LONGEST_CHARSET_NAME_LEN).format(charset_name) + " : " +
(("%.6f" % entropy[charset]) if charset in entropy else "not in character set"))
print("")
def main():
"""Main routine for handling command line functionality for passwdgen."""
version = pkg_resources.require("passwdgen")[0].version
parser = argparse.ArgumentParser(description="A password generation utility (v%s)." % version)
subparsers = parser.add_subparsers(help="The command to execute.", dest="command")
parser_info = subparsers.add_parser(
"info",
help=(
"Compute information about a password. If passwdgen has input piped into it via stdin, that " +
"will be interpreted as the password."
)
)
parser_info.add_argument(
"-d", "--dictionary",
default=None,
help="Path to the dictionary file to use. This must be a plain text file with one word per line."
)
parser_info.add_argument(
"-e", "--encoding",
default=None,
help=(
"The encoding to use when read/writing input/output files. " +
"(See https://docs.python.org/2/library/codecs.html#standard-encodings)"
)
)
parser_generate = subparsers.add_parser(
"generate",
help="Generate password(s)."
)
parser_generate.add_argument(
"-c", "--clipboard",
action="store_true",
help=(
"Copy the generated password to the clipboard (only for when generating a single password) instead of "+
"writing the password to stdout"
)
)
parser_generate.add_argument(
"-d", "--dictionary",
default=None,
help="Path to the dictionary file to use. This must be a plain text file with one word per line."
)
parser_generate.add_argument(
"-e", "--encoding",
default=None,
help=(
"The encoding to use when read/writing input/output files. " +
"(See https://docs.python.org/2/library/codecs.html#standard-encodings)"
)
)
parser_generate.add_argument(
"-i", "--info",
action="store_true",
help="Additionally display information about the generated password, including password entropy."
)
parser_generate.add_argument(
"-l", "--length",
type=int,
default=None,
help=(
"The default number of characters or words to generate, depending on which kind of password " +
"is being generated (a character- or dictionary-based one). Defaults: %d characters or %d words."
) % (DEFAULT_CHAR_PASSWORD_LENGTH, DEFAULT_WORD_PASSWORD_WORDS)
)
parser_generate.add_argument(
"-m", "--min-entropy",
default=None,
type=int,
help="The minimum entropy of the required password (optional). If length is specified, this will be ignored."
)
parser_generate.add_argument(
"-s", "--separator",
choices=PASSWORD_SEPARATOR_IDS,
default=SEP_DASH,
help=(
"The separator to use when generating passwords from dictionaries (default=%s)."
) % SEP_DASH
)
parser_generate.add_argument(
"--starting-letters",
default=None,
help=(
"The letters to use as initials for the generated words."
)
)
parser_generate.add_argument(
"-t", "--charset",
choices=PASSWORD_CHARSET_IDS,
default=PC_DICT,
help=(
"Which character set/approach to use when generating the password (default=\"%s\"). See the " +
"README.md file at https://github.com/thanethomson/passwdgen for more details."
) % PC_DICT
)
parser_rng = subparsers.add_parser(
"rng",
help="Test the quality of the operating system's random number generator."
)
parser_rng.add_argument(
"-s", "--sample-size",
type=int,
default=1000000,
help="Define the sample size to test with (default = 1,000,000)."
)
subparsers.add_parser(
"version",
help="Display the version of passwdgen and exit."
)
parser_wordlist = subparsers.add_parser(
"wordlist",
help="Utilities relating to word list manipulation."
)
subparsers_wordlist = parser_wordlist.add_subparsers(dest="wordlist_subcommand")
parser_wordlist_clean = subparsers_wordlist.add_parser(
"clean",
help="Cleans up a given word list, stripping punctuation, digits and whitespace."
)
parser_wordlist_clean.add_argument(
"input_file",
help="The input text file, one word per line, to be cleaned."
)
parser_wordlist_clean.add_argument(
"output_file",
help="The output file into which to write the cleaned word list."
)
parser_wordlist_clean.add_argument(
"-e", "--encoding",
default=None,
help=(
"The encoding to use when read/writing input/output files. " +
"(See https://docs.python.org/2/library/codecs.html#standard-encodings)"
)
)
args = parser.parse_args()
if args.command == "version":
print("passwdgen v%s" % version)
elif args.command == "info":
if sys.stdin.isatty():
passwd = getpass("Please enter the password to check: ")
else:
# if the input's been piped in
passwd = sys.stdin.read()
# strip off the single trailing newline
if passwd.endswith("\n"):
passwd = passwd[:-1]
word_list = load_word_list(filename=args.dictionary, encoding=args.encoding)
show_password_entropy(passwd, word_list)
elif args.command == "rng":
print("Testing OS RNG. Attempting to generate %d samples between 0 and 100 (inclusive). Please wait..." % args.sample_size)
result = secure_random_quality(args.sample_size)
print("\nStatistics")
print("----------")
print("Mean : %.6f (should approach %.3f as the sample size increases; %.3f%% difference)" % (
result['mean'],
result['expected_mean'],
result['mean_diff']
))
print("Standard deviation : %.6f (should be as close to %.6f as possible; %.3f%% difference)" % (
result['stddev'],
result['expected_stddev'],
result['stddev_diff']
))
print("Time taken : %.3f seconds\n" % result['time'])
elif args.command == "generate":
try:
word_list = load_word_list(filename=args.dictionary, encoding=args.encoding)
# dictionary-based password generation
if args.charset == PC_DICT:
# load our dictionary
passwd = words(
word_list,
separator=PASSWORD_SEPARATORS[args.separator],
word_count=args.length,
min_entropy=args.min_entropy,
starting_letters=args.starting_letters
)
else:
passwd = chars(
args.charset,
length=args.length,
min_entropy=args.min_entropy
)
if args.clipboard:
pyperclip.copy(passwd)
print("Password copied to clipboard.")
else:
print(passwd)
if args.info:
show_password_entropy(passwd, word_list)
except ValueError as e:
print("Error: %s" % e)
elif args.command == "wordlist":
if args.wordlist_subcommand == "clean":
print("Attempting to clean word list: %s" % args.input_file)
result = clean_word_list(
args.input_file,
args.output_file,
encoding=args.encoding
)
print("Cleaned file in %.3f seconds. Read %d words, wrote %d." % (
result["time"],
result["words_read"],
result["words_written"]
))
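# Entry-point guard (an addition, not part of the original module): lets the file be
# executed directly with the Python interpreter when the packaged console script for
# passwdgen is not installed.
if __name__ == "__main__":
    main()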
|
|
import json
import logging
import sys
import traceback
from uuid import UUID
from django.core.urlresolvers import resolve
from silk import models
from silk.collector import DataCollector
from silk.config import SilkyConfig
Logger = logging.getLogger('silk')
content_types_json = ['application/json',
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json']
content_type_form = ['multipart/form-data',
'application/x-www-form-urlencoded']
content_type_html = ['text/html']
content_type_css = ['text/css']
class DefaultEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, UUID):
            return str(o)
        # Anything else falls through (returns None) and is serialized as null
        # rather than raising TypeError.
def _parse_content_type(content_type):
"""best efforts on pulling out the content type and encoding from Content-Type header"""
try:
content_type = content_type.strip()
except AttributeError:
pass
char_set = None
if content_type.strip():
splt = content_type.split(';')
content_type = splt[0]
try:
raw_char_set = splt[1].strip()
key, char_set = raw_char_set.split('=')
if key != 'charset':
char_set = None
except (IndexError, ValueError):
pass
return content_type, char_set
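# Illustrative examples: _parse_content_type('application/json; charset=utf-8')
# returns ('application/json', 'utf-8'), while _parse_content_type('text/html')
# returns ('text/html', None).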
class RequestModelFactory(object):
"""Produce Request models from Django request objects"""
def __init__(self, request):
super(RequestModelFactory, self).__init__()
self.request = request
def content_type(self):
content_type = self.request.META.get('CONTENT_TYPE', '')
return _parse_content_type(content_type)
def encoded_headers(self):
"""
From Django docs (https://docs.djangoproject.com/en/1.6/ref/request-response/#httprequest-objects):
"With the exception of CONTENT_LENGTH and CONTENT_TYPE, as given above, any HTTP headers in the request are converted to
META keys by converting all characters to uppercase, replacing any hyphens with underscores and adding an HTTP_ prefix
to the name. So, for example, a header called X-Bender would be mapped to the META key HTTP_X_BENDER."
"""
headers = {}
for k, v in self.request.META.items():
if k.startswith('HTTP') or k in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
splt = k.split('_')
if splt[0] == 'HTTP':
splt = splt[1:]
k = '-'.join(splt)
headers[k] = v
if SilkyConfig().SILKY_HIDE_COOKIES:
try:
del headers['COOKIE']
except KeyError:
pass
return json.dumps(headers, cls=DefaultEncoder)
def _body(self, raw_body, content_type):
"""
        Encode body as JSON if possible so it can be used as a dictionary in generation
of curl/django test client code
"""
body = ''
if content_type in content_type_form:
body = self.request.POST
body = json.dumps(dict(body), sort_keys=True, indent=4)
elif content_type in content_types_json:
try:
body = json.dumps(json.loads(raw_body), sort_keys=True, indent=4)
            except Exception:
body = raw_body
return body
def body(self):
content_type, char_set = self.content_type()
raw_body = self.request.body
if char_set:
try:
raw_body = raw_body.decode(char_set)
except AttributeError:
pass
except LookupError: # If no encoding exists, default to UTF-8
try:
raw_body = raw_body.decode('UTF-8')
except AttributeError:
pass
except UnicodeDecodeError:
raw_body = ''
except Exception as e:
Logger.error('Unable to decode request body using char_set %s due to error: %s. Will ignore. Stacktrace:' % (char_set, e))
traceback.print_exc()
else:
# Default to an attempt at UTF-8 decoding.
try:
raw_body = raw_body.decode('UTF-8')
except AttributeError:
pass
except UnicodeDecodeError:
raw_body = ''
max_size = SilkyConfig().SILKY_MAX_REQUEST_BODY_SIZE
body = ''
if raw_body:
if max_size > -1:
Logger.debug('A max request size is set so checking size')
size = sys.getsizeof(raw_body, default=None)
request_identifier = self.request.path
if not size:
Logger.error('No way in which to get size of request body for %s, will ignore it', request_identifier)
elif size <= max_size:
Logger.debug('Request %s has body of size %d which is less than %d so will save the body' % (request_identifier, size, max_size))
body = self._body(raw_body, content_type)
else:
Logger.debug('Request %s has body of size %d which is greater than %d, therefore ignoring' % (request_identifier, size, max_size))
raw_body = None
else:
Logger.debug('No maximum request body size is set, continuing.')
body = self._body(raw_body, content_type)
return body, raw_body
def query_params(self):
query_params = self.request.GET
encoded_query_params = ''
if query_params:
query_params_dict = dict(zip(query_params.keys(), query_params.values()))
encoded_query_params = json.dumps(query_params_dict)
return encoded_query_params
def construct_request_model(self):
body, raw_body = self.body()
query_params = self.query_params()
path = self.request.path
resolved = resolve(path)
try:
# view_name is set in Django >= 1.8
view_name = resolved.view_name
except AttributeError:
# support for Django 1.6 and 1.7 in which no view_name is set
view_name = resolved.url_name
namespace = resolved.namespace
if namespace:
view_name = namespace + ':' + view_name
request_model = models.Request.objects.create(
path=path,
encoded_headers=self.encoded_headers(),
method=self.request.method,
query_params=query_params,
view_name=view_name,
body=body)
        # Text fields are encoded as UTF-8 in Django and hence will try to coerce
        # anything we pass to UTF-8. Some stuff like binary will fail.
try:
request_model.raw_body = raw_body
except UnicodeDecodeError:
Logger.debug('NYI: Binary request bodies') # TODO
Logger.debug('Created new request model with pk %s' % request_model.pk)
return request_model
class ResponseModelFactory(object):
"""given a response object, craft the silk response model"""
def __init__(self, response):
super(ResponseModelFactory, self).__init__()
self.response = response
self.request = DataCollector().request
def body(self):
body = ''
content_type, char_set = _parse_content_type(self.response.get('Content-Type', ''))
content = getattr(self.response, 'content', '')
if char_set and content:
try:
content = content.decode(char_set)
except AttributeError:
pass
except LookupError: # If no encoding exists, default to UTF-8
try:
content = content.decode('UTF-8')
except AttributeError:
pass
except UnicodeDecodeError:
content = ''
except Exception as e:
Logger.error('Unable to decode response body using char_set %s due to error: %s. Will ignore. Stacktrace:' % (char_set, e))
traceback.print_exc()
else:
# Default to an attempt at UTF-8 decoding.
try:
content = content.decode('UTF-8')
except AttributeError:
pass
except UnicodeDecodeError:
content = ''
if content:
max_body_size = SilkyConfig().SILKY_MAX_RESPONSE_BODY_SIZE
if max_body_size > -1:
Logger.debug('Max size of response body defined so checking')
size = sys.getsizeof(content, None)
if not size:
Logger.error('Could not get size of response body. Ignoring')
content = ''
else:
if size > max_body_size:
content = ''
Logger.debug('Size of %d for %s is bigger than %d so ignoring response body' % (size, self.request.path, max_body_size))
else:
Logger.debug('Size of %d for %s is less than %d so saving response body' % (size, self.request.path, max_body_size))
if content_type in content_types_json:
                # TODO: Perhaps there's a way to format the JSON without parsing it?
try:
body = json.dumps(json.loads(content), sort_keys=True, indent=4)
except (TypeError, ValueError):
Logger.warn('Response to request with pk %s has content type %s but was unable to parse it' % (self.request.pk, content_type))
return body, content
def construct_response_model(self):
        assert self.request, "Can't construct a response model if there is no request model"
Logger.debug('Creating response model for request model with pk %s' % self.request.pk)
b, content = self.body()
raw_headers = self.response._headers
headers = {}
for k, v in raw_headers.items():
try:
header, val = v
except ValueError:
header, val = k, v
finally:
headers[header] = val
silky_response = models.Response.objects.create(request=self.request,
status_code=self.response.status_code,
encoded_headers=json.dumps(headers),
body=b)
        # Text fields are encoded as UTF-8 in Django and hence will try to coerce
        # anything we pass to UTF-8. Some stuff like binary will fail.
try:
silky_response.raw_body = content
except UnicodeDecodeError:
Logger.debug('NYI: Saving of binary response body') # TODO
return silky_response
|
|
from __future__ import unicode_literals
"""Benchmark for SQLAlchemy.
An adaptation of Robert Brewer's ZooMark speed tests. """
import datetime
import sys
import time
from sqlalchemy import *
from sqlalchemy.testing import fixtures, engines, profiling
from sqlalchemy import testing
ITERATIONS = 1
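# Records the DB-API calls issued by the baseline tests so that the profile tests
# further down can replay them without a live database.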
dbapi_session = engines.ReplayableSession()
metadata = None
class ZooMarkTest(fixtures.TestBase):
"""Runs the ZooMark and squawks if method counts vary from the norm.
Each test has an associated `call_range`, the total number of
accepted function calls made during the test. The count can vary
between Python 2.4 and 2.5.
    Unlike a unit test, this is an ordered collection of steps. Running
components individually will fail.
"""
__requires__ = 'cpython',
__only_on__ = 'postgresql+psycopg2'
__skip_if__ = lambda : sys.version_info < (2, 5),
def test_baseline_0_setup(self):
global metadata
creator = testing.db.pool._creator
recorder = lambda : dbapi_session.recorder(creator())
engine = engines.testing_engine(options={'creator': recorder,
'use_reaper':False})
metadata = MetaData(engine)
engine.connect()
def test_baseline_1_create_tables(self):
Zoo = Table(
'Zoo',
metadata,
Column('ID', Integer, Sequence('zoo_id_seq'),
primary_key=True, index=True),
Column('Name', Unicode(255)),
Column('Founded', Date),
Column('Opens', Time),
Column('LastEscape', DateTime),
Column('Admission', Float),
)
Animal = Table(
'Animal',
metadata,
Column('ID', Integer, Sequence('animal_id_seq'),
primary_key=True),
Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
Column('Name', Unicode(100)),
Column('Species', Unicode(100)),
Column('Legs', Integer, default=4),
Column('LastEscape', DateTime),
Column('Lifespan', Float(4)),
Column('MotherID', Integer, ForeignKey('Animal.ID')),
Column('PreferredFoodID', Integer),
Column('AlternateFoodID', Integer),
)
metadata.create_all()
def test_baseline_1a_populate(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
engine = metadata.bind
wap = engine.execute(Zoo.insert(), Name='Wild Animal Park',
Founded=datetime.date(2000, 1, 1),
Opens=datetime.time(8, 15, 59),
LastEscape=
datetime.datetime(2004, 7, 29, 5, 6, 7),
Admission=4.95).inserted_primary_key[0]
sdz = engine.execute(Zoo.insert(), Name='San Diego Zoo',
Founded=datetime.date(1935, 9, 13),
Opens=datetime.time(9, 0, 0),
Admission=0).inserted_primary_key[0]
engine.execute(Zoo.insert(inline=True), Name='Montr\xe9al Biod\xf4me',
Founded=datetime.date(1992, 6, 19),
Opens=datetime.time(9, 0, 0), Admission=11.75)
seaworld = engine.execute(Zoo.insert(), Name='Sea_World',
Admission=60).inserted_primary_key[0]
# Let's add a crazy futuristic Zoo to test large date values.
lp = engine.execute(Zoo.insert(), Name='Luna Park',
Founded=datetime.date(2072, 7, 17),
Opens=datetime.time(0, 0, 0),
Admission=134.95).inserted_primary_key[0]
# Animals
leopardid = engine.execute(Animal.insert(), Species='Leopard',
Lifespan=73.5).inserted_primary_key[0]
engine.execute(Animal.update(Animal.c.ID == leopardid), ZooID=wap,
LastEscape=datetime.datetime( 2004, 12, 21, 8, 15, 0, 999907,)
)
lion = engine.execute(Animal.insert(), Species='Lion',
ZooID=wap).inserted_primary_key[0]
engine.execute(Animal.insert(), Species='Slug', Legs=1, Lifespan=.75)
tiger = engine.execute(Animal.insert(), Species='Tiger',
ZooID=sdz).inserted_primary_key[0]
# Override Legs.default with itself just to make sure it works.
engine.execute(Animal.insert(inline=True), Species='Bear', Legs=4)
engine.execute(Animal.insert(inline=True), Species='Ostrich', Legs=2,
Lifespan=103.2)
engine.execute(Animal.insert(inline=True), Species='Centipede',
Legs=100)
emp = engine.execute(Animal.insert(), Species='Emperor Penguin',
Legs=2, ZooID=seaworld).inserted_primary_key[0]
adelie = engine.execute(Animal.insert(), Species='Adelie Penguin',
Legs=2, ZooID=seaworld).inserted_primary_key[0]
engine.execute(Animal.insert(inline=True), Species='Millipede',
Legs=1000000, ZooID=sdz)
# Add a mother and child to test relationships
bai_yun = engine.execute(Animal.insert(), Species='Ape',
Name='Bai Yun', Legs=2).inserted_primary_key[0]
engine.execute(Animal.insert(inline=True), Species='Ape',
Name='Hua Mei', Legs=2, MotherID=bai_yun)
def test_baseline_2_insert(self):
Animal = metadata.tables['Animal']
i = Animal.insert(inline=True)
for x in range(ITERATIONS):
tick = i.execute(Species='Tick', Name='Tick %d' % x,
Legs=8)
def test_baseline_3_properties(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
engine = metadata.bind
def fullobject(select):
"""Iterate over the full result row."""
return list(engine.execute(select).first())
for x in range(ITERATIONS):
# Zoos
WAP = fullobject(Zoo.select(Zoo.c.Name
== 'Wild Animal Park'))
SDZ = fullobject(Zoo.select(Zoo.c.Founded
== datetime.date(1935, 9, 13)))
Biodome = fullobject(Zoo.select(Zoo.c.Name
== 'Montr\xe9al Biod\xf4me'))
seaworld = fullobject(Zoo.select(Zoo.c.Admission
== float(60)))
# Animals
leopard = fullobject(Animal.select(Animal.c.Species
== 'Leopard'))
ostrich = fullobject(Animal.select(Animal.c.Species
== 'Ostrich'))
millipede = fullobject(Animal.select(Animal.c.Legs
== 1000000))
ticks = fullobject(Animal.select(Animal.c.Species == 'Tick'
))
def test_baseline_4_expressions(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
engine = metadata.bind
def fulltable(select):
"""Iterate over the full result table."""
return [list(row) for row in engine.execute(select).fetchall()]
for x in range(ITERATIONS):
assert len(fulltable(Zoo.select())) == 5
assert len(fulltable(Animal.select())) == ITERATIONS + 12
assert len(fulltable(Animal.select(Animal.c.Legs == 4))) \
== 4
assert len(fulltable(Animal.select(Animal.c.Legs == 2))) \
== 5
assert len(fulltable(Animal.select(and_(Animal.c.Legs >= 2,
Animal.c.Legs < 20)))) == ITERATIONS + 9
assert len(fulltable(Animal.select(Animal.c.Legs > 10))) \
== 2
assert len(fulltable(Animal.select(Animal.c.Lifespan
> 70))) == 2
assert len(fulltable(Animal.select(Animal.c.Species.
startswith('L')))) == 2
assert len(fulltable(Animal.select(Animal.c.Species.
endswith('pede')))) == 2
assert len(fulltable(Animal.select(Animal.c.LastEscape
!= None))) == 1
assert len(fulltable(Animal.select(None
== Animal.c.LastEscape))) == ITERATIONS + 11
# In operator (containedby)
assert len(fulltable(Animal.select(Animal.c.Species.like('%pede%'
)))) == 2
assert len(fulltable(Animal.select(Animal.c.Species.in_(['Lion'
, 'Tiger', 'Bear'])))) == 3
# Try In with cell references
class thing(object):
pass
pet, pet2 = thing(), thing()
pet.Name, pet2.Name = 'Slug', 'Ostrich'
assert len(fulltable(Animal.select(Animal.c.Species.in_([pet.Name,
pet2.Name])))) == 2
# logic and other functions
assert len(fulltable(Animal.select(Animal.c.Species.like('Slug'
)))) == 1
assert len(fulltable(Animal.select(Animal.c.Species.like('%pede%'
)))) == 2
name = 'Lion'
assert len(fulltable(Animal.select(func.length(Animal.c.Species)
== len(name)))) == ITERATIONS + 3
assert len(fulltable(Animal.select(Animal.c.Species.like('%i%'
)))) == ITERATIONS + 7
# Test now(), today(), year(), month(), day()
assert len(fulltable(Zoo.select(and_(Zoo.c.Founded != None,
Zoo.c.Founded
< func.current_timestamp(_type=Date))))) == 3
assert len(fulltable(Animal.select(Animal.c.LastEscape
== func.current_timestamp(_type=Date)))) == 0
assert len(fulltable(Animal.select(func.date_part('year',
Animal.c.LastEscape) == 2004))) == 1
assert len(fulltable(Animal.select(func.date_part('month',
Animal.c.LastEscape) == 12))) == 1
assert len(fulltable(Animal.select(func.date_part('day',
Animal.c.LastEscape) == 21))) == 1
def test_baseline_5_aggregates(self):
Animal = metadata.tables['Animal']
Zoo = metadata.tables['Zoo']
engine = metadata.bind
for x in range(ITERATIONS):
# views
view = engine.execute(select([Animal.c.Legs])).fetchall()
legs = [x[0] for x in view]
legs.sort()
expected = {
'Leopard': 73.5,
'Slug': .75,
'Tiger': None,
'Lion': None,
'Bear': None,
'Ostrich': 103.2,
'Centipede': None,
'Emperor Penguin': None,
'Adelie Penguin': None,
'Millipede': None,
'Ape': None,
'Tick': None,
}
for species, lifespan in engine.execute(select([Animal.c.Species,
Animal.c.Lifespan])).fetchall():
assert lifespan == expected[species]
expected = ['Montr\xe9al Biod\xf4me', 'Wild Animal Park']
e = select([Zoo.c.Name], and_(Zoo.c.Founded != None,
Zoo.c.Founded <= func.current_timestamp(),
Zoo.c.Founded >= datetime.date(1990, 1, 1)))
values = [val[0] for val in engine.execute(e).fetchall()]
assert set(values) == set(expected)
# distinct
legs = [x[0] for x in engine.execute(select([Animal.c.Legs],
distinct=True)).fetchall()]
legs.sort()
def test_baseline_6_editing(self):
Zoo = metadata.tables['Zoo']
engine = metadata.bind
for x in range(ITERATIONS):
# Edit
SDZ = engine.execute(Zoo.select(Zoo.c.Name == 'San Diego Zoo'
)).first()
engine.execute(Zoo.update(Zoo.c.ID == SDZ['ID'
]), Name='The San Diego Zoo',
Founded=datetime.date(1900, 1, 1),
Opens=datetime.time(7, 30, 0),
Admission='35.00')
# Test edits
SDZ = engine.execute(Zoo.select(Zoo.c.Name == 'The San Diego Zoo'
)).first()
assert SDZ['Founded'] == datetime.date(1900, 1, 1), \
SDZ['Founded']
# Change it back
engine.execute(Zoo.update(Zoo.c.ID == SDZ['ID'
]), Name='San Diego Zoo',
Founded=datetime.date(1935, 9, 13),
Opens=datetime.time(9, 0, 0),
Admission='0')
# Test re-edits
SDZ = engine.execute(Zoo.select(Zoo.c.Name == 'San Diego Zoo'
)).first()
assert SDZ['Founded'] == datetime.date(1935, 9, 13)
def test_baseline_7_multiview(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
engine = metadata.bind
def fulltable(select):
"""Iterate over the full result table."""
return [list(row) for row in engine.execute(select).fetchall()]
for x in range(ITERATIONS):
za = fulltable(select([Zoo.c.ID] + list(Animal.c),
Zoo.c.Name == 'San Diego Zoo',
from_obj=[join(Zoo, Animal)]))
SDZ = Zoo.select(Zoo.c.Name == 'San Diego Zoo')
e = fulltable(select([Zoo.c.ID, Animal.c.ID],
and_(Zoo.c.Name == 'San Diego Zoo',
Animal.c.Species == 'Leopard'),
from_obj=[join(Zoo, Animal)]))
# Now try the same query with INNER, LEFT, and RIGHT JOINs.
e = fulltable(select([Zoo.c.Name, Animal.c.Species],
from_obj=[join(Zoo, Animal)]))
e = fulltable(select([Zoo.c.Name, Animal.c.Species],
from_obj=[outerjoin(Zoo, Animal)]))
e = fulltable(select([Zoo.c.Name, Animal.c.Species],
from_obj=[outerjoin(Animal, Zoo)]))
def test_baseline_8_drop(self):
metadata.drop_all()
# Now, run all of these tests again with the DB-API driver factored
# out: the ReplayableSession playback stands in for the database.
#
# How awkward is this in a unittest framework? Very.
def test_profile_0(self):
global metadata
player = lambda : dbapi_session.player()
engine = create_engine('postgresql:///', creator=player,
use_native_hstore=False)
metadata = MetaData(engine)
engine.connect()
def test_profile_1_create_tables(self):
self.test_baseline_1_create_tables()
@profiling.function_call_count()
def test_profile_1a_populate(self):
self.test_baseline_1a_populate()
@profiling.function_call_count()
def test_profile_2_insert(self):
self.test_baseline_2_insert()
@profiling.function_call_count()
def test_profile_3_properties(self):
self.test_baseline_3_properties()
@profiling.function_call_count()
def test_profile_4_expressions(self):
self.test_baseline_4_expressions()
@profiling.function_call_count()
def test_profile_5_aggregates(self):
self.test_baseline_5_aggregates()
@profiling.function_call_count()
def test_profile_6_editing(self):
self.test_baseline_6_editing()
@profiling.function_call_count()
def test_profile_7_multiview(self):
self.test_baseline_7_multiview()
def test_profile_8_drop(self):
self.test_baseline_8_drop()
|
|
import sonnet as snt
import tensorflow as tf
from luminoth.models.fasterrcnn.rcnn_proposal import RCNNProposal
from luminoth.models.fasterrcnn.rcnn_target import RCNNTarget
from luminoth.models.fasterrcnn.roi_pool import ROIPoolingLayer
from luminoth.utils.losses import smooth_l1_loss
from luminoth.utils.vars import (
get_initializer, layer_summaries, variable_summaries,
get_activation_function
)
class RCNN(snt.AbstractModule):
"""RCNN: Region-based Convolutional Neural Network.
Given region proposals (bounding boxes on an image) and a feature map of
that image, RCNN adjusts the bounding boxes and classifies each region as
either background or a specific object class.
Steps:
1. Region of Interest Pooling. Extract features from the feature map
(based on the proposals) and convert into fixed size tensors
(applying extrapolation).
2. Two fully connected layers generate a smaller tensor for each
region.
        3. A fully connected layer outputs the probability distribution over the
classes (plus a background class), and another fully connected layer
outputs the bounding box regressions (one 4-d regression for each of
the possible classes).
Using the class probability, filter regions classified as background. For
the remaining regions, use the class probability together with the
corresponding bounding box regression offsets to generate the final object
bounding boxes, with classes and probabilities assigned.
"""
def __init__(self, num_classes, config, debug=False, seed=None,
name='rcnn'):
super(RCNN, self).__init__(name=name)
self._num_classes = num_classes
# List of the fully connected layer sizes used before classifying and
# adjusting the bounding box.
self._layer_sizes = config.layer_sizes
self._activation = get_activation_function(config.activation_function)
self._dropout_keep_prob = config.dropout_keep_prob
self._use_mean = config.use_mean
self._variances = config.target_normalization_variances
self._rcnn_initializer = get_initializer(
config.rcnn_initializer, seed=seed
)
self._cls_initializer = get_initializer(
config.cls_initializer, seed=seed
)
self._bbox_initializer = get_initializer(
config.bbox_initializer, seed=seed
)
self.regularizer = tf.contrib.layers.l2_regularizer(
scale=config.l2_regularization_scale)
self._l1_sigma = config.l1_sigma
# Debug mode makes the module return more detailed Tensors which can be
# useful for debugging.
self._debug = debug
self._config = config
self._seed = seed
def _instantiate_layers(self):
# We define layers as an array since they are simple fully connected
# ones and it should be easy to tune it from the network config.
self._layers = [
snt.Linear(
layer_size,
name='fc_{}'.format(i),
initializers={'w': self._rcnn_initializer},
regularizers={'w': self.regularizer},
)
for i, layer_size in enumerate(self._layer_sizes)
]
        # The classifier layer has num_classes + 1 outputs, the extra one being the
        # background class, so that the module can also predict that a proposal is
        # background.
self._classifier_layer = snt.Linear(
self._num_classes + 1, name='fc_classifier',
initializers={'w': self._cls_initializer},
regularizers={'w': self.regularizer},
)
        # The bounding box adjustment layer has 4 outputs per class (4 * num_classes
        # in total); the offsets that are actually used are chosen according to the
        # output of the classifier layer.
self._bbox_layer = snt.Linear(
self._num_classes * 4, name='fc_bbox',
initializers={'w': self._bbox_initializer},
regularizers={'w': self.regularizer}
)
# ROIPoolingLayer is used to extract the feature from the feature map
# using the proposals.
self._roi_pool = ROIPoolingLayer(self._config.roi, debug=self._debug)
# RCNNTarget is used to define a minibatch and the correct values for
# each of the proposals.
self._rcnn_target = RCNNTarget(
self._num_classes, self._config.target, variances=self._variances,
seed=self._seed
)
# RCNNProposal generates the final bounding boxes and tries to remove
# duplicates.
self._rcnn_proposal = RCNNProposal(
self._num_classes, self._config.proposals,
variances=self._variances
)
def _build(self, conv_feature_map, proposals, im_shape, base_network,
gt_boxes=None, is_training=False):
"""
Classifies & refines proposals based on the pooled feature map.
Args:
conv_feature_map: The feature map of the image, extracted
using the pretrained network.
Shape: (num_proposals, pool_height, pool_width, 512).
proposals: A Tensor with the bounding boxes proposed by the RPN.
Shape: (total_num_proposals, 4).
Encoding: (x1, y1, x2, y2).
im_shape: A Tensor with the shape of the image in the form of
(image_height, image_width).
gt_boxes (optional): A Tensor with the ground truth boxes of the
image.
Shape: (total_num_gt, 5).
Encoding: (x1, y1, x2, y2, label).
is_training (optional): A boolean to determine whether we are using
the module for training or just for inference.
Returns:
prediction_dict: a dict with the object predictions.
It should have the keys:
objects:
labels:
probs:
rcnn:
target:
"""
self._instantiate_layers()
prediction_dict = {'_debug': {}}
if gt_boxes is not None:
proposals_target, bbox_offsets_target = self._rcnn_target(
proposals, gt_boxes)
if is_training:
with tf.name_scope('prepare_batch'):
# We flatten to set shape, but it is already a flat Tensor.
in_batch_proposals = tf.reshape(
tf.greater_equal(proposals_target, 0), [-1]
)
proposals = tf.boolean_mask(
proposals, in_batch_proposals)
bbox_offsets_target = tf.boolean_mask(
bbox_offsets_target, in_batch_proposals)
proposals_target = tf.boolean_mask(
proposals_target, in_batch_proposals)
prediction_dict['target'] = {
'cls': proposals_target,
'bbox_offsets': bbox_offsets_target,
}
roi_prediction = self._roi_pool(proposals, conv_feature_map, im_shape)
if self._debug:
# Save raw roi prediction in debug mode.
prediction_dict['_debug']['roi'] = roi_prediction
pooled_features = roi_prediction['roi_pool']
features = base_network._build_tail(
pooled_features, is_training=is_training
)
if self._use_mean:
# We avg our height and width dimensions for a more
# "memory-friendly" Tensor.
features = tf.reduce_mean(features, [1, 2])
# We treat num proposals as batch number so that when flattening we
# get a (num_proposals, flatten_pooled_feature_map_size) Tensor.
flatten_features = tf.contrib.layers.flatten(features)
net = tf.identity(flatten_features)
if is_training:
net = tf.nn.dropout(net, keep_prob=self._dropout_keep_prob)
if self._debug:
prediction_dict['_debug']['flatten_net'] = net
# After flattening we are left with a Tensor of shape
# (num_proposals, pool_height * pool_width * 512).
# The first dimension works as batch size when applied to snt.Linear.
for i, layer in enumerate(self._layers):
# Through FC layer.
net = layer(net)
# Apply activation and dropout.
variable_summaries(
net, 'fc_{}_preactivationout'.format(i), 'reduced'
)
net = self._activation(net)
if self._debug:
prediction_dict['_debug']['layer_{}_out'.format(i)] = net
variable_summaries(net, 'fc_{}_out'.format(i), 'reduced')
if is_training:
net = tf.nn.dropout(net, keep_prob=self._dropout_keep_prob)
cls_score = self._classifier_layer(net)
cls_prob = tf.nn.softmax(cls_score, axis=1)
bbox_offsets = self._bbox_layer(net)
prediction_dict['rcnn'] = {
'cls_score': cls_score,
'cls_prob': cls_prob,
'bbox_offsets': bbox_offsets,
}
# Get the final object proposals based on the probability, the offsets
# and the original proposals.
proposals_pred = self._rcnn_proposal(
proposals, bbox_offsets, cls_prob, im_shape)
# objects, objects_labels, and objects_labels_prob are the only keys
# that matter for drawing objects.
prediction_dict['objects'] = proposals_pred['objects']
prediction_dict['labels'] = proposals_pred['proposal_label']
prediction_dict['probs'] = proposals_pred['proposal_label_prob']
if self._debug:
prediction_dict['_debug']['proposal'] = proposals_pred
# Calculate summaries for results
variable_summaries(cls_prob, 'cls_prob', 'reduced')
variable_summaries(bbox_offsets, 'bbox_offsets', 'reduced')
if self._debug:
variable_summaries(pooled_features, 'pooled_features', 'full')
layer_summaries(self._classifier_layer, 'full')
layer_summaries(self._bbox_layer, 'full')
return prediction_dict
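# Shape walk-through of _build, for illustration only (assuming N proposals
# remain after the optional minibatch filtering and the 512-channel feature
# map mentioned in the docstring): proposals (N, 4) -> ROI pooling
# (N, pool_height, pool_width, 512) -> base network tail, optional mean over
# height/width and flatten (N, F) -> fully connected layers (N, layer_size)
# -> cls_score/cls_prob (N, num_classes + 1) and bbox_offsets
# (N, num_classes * 4).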
def loss(self, prediction_dict):
"""
Returns cost for RCNN based on:
Args:
prediction_dict with keys:
rcnn:
cls_score: shape (num_proposals, num_classes + 1)
Has the class scoring for each of the proposals. Classes
are 1-indexed with 0 being the background.
cls_prob: shape (num_proposals, num_classes + 1)
Application of softmax on cls_score.
bbox_offsets: shape (num_proposals, num_classes * 4)
Has the offset for each proposal for each class.
We have to compare only the proposals labeled with the
offsets for that label.
target:
cls_target: shape (num_proposals,)
Has the correct label for each of the proposals.
0 => background
1..n => 1-indexed classes
bbox_offsets_target: shape (num_proposals, 4)
Has the true offset of each proposal for the true
label.
In case of not having a true label (non-background)
then it's just zeroes.
Returns:
loss_dict with keys:
rcnn_cls_loss: The cross-entropy or log-loss of the
classification task over the num_classes + 1 classes (background included).
rcnn_reg_loss: The smooth L1 loss for the bounding box
regression task to adjust correctly labeled boxes.
"""
with tf.name_scope('RCNNLoss'):
cls_score = prediction_dict['rcnn']['cls_score']
# cls_prob = prediction_dict['rcnn']['cls_prob']
# Cast target explicitly as int32.
cls_target = tf.cast(
prediction_dict['target']['cls'], tf.int32
)
# First we need to calculate the log loss between cls_prob and
# cls_target.
# We only care about the targets that are >= 0.
not_ignored = tf.reshape(tf.greater_equal(
cls_target, 0), [-1], name='not_ignored')
# We apply boolean mask to score, prob and target.
cls_score_labeled = tf.boolean_mask(
cls_score, not_ignored, name='cls_score_labeled')
# cls_prob_labeled = tf.boolean_mask(
# cls_prob, not_ignored, name='cls_prob_labeled')
cls_target_labeled = tf.boolean_mask(
cls_target, not_ignored, name='cls_target_labeled')
tf.summary.scalar(
'batch_size',
tf.shape(cls_score_labeled)[0], ['rcnn']
)
# Transform to one-hot vector
cls_target_one_hot = tf.one_hot(
cls_target_labeled, depth=self._num_classes + 1,
name='cls_target_one_hot'
)
# We get cross entropy loss of each proposal.
cross_entropy_per_proposal = (
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(cls_target_one_hot),
logits=cls_score_labeled
)
)
if self._debug:
prediction_dict['_debug']['losses'] = {}
# Save the cross entropy per proposal to be able to
# visualize proposals with high and low error.
prediction_dict['_debug']['losses'][
'cross_entropy_per_proposal'
] = (
cross_entropy_per_proposal
)
# Second we need to calculate the smooth l1 loss between
# `bbox_offsets` and `bbox_offsets_target`.
bbox_offsets = prediction_dict['rcnn']['bbox_offsets']
bbox_offsets_target = (
prediction_dict['target']['bbox_offsets']
)
# We only want the non-background labels bounding boxes.
not_ignored = tf.reshape(tf.greater(cls_target, 0), [-1])
bbox_offsets_labeled = tf.boolean_mask(
bbox_offsets, not_ignored, name='bbox_offsets_labeled')
bbox_offsets_target_labeled = tf.boolean_mask(
bbox_offsets_target, not_ignored,
name='bbox_offsets_target_labeled'
)
cls_target_labeled = tf.boolean_mask(
cls_target, not_ignored, name='cls_target_labeled')
# `cls_target_labeled` is based on `cls_target` which has
# `num_classes` + 1 classes.
# For making `one_hot` with depth `num_classes` work, we need to
# lower them by one so they become 0-indexed.
cls_target_labeled = cls_target_labeled - 1
cls_target_one_hot = tf.one_hot(
cls_target_labeled, depth=self._num_classes,
name='cls_target_one_hot'
)
# cls_target now is (num_labeled, num_classes)
bbox_flatten = tf.reshape(
bbox_offsets_labeled, [-1, 4], name='bbox_flatten')
# We use the flattened cls_target_one_hot as a boolean mask for the
# bboxes.
cls_flatten = tf.cast(tf.reshape(
cls_target_one_hot, [-1]), tf.bool, 'cls_flatten_as_bool')
bbox_offset_cleaned = tf.boolean_mask(
bbox_flatten, cls_flatten, 'bbox_offset_cleaned')
# Calculate the smooth L1 loss between the "cleaned" bbox offsets
# (that is, only the offsets for the true class of each proposal) and
# the labeled targets (see the reference sketch after this class).
reg_loss_per_proposal = smooth_l1_loss(
bbox_offset_cleaned, bbox_offsets_target_labeled,
sigma=self._l1_sigma
)
tf.summary.scalar(
'rcnn_foreground_samples',
tf.shape(bbox_offset_cleaned)[0], ['rcnn']
)
if self._debug:
# Also save reg loss per proposals to be able to visualize
# good and bad proposals in debug mode.
prediction_dict['_debug']['losses'][
'reg_loss_per_proposal'
] = (
reg_loss_per_proposal
)
return {
'rcnn_cls_loss': tf.reduce_mean(cross_entropy_per_proposal),
'rcnn_reg_loss': tf.reduce_mean(reg_loss_per_proposal),
}
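# The helper below is an illustrative, standalone sketch of the smooth L1
# loss computed by `smooth_l1_loss` above (TensorFlow 1.x ops assumed). It is
# not part of the original module and the helper name is hypothetical.
def _smooth_l1_loss_reference(bbox_pred, bbox_target, sigma=3.0):
    # bbox_pred, bbox_target: float tensors of shape (num_boxes, 4).
    sigma2 = sigma ** 2
    abs_diff = tf.abs(bbox_pred - bbox_target)
    # Quadratic branch for small errors, linear branch for large ones.
    below = tf.cast(tf.less(abs_diff, 1.0 / sigma2), tf.float32)
    per_coord = (
        below * 0.5 * sigma2 * tf.square(abs_diff) +
        (1.0 - below) * (abs_diff - 0.5 / sigma2)
    )
    # One loss value per box, summed over the 4 coordinates.
    return tf.reduce_sum(per_coord, axis=1)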
|
|
#
# File: installerservice.py
# This module provides methods for automating the install of the
# Texo CMS engine. It will setup databases and alter configuration files.
#
# Author:
# Adam Presley
#
import os
import re
import imp
import config
import database
from bottle import redirect
from services.engine import postservice
from services.identity import userservice
from services.engine import securityservice
#
# Function: isEngineInstalled
# Returns True/False if the engine is setup and installed.
#
def isEngineInstalled():
return len(config.BLOG_TITLE.strip()) > 0
def setupConfigFile(dbServer, dbName, dbUser, dbPass, blogTitle, postsPerPage, hashKey1, hashKey2, encryptionKey, encryptionIV):
configContents = _getConfigFileContents()
configContents = _configReplaceDbSettings(configContents=configContents, dbServer=dbServer, dbName=dbName, dbUser=dbUser, dbPass=dbPass)
configContents = _configReplaceSessionUrl(configContents=configContents, sessionUrl=_createConnectionString(dbServer=dbServer, dbName=dbName, dbUser=dbUser, dbPass=dbPass))
configContents = _configReplaceBlogTitle(configContents=configContents, blogTitle=blogTitle)
configContents = _configReplacePostsPerPage(configContents=configContents, postsPerPage=postsPerPage)
configContents = _configReplaceSecuritySettings(configContents=configContents, hashKey1=hashKey1, hashKey2=hashKey2, encryptionKey=encryptionKey, encryptionIV=encryptionIV)
_saveConfigFile(configContents)
def setupDatabase(dbServer, dbPort, dbName, dbUser, dbPass, email, password, firstName, lastName, timezone, hashKey1, hashKey2):
#
# TODO: This code is MySQL specific. I would like to
# support other engines at some point
#
database.connect(
engine = "mysql",
host = dbServer,
port = dbPort,
database = "mysql",
user = dbUser,
password = dbPass
)
database.execute("DROP DATABASE IF EXISTS %s;" % dbName)
database.execute("CREATE DATABASE %s;" % dbName)
database.execute("USE %s;" % dbName)
database.execute("""
CREATE TABLE `settings` (
`themeName` VARCHAR(50) NOT NULL DEFAULT 'default',
`timezone` VARCHAR(50) NOT NULL DEFAULT 'UTC'
) ENGINE=MyISAM;
""")
database.execute("""
CREATE TABLE awssettings (
accessKeyId VARCHAR(50),
secretAccessKey VARCHAR(50),
s3Bucket VARCHAR(100)
) ENGINE=MyISAM;
""")
database.execute("""
CREATE TABLE `user` (
`id` INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT,
`email` VARCHAR(255) NOT NULL UNIQUE,
`password` VARCHAR(255) NOT NULL,
`firstName` VARCHAR(50) NOT NULL,
`lastName` VARCHAR(50) NOT NULL
) ENGINE=MyISAM;
""")
database.execute("CREATE INDEX `idx_user_email` ON `user` (`email`);")
database.execute("""
CREATE TABLE `poststatus` (
`id` INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT,
`status` VARCHAR(20) NOT NULL
) ENGINE=MyISAM;
""")
database.execute("""
CREATE TABLE `post` (
`id` INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT,
`title` VARCHAR(175) NOT NULL,
`authorId` INT UNSIGNED NOT NULL,
`slug` VARCHAR(300) NOT NULL,
`content` TEXT,
`createdDateTime` DATETIME,
`publishedDateTime` DATETIME,
`publishedYear` INT,
`publishedMonth` INT,
`postStatusId` INT UNSIGNED,
FOREIGN KEY (authorId) REFERENCES user(id),
FOREIGN KEY (postStatusId) REFERENCES poststatus(id)
) ENGINE=MyISAM;
""")
database.execute("CREATE INDEX `idx_post_publishedDateTime` ON `post` (`publishedDateTime`);")
database.execute("""
CREATE TABLE `posttag` (
`id` INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT,
`tag` VARCHAR(20) NOT NULL,
`howManyTimesUsed` INT NOT NULL DEFAULT 0,
UNIQUE KEY `posttag_tag` (`tag`)
) ENGINE=MyISAM;
""")
database.execute("CREATE INDEX `idx_posttag_tag` ON `posttag` (`tag`);")
database.execute("""
CREATE TABLE `post_posttag` (
`id` INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT,
`postId` INT UNSIGNED NOT NULL,
`postTagId` INT UNSIGNED NOT NULL,
UNIQUE KEY `post_posttag_unique_tagandid` (`postId`, `postTagId`),
FOREIGN KEY (`postId`) REFERENCES post(`id`),
FOREIGN KEY (`postTagId`) REFERENCES posttag(`id`)
) ENGINE=MyISAM;
""")
database.execute("""
INSERT INTO settings (themeName, timezone) VALUES
('default', %s)
;
""", (
timezone,
))
database.execute("""
INSERT INTO user (email, password, firstName, lastName) VALUES
(%s, %s, %s, %s)
;
""", (
email,
securityservice.hash(value=password, hashKey1=hashKey1, hashKey2=hashKey2),
firstName,
lastName,
))
database.execute("""
INSERT INTO poststatus (status) VALUES
('Draft'),
('Published'),
('Archived')
;
""")
database.execute("""
INSERT INTO awssettings (accessKeyId, secretAccessKey, s3Bucket) VALUES ('', '', '');
""")
def _configReplaceBlogTitle(configContents, blogTitle):
pattern = re.compile(r'(.*?)BLOG_TITLE\s+=\s+"(.*?)"', re.I | re.S)
result = pattern.sub(r'\1BLOG_TITLE = "' + blogTitle + '"', configContents, count=1)
return result
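# For illustration (hypothetical file contents): given a config.py line such
# as
#   BLOG_TITLE = "My Old Blog"
# _configReplaceBlogTitle(contents, "My New Blog") rewrites only the first
# match (count=1) to
#   BLOG_TITLE = "My New Blog"
# and leaves the rest of the file untouched.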
def _configReplaceDbSettings(configContents, dbServer, dbName, dbUser, dbPass):
pattern1 = re.compile(r'(.*?)"DB_HOST":\s+(.*?)"', re.I | re.S)
pattern2 = re.compile(r'(.*?)"DB_PORT":\s+(.*?),', re.I | re.S)
pattern3 = re.compile(r'(.*?)"DB_NAME":\s+(.*?)"', re.I | re.S)
pattern4 = re.compile(r'(.*?)"DB_USER":\s+(.*?)"', re.I | re.S)
pattern5 = re.compile(r'(.*?)"DB_PASSWORD":\s+(.*?)"', re.I | re.S)
result = pattern1.sub(r'\1"DB_HOST": "' + dbServer, configContents, count=1)
result = pattern2.sub(r'\1"DB_PORT": 3306,', result, count=1)
result = pattern3.sub(r'\1"DB_NAME": "' + dbName, result, count=1)
result = pattern4.sub(r'\1"DB_USER": "' + dbUser, result, count=1)
result = pattern5.sub(r'\1"DB_PASSWORD": "' + dbPass, result, count=1)
return result
def _configReplacePostsPerPage(configContents, postsPerPage):
pattern = re.compile(r'(.*?)POSTS_PER_PAGE\s+=\s+(.*?)\n', re.I | re.S)
result = pattern.sub(r'\1POSTS_PER_PAGE = ' + postsPerPage + '\n', configContents, count=1)
return result
def _configReplaceSecuritySettings(configContents, hashKey1, hashKey2, encryptionKey, encryptionIV):
pattern1 = re.compile(r'(.*?)HASH_KEY_1\s+=\s+(.*?)\n', re.I | re.S)
pattern2 = re.compile(r'(.*?)HASH_KEY_2\s+=\s+(.*?)\n', re.I | re.S)
pattern3 = re.compile(r'(.*?)ENCRYPTION_KEY\s+=\s+(.*?)\n', re.I | re.S)
pattern4 = re.compile(r'(.*?)ENCRYPTION_IV\s+=\s+(.*?)\n', re.I | re.S)
result = pattern1.sub(r'\1HASH_KEY_1 = "' + hashKey1 + '"\n', configContents, count=1)
result = pattern2.sub(r'\1HASH_KEY_2 = "' + hashKey2 + '"\n', result, count=1)
result = pattern3.sub(r'\1ENCRYPTION_KEY = "' + encryptionKey + '"\n', result, count=1)
result = pattern4.sub(r'\1ENCRYPTION_IV = "' + encryptionIV + '"\n', result, count=1)
return result
def _configReplaceSessionUrl(configContents, sessionUrl):
pattern = re.compile(r'(.*?)"SESSION_URL":\s+"(.*?)"', re.I | re.S)
result = pattern.sub(r'\1"SESSION_URL": "' + sessionUrl + '"', configContents, count=1)
return result
def _createConnectionString(dbServer, dbName, dbUser, dbPass):
return "mysql://%s:%s@%s/%s" % (dbUser, dbPass, dbServer, dbName)
def _getConfigFileContents():
contents = ""
with open(os.path.join(config.ROOT_PATH, "config.py"), "r") as f:
contents = f.read()
return contents
def _saveConfigFile(configContents):
with open(os.path.join(config.ROOT_PATH, "config.py"), "w") as f:
f.write(configContents)
|
|
# coding=utf8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Manages a project checkout.
Includes support for svn, git-svn and git.
"""
import fnmatch
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
# The configparser module was renamed in Python 3.
try:
import configparser
except ImportError:
import ConfigParser as configparser
import patch
import scm
import subprocess2
if sys.platform in ('cygwin', 'win32'):
# Disable timeouts on Windows since we can't have shells with timeouts.
GLOBAL_TIMEOUT = None
FETCH_TIMEOUT = None
else:
# Default timeout of 15 minutes.
GLOBAL_TIMEOUT = 15*60
# Use a larger timeout for checkout since it can be a genuinely slower
# operation.
FETCH_TIMEOUT = 30*60
def get_code_review_setting(path, key,
codereview_settings_file='codereview.settings'):
"""Parses codereview.settings and return the value for the key if present.
Don't cache the values in case the file is changed."""
# TODO(maruel): Do not duplicate code.
settings = {}
try:
settings_file = open(os.path.join(path, codereview_settings_file), 'r')
try:
for line in settings_file.readlines():
if not line or line.startswith('#'):
continue
if not ':' in line:
# Invalid file.
return None
k, v = line.split(':', 1)
settings[k.strip()] = v.strip()
finally:
settings_file.close()
except IOError:
return None
return settings.get(key, None)
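# Example for illustration (hypothetical checkout path and settings): if
# /src/project/codereview.settings contains the line
#   CODE_REVIEW_SERVER: codereview.example.com
# then get_code_review_setting('/src/project', 'CODE_REVIEW_SERVER') returns
# 'codereview.example.com'; a missing file or key returns None.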
def align_stdout(stdout):
"""Returns the aligned output of multiple stdouts."""
output = ''
for item in stdout:
item = item.strip()
if not item:
continue
output += ''.join(' %s\n' % line for line in item.splitlines())
return output
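# For illustration: align_stdout(['ok\ndone', '', 'skipped']) skips the empty
# item, prefixes each remaining line with the fixed indent used above and
# joins everything into a single string:
#   ok
#   done
#   skipped
# which keeps multi-command output readable in the exception messages below.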
class PatchApplicationFailed(Exception):
"""Patch failed to be applied."""
def __init__(self, p, status):
super(PatchApplicationFailed, self).__init__(p, status)
self.patch = p
self.status = status
@property
def filename(self):
if self.patch:
return self.patch.filename
def __str__(self):
out = []
if self.filename:
out.append('Failed to apply patch for %s:' % self.filename)
if self.status:
out.append(self.status)
if self.patch:
out.append('Patch: %s' % self.patch.dump())
return '\n'.join(out)
class CheckoutBase(object):
# Set to None to have verbose output.
VOID = subprocess2.VOID
def __init__(self, root_dir, project_name, post_processors):
"""
Args:
post_processors: list of lambda(checkout, patch) to call on each of the
modified files.
"""
super(CheckoutBase, self).__init__()
self.root_dir = root_dir
self.project_name = project_name
if self.project_name is None:
self.project_path = self.root_dir
else:
self.project_path = os.path.join(self.root_dir, self.project_name)
# Only used for logging purposes.
self._last_seen_revision = None
self.post_processors = post_processors
assert self.root_dir
assert self.project_path
assert os.path.isabs(self.project_path)
def get_settings(self, key):
return get_code_review_setting(self.project_path, key)
def prepare(self, revision):
"""Checks out a clean copy of the tree and removes any local modification.
This function shouldn't throw unless the remote repository is inaccessible,
there is no free disk space or hard issues like that.
Args:
revision: The revision it should sync to, SCM specific.
"""
raise NotImplementedError()
def apply_patch(self, patches, post_processors=None, verbose=False):
"""Applies a patch and returns the list of modified files.
This function should throw patch.UnsupportedPatchFormat or
PatchApplicationFailed when relevant.
Args:
patches: patch.PatchSet object.
"""
raise NotImplementedError()
def commit(self, commit_message, user):
"""Commits the patch upstream, while impersonating 'user'."""
raise NotImplementedError()
def revisions(self, rev1, rev2):
"""Returns the count of revisions from rev1 to rev2, e.g. len(]rev1, rev2]).
If rev2 is None, it means 'HEAD'.
Returns None if there is no link between the two.
"""
raise NotImplementedError()
class RawCheckout(CheckoutBase):
"""Used to apply a patch locally without any intent to commit it.
To be used by the try server.
"""
def prepare(self, revision):
"""Stubbed out."""
pass
def apply_patch(self, patches, post_processors=None, verbose=False):
"""Ignores svn properties."""
post_processors = post_processors or self.post_processors or []
for p in patches:
stdout = []
try:
filepath = os.path.join(self.project_path, p.filename)
if p.is_delete:
os.remove(filepath)
assert(not os.path.exists(filepath))
stdout.append('Deleted.')
else:
dirname = os.path.dirname(p.filename)
full_dir = os.path.join(self.project_path, dirname)
if dirname and not os.path.isdir(full_dir):
os.makedirs(full_dir)
stdout.append('Created missing directory %s.' % dirname)
if p.is_binary:
content = p.get()
with open(filepath, 'wb') as f:
f.write(content)
stdout.append('Added binary file %d bytes.' % len(content))
else:
if p.source_filename:
if not p.is_new:
raise PatchApplicationFailed(
p,
'File has a source filename specified but is not new')
# Copy the file first.
if os.path.isfile(filepath):
raise PatchApplicationFailed(
p, 'File exists but was about to be overwritten')
shutil.copy2(
os.path.join(self.project_path, p.source_filename), filepath)
stdout.append('Copied %s -> %s' % (p.source_filename, p.filename))
if p.diff_hunks:
cmd = ['patch', '-u', '--binary', '-p%s' % p.patchlevel]
if verbose:
cmd.append('--verbose')
env = os.environ.copy()
env['TMPDIR'] = tempfile.mkdtemp(prefix='crpatch')
try:
stdout.append(
subprocess2.check_output(
cmd,
stdin=p.get(False),
stderr=subprocess2.STDOUT,
cwd=self.project_path,
timeout=GLOBAL_TIMEOUT,
env=env))
finally:
shutil.rmtree(env['TMPDIR'])
elif p.is_new and not os.path.exists(filepath):
# There is only a header. Just create the file.
open(filepath, 'w').close()
stdout.append('Created an empty file.')
for post in post_processors:
post(self, p)
if verbose:
print p.filename
print align_stdout(stdout)
except OSError, e:
raise PatchApplicationFailed(p, '%s%s' % (align_stdout(stdout), e))
except subprocess.CalledProcessError, e:
raise PatchApplicationFailed(
p,
'While running %s;\n%s%s' % (
' '.join(e.cmd),
align_stdout(stdout),
align_stdout([getattr(e, 'stdout', '')])))
def commit(self, commit_message, user):
"""Stubbed out."""
raise NotImplementedError('RawCheckout can\'t commit')
def revisions(self, _rev1, _rev2):
return None
class SvnConfig(object):
"""Parses a svn configuration file."""
def __init__(self, svn_config_dir=None):
super(SvnConfig, self).__init__()
self.svn_config_dir = svn_config_dir
self.default = not bool(self.svn_config_dir)
if not self.svn_config_dir:
if sys.platform == 'win32':
self.svn_config_dir = os.path.join(os.environ['APPDATA'], 'Subversion')
else:
self.svn_config_dir = os.path.expanduser(
os.path.join('~', '.subversion'))
svn_config_file = os.path.join(self.svn_config_dir, 'config')
parser = configparser.SafeConfigParser()
if os.path.isfile(svn_config_file):
parser.read(svn_config_file)
else:
parser.add_section('auto-props')
self.auto_props = dict(parser.items('auto-props'))
class SvnMixIn(object):
"""MixIn class to add svn commands common to both svn and git-svn clients."""
# These members need to be set by the subclass.
commit_user = None
commit_pwd = None
svn_url = None
project_path = None
# Override at class level when necessary. If used, --non-interactive is
# implied.
svn_config = SvnConfig()
# Set to True when non-interactivity is necessary but a custom subversion
# configuration directory is not necessary.
non_interactive = False
def _add_svn_flags(self, args, non_interactive, credentials=True):
args = ['svn'] + args
if not self.svn_config.default:
args.extend(['--config-dir', self.svn_config.svn_config_dir])
if not self.svn_config.default or self.non_interactive or non_interactive:
args.append('--non-interactive')
if credentials:
if self.commit_user:
args.extend(['--username', self.commit_user])
if self.commit_pwd:
args.extend(['--password', self.commit_pwd])
return args
def _check_call_svn(self, args, **kwargs):
"""Runs svn and throws an exception if the command failed."""
kwargs.setdefault('cwd', self.project_path)
kwargs.setdefault('stdout', self.VOID)
kwargs.setdefault('timeout', GLOBAL_TIMEOUT)
return subprocess2.check_call_out(
self._add_svn_flags(args, False), **kwargs)
def _check_output_svn(self, args, credentials=True, **kwargs):
"""Runs svn and throws an exception if the command failed.
Returns the output.
"""
kwargs.setdefault('cwd', self.project_path)
return subprocess2.check_output(
self._add_svn_flags(args, True, credentials),
stderr=subprocess2.STDOUT,
timeout=GLOBAL_TIMEOUT,
**kwargs)
@staticmethod
def _parse_svn_info(output, key):
"""Returns value for key from svn info output.
Case insensitive.
"""
values = {}
key = key.lower()
for line in output.splitlines(False):
if not line:
continue
k, v = line.split(':', 1)
k = k.strip().lower()
v = v.strip()
assert not k in values
values[k] = v
return values.get(key, None)
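# For illustration (made-up svn output):
#   _parse_svn_info('URL: http://svn.example.com/trunk\nRevision: 42\n',
#                   'revision')
# returns '42'; keys are matched case-insensitively and a missing key yields
# None.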
class SvnCheckout(CheckoutBase, SvnMixIn):
"""Manages a subversion checkout."""
def __init__(self, root_dir, project_name, commit_user, commit_pwd, svn_url,
post_processors=None):
CheckoutBase.__init__(self, root_dir, project_name, post_processors)
SvnMixIn.__init__(self)
self.commit_user = commit_user
self.commit_pwd = commit_pwd
self.svn_url = svn_url
assert bool(self.commit_user) >= bool(self.commit_pwd)
def prepare(self, revision):
# Will checkout if the directory is not present.
assert self.svn_url
if not os.path.isdir(self.project_path):
logging.info('Checking out %s in %s' %
(self.project_name, self.project_path))
return self._revert(revision)
def apply_patch(self, patches, post_processors=None, verbose=False):
post_processors = post_processors or self.post_processors or []
for p in patches:
stdout = []
try:
filepath = os.path.join(self.project_path, p.filename)
# It is important to use credentials=False otherwise credentials could
# leak in the error message. Credentials are not necessary here for the
# following commands anyway.
if p.is_delete:
stdout.append(self._check_output_svn(
['delete', p.filename, '--force'], credentials=False))
assert(not os.path.exists(filepath))
stdout.append('Deleted.')
else:
# svn add while creating directories otherwise svn add on the
# contained files will silently fail.
# First, find the root directory that exists.
dirname = os.path.dirname(p.filename)
dirs_to_create = []
while (dirname and
not os.path.isdir(os.path.join(self.project_path, dirname))):
dirs_to_create.append(dirname)
dirname = os.path.dirname(dirname)
for dir_to_create in reversed(dirs_to_create):
os.mkdir(os.path.join(self.project_path, dir_to_create))
stdout.append(
self._check_output_svn(
['add', dir_to_create, '--force'], credentials=False))
stdout.append('Created missing directory %s.' % dir_to_create)
if p.is_binary:
content = p.get()
with open(filepath, 'wb') as f:
f.write(content)
stdout.append('Added binary file %d bytes.' % len(content))
else:
if p.source_filename:
if not p.is_new:
raise PatchApplicationFailed(
p,
'File has a source filename specified but is not new')
# Copy the file first.
if os.path.isfile(filepath):
raise PatchApplicationFailed(
p, 'File exists but was about to be overwritten')
stdout.append(
self._check_output_svn(
['copy', p.source_filename, p.filename]))
stdout.append('Copied %s -> %s' % (p.source_filename, p.filename))
if p.diff_hunks:
cmd = [
'patch',
'-p%s' % p.patchlevel,
'--forward',
'--force',
'--no-backup-if-mismatch',
]
env = os.environ.copy()
env['TMPDIR'] = tempfile.mkdtemp(prefix='crpatch')
try:
stdout.append(
subprocess2.check_output(
cmd,
stdin=p.get(False),
cwd=self.project_path,
timeout=GLOBAL_TIMEOUT,
env=env))
finally:
shutil.rmtree(env['TMPDIR'])
elif p.is_new and not os.path.exists(filepath):
# There is only a header. Just create the file if it doesn't
# exist.
open(filepath, 'w').close()
stdout.append('Created an empty file.')
if p.is_new and not p.source_filename:
# Do not run it if p.source_filename is defined, since svn copy was
# used above.
stdout.append(
self._check_output_svn(
['add', p.filename, '--force'], credentials=False))
for name, value in p.svn_properties:
if value is None:
stdout.append(
self._check_output_svn(
['propdel', '--quiet', name, p.filename],
credentials=False))
stdout.append('Property %s deleted.' % name)
else:
stdout.append(
self._check_output_svn(
['propset', name, value, p.filename], credentials=False))
stdout.append('Property %s=%s' % (name, value))
for prop, values in self.svn_config.auto_props.iteritems():
if fnmatch.fnmatch(p.filename, prop):
for value in values.split(';'):
if '=' not in value:
params = [value, '.']
else:
params = value.split('=', 1)
if params[1] == '*':
# Works around crbug.com/150960 on Windows.
params[1] = '.'
stdout.append(
self._check_output_svn(
['propset'] + params + [p.filename], credentials=False))
stdout.append('Property (auto) %s' % '='.join(params))
for post in post_processors:
post(self, p)
if verbose:
print p.filename
print align_stdout(stdout)
except OSError, e:
raise PatchApplicationFailed(p, '%s%s' % (align_stdout(stdout), e))
except subprocess.CalledProcessError, e:
raise PatchApplicationFailed(
p,
'While running %s;\n%s%s' % (
' '.join(e.cmd),
align_stdout(stdout),
align_stdout([getattr(e, 'stdout', '')])))
def commit(self, commit_message, user):
logging.info('Committing patch for %s' % user)
assert self.commit_user
assert isinstance(commit_message, unicode)
handle, commit_filename = tempfile.mkstemp(text=True)
try:
# Shouldn't assume default encoding is UTF-8. But really, if you are using
# anything else, you are living in another world.
os.write(handle, commit_message.encode('utf-8'))
os.close(handle)
# When committing, svn won't update the Revision metadata of the checkout,
# so if svn commit returns "Committed revision 3.", svn info will still
# return "Revision: 2". Since running svn update right after svn commit
# creates a race condition with other committers, this code _must_ parse
# the output of svn commit and use a regexp to grab the revision number.
# Note that "Committed revision N." is localized but subprocess2 forces
# LANGUAGE=en.
args = ['commit', '--file', commit_filename]
# realauthor is parsed by a server-side hook.
if user and user != self.commit_user:
args.extend(['--with-revprop', 'realauthor=%s' % user])
out = self._check_output_svn(args)
finally:
os.remove(commit_filename)
lines = filter(None, out.splitlines())
match = re.match(r'^Committed revision (\d+).$', lines[-1])
if not match:
raise PatchApplicationFailed(
None,
'Couldn\'t make sense out of svn commit message:\n' + out)
return int(match.group(1))
def _revert(self, revision):
"""Reverts local modifications or checks out if the directory is not
present. Use depot_tools's functionality to do this.
"""
flags = ['--ignore-externals']
if revision:
flags.extend(['--revision', str(revision)])
if os.path.isdir(self.project_path):
# This may remove any part (or all) of the checkout.
scm.SVN.Revert(self.project_path, no_ignore=True)
if os.path.isdir(self.project_path):
# Revive files that were deleted in scm.SVN.Revert().
self._check_call_svn(['update', '--force'] + flags,
timeout=FETCH_TIMEOUT)
else:
logging.info(
'Directory %s is not present, checking it out.' % self.project_path)
self._check_call_svn(
['checkout', self.svn_url, self.project_path] + flags, cwd=None,
timeout=FETCH_TIMEOUT)
return self._get_revision()
def _get_revision(self):
out = self._check_output_svn(['info', '.'])
revision = int(self._parse_svn_info(out, 'revision'))
if revision != self._last_seen_revision:
logging.info('Updated to revision %d' % revision)
self._last_seen_revision = revision
return revision
def revisions(self, rev1, rev2):
"""Returns the number of actual commits, not just the difference between
numbers.
"""
rev2 = rev2 or 'HEAD'
# Revision range is inclusive and ordering doesn't matter, they'll appear in
# the order specified.
try:
out = self._check_output_svn(
['log', '-q', self.svn_url, '-r', '%s:%s' % (rev1, rev2)])
except subprocess.CalledProcessError:
return None
# Ignore the '----' lines.
return len([l for l in out.splitlines() if l.startswith('r')]) - 1
class GitCheckout(CheckoutBase):
"""Manages a git checkout."""
def __init__(self, root_dir, project_name, remote_branch, git_url,
commit_user, post_processors=None):
super(GitCheckout, self).__init__(root_dir, project_name, post_processors)
self.git_url = git_url
self.commit_user = commit_user
self.remote_branch = remote_branch
# The working branch where patches will be applied. It will track the
# remote branch.
self.working_branch = 'working_branch'
# There is no reason to not hardcode origin.
self.remote = 'origin'
# There is no reason to not hardcode master.
self.master_branch = 'master'
def prepare(self, revision):
"""Resets the git repository in a clean state.
Checks it out if not present and deletes the working branch.
"""
assert self.remote_branch
assert self.git_url
if not os.path.isdir(self.project_path):
# Clone the repo if the directory is not present.
logging.info(
'Checking out %s in %s', self.project_name, self.project_path)
self._check_call_git(
['clone', self.git_url, '-b', self.remote_branch, self.project_path],
cwd=None, timeout=FETCH_TIMEOUT)
else:
# Throw away all uncommitted changes in the existing checkout.
self._check_call_git(['checkout', self.remote_branch])
self._check_call_git(
['reset', '--hard', '--quiet',
'%s/%s' % (self.remote, self.remote_branch)])
if revision:
try:
# Look if the commit hash already exists. If so, we can skip a
# 'git fetch' call.
revision = self._check_output_git(['rev-parse', revision]).rstrip()
except subprocess.CalledProcessError:
self._check_call_git(
['fetch', self.remote, self.remote_branch, '--quiet'])
revision = self._check_output_git(['rev-parse', revision]).rstrip()
self._check_call_git(['checkout', '--force', '--quiet', revision])
else:
branches, active = self._branches()
if active != self.master_branch:
self._check_call_git(
['checkout', '--force', '--quiet', self.master_branch])
self._sync_remote_branch()
if self.working_branch in branches:
self._call_git(['branch', '-D', self.working_branch])
return self._get_head_commit_hash()
def _sync_remote_branch(self):
"""Syncs the remote branch."""
# We do a 'git pull origin master:refs/remotes/origin/master' instead of
# 'git pull origin master' because from the manpage for git-pull:
# A parameter <ref> without a colon is equivalent to <ref>: when
# pulling/fetching, so it merges <ref> into the current branch without
# storing the remote branch anywhere locally.
remote_tracked_path = 'refs/remotes/%s/%s' % (
self.remote, self.remote_branch)
self._check_call_git(
['pull', self.remote,
'%s:%s' % (self.remote_branch, remote_tracked_path),
'--quiet'])
def _get_head_commit_hash(self):
"""Gets the current revision (in unicode) from the local branch."""
return unicode(self._check_output_git(['rev-parse', 'HEAD']).strip())
def apply_patch(self, patches, post_processors=None, verbose=False):
"""Applies a patch on 'working_branch' and switches to it.
The changes remain staged on the current branch.
Ignores svn properties and raises an exception on unexpected ones.
"""
post_processors = post_processors or self.post_processors or []
# If this throws, the checkout is corrupted. Maybe worth deleting it and
# trying again?
if self.remote_branch:
self._check_call_git(
['checkout', '-b', self.working_branch, '-t', self.remote_branch,
'--quiet'])
for index, p in enumerate(patches):
stdout = []
try:
filepath = os.path.join(self.project_path, p.filename)
if p.is_delete:
if (not os.path.exists(filepath) and
any(p1.source_filename == p.filename for p1 in patches[0:index])):
# The file was already deleted if a prior patch with file rename
# was already processed because 'git apply' did it for us.
pass
else:
stdout.append(self._check_output_git(['rm', p.filename]))
assert(not os.path.exists(filepath))
stdout.append('Deleted.')
else:
dirname = os.path.dirname(p.filename)
full_dir = os.path.join(self.project_path, dirname)
if dirname and not os.path.isdir(full_dir):
os.makedirs(full_dir)
stdout.append('Created missing directory %s.' % dirname)
if p.is_binary:
content = p.get()
with open(filepath, 'wb') as f:
f.write(content)
stdout.append('Added binary file %d bytes' % len(content))
cmd = ['add', p.filename]
if verbose:
cmd.append('--verbose')
stdout.append(self._check_output_git(cmd))
else:
# No need to do anything special with p.is_new or if not
# p.diff_hunks. git apply manages all that already.
cmd = ['apply', '--index', '-3', '-p%s' % p.patchlevel]
if verbose:
cmd.append('--verbose')
stdout.append(self._check_output_git(cmd, stdin=p.get(True)))
for key, value in p.svn_properties:
# Ignores some known auto-props flags set through .subversion/config,
# bails out on the other ones.
# TODO(maruel): Read ~/.subversion/config and detect the rules that
# applies here to figure out if the property will be correctly
# handled.
stdout.append('Property %s=%s' % (key, value))
if not key in (
'svn:eol-style', 'svn:executable', 'svn:mime-type'):
raise patch.UnsupportedPatchFormat(
p.filename,
'Cannot apply svn property %s to file %s.' % (
key, p.filename))
for post in post_processors:
post(self, p)
if verbose:
print p.filename
print align_stdout(stdout)
except OSError, e:
raise PatchApplicationFailed(p, '%s%s' % (align_stdout(stdout), e))
except subprocess.CalledProcessError, e:
raise PatchApplicationFailed(
p,
'While running %s;\n%s%s' % (
' '.join(e.cmd),
align_stdout(stdout),
align_stdout([getattr(e, 'stdout', '')])))
found_files = self._check_output_git(
['diff', '--ignore-submodules',
'--name-only', '--staged']).splitlines(False)
if sorted(patches.filenames) != sorted(found_files):
extra_files = sorted(set(found_files) - set(patches.filenames))
unpatched_files = sorted(set(patches.filenames) - set(found_files))
if extra_files:
print 'Found extra files: %r' % (extra_files,)
if unpatched_files:
print 'Found unpatched files: %r' % (unpatched_files,)
def commit(self, commit_message, user):
"""Commits, updates the commit message and pushes."""
# TODO(hinoka): CQ no longer uses this, I think it's deprecated.
# Delete this.
assert self.commit_user
assert isinstance(commit_message, unicode)
current_branch = self._check_output_git(
['rev-parse', '--abbrev-ref', 'HEAD']).strip()
assert current_branch == self.working_branch
commit_cmd = ['commit', '-m', commit_message]
if user and user != self.commit_user:
# We do not have the first or last name of the user, grab the username
# from the email and call it the original author's name.
# TODO(rmistry): Do not need the below if user is already in
# "Name <email>" format.
name = user.split('@')[0]
commit_cmd.extend(['--author', '%s <%s>' % (name, user)])
self._check_call_git(commit_cmd)
# Push to the remote repository.
self._check_call_git(
['push', 'origin', '%s:%s' % (self.working_branch, self.remote_branch),
'--quiet'])
# Get the revision after the push.
revision = self._get_head_commit_hash()
# Switch back to the remote_branch and sync it.
self._check_call_git(['checkout', self.remote_branch])
self._sync_remote_branch()
# Delete the working branch since we are done with it.
self._check_call_git(['branch', '-D', self.working_branch])
return revision
def _check_call_git(self, args, **kwargs):
kwargs.setdefault('cwd', self.project_path)
kwargs.setdefault('stdout', self.VOID)
kwargs.setdefault('timeout', GLOBAL_TIMEOUT)
return subprocess2.check_call_out(['git'] + args, **kwargs)
def _call_git(self, args, **kwargs):
"""Like check_call but doesn't throw on failure."""
kwargs.setdefault('cwd', self.project_path)
kwargs.setdefault('stdout', self.VOID)
kwargs.setdefault('timeout', GLOBAL_TIMEOUT)
return subprocess2.call(['git'] + args, **kwargs)
def _check_output_git(self, args, **kwargs):
kwargs.setdefault('cwd', self.project_path)
kwargs.setdefault('timeout', GLOBAL_TIMEOUT)
return subprocess2.check_output(
['git'] + args, stderr=subprocess2.STDOUT, **kwargs)
def _branches(self):
"""Returns the list of branches and the active one."""
out = self._check_output_git(['branch']).splitlines(False)
branches = [l[2:] for l in out]
active = None
for l in out:
if l.startswith('*'):
active = l[2:]
break
return branches, active
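# For illustration: with `git branch` printing
#     master
#   * working_branch
# this returns (['master', 'working_branch'], 'working_branch'); the two
# leading characters of each line are the marker column stripped by l[2:].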
def revisions(self, rev1, rev2):
"""Returns the number of actual commits between both hash."""
self._fetch_remote()
rev2 = rev2 or '%s/%s' % (self.remote, self.remote_branch)
# Revision range is ]rev1, rev2] and ordering matters.
try:
out = self._check_output_git(
['log', '--format="%H"' , '%s..%s' % (rev1, rev2)])
except subprocess.CalledProcessError:
return None
return len(out.splitlines())
def _fetch_remote(self):
"""Fetches the remote without rebasing."""
# git fetch is always verbose even with -q, so redirect its output.
self._check_output_git(['fetch', self.remote, self.remote_branch],
timeout=FETCH_TIMEOUT)
class ReadOnlyCheckout(object):
"""Converts a checkout into a read-only one."""
def __init__(self, checkout, post_processors=None):
super(ReadOnlyCheckout, self).__init__()
self.checkout = checkout
self.post_processors = (post_processors or []) + (
self.checkout.post_processors or [])
def prepare(self, revision):
return self.checkout.prepare(revision)
def get_settings(self, key):
return self.checkout.get_settings(key)
def apply_patch(self, patches, post_processors=None, verbose=False):
return self.checkout.apply_patch(
patches, post_processors or self.post_processors, verbose)
def commit(self, message, user): # pylint: disable=R0201
logging.info('Would have committed for %s with message: %s' % (
user, message))
return 'FAKE'
def revisions(self, rev1, rev2):
return self.checkout.revisions(rev1, rev2)
@property
def project_name(self):
return self.checkout.project_name
@property
def project_path(self):
return self.checkout.project_path
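# A minimal usage sketch (hypothetical paths and objects, not executed here):
# wrapping a checkout makes commit() a logged no-op that returns 'FAKE' while
# prepare()/apply_patch() still delegate to the real checkout:
#
#   checkout = ReadOnlyCheckout(
#       RawCheckout('/tmp/work', 'project', post_processors=None))
#   checkout.prepare(None)           # RawCheckout.prepare is a stub
#   checkout.apply_patch(patch_set)  # patch_set: a patch.PatchSet
#   checkout.commit(u'message', 'someone@example.com')  # -> 'FAKE'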
|
|
"""A Dialog Window to display the result of a :class:`jukeboxcore.action.ActionCollection`."""
import abc
from PySide import QtGui, QtCore
from jukeboxcore.gui.main import JB_Dialog, JB_MainWindow
from jukeboxcore.gui.actionreport import create_action_model
from jukeboxcore.gui.widgetdelegate import WidgetDelegate, WD_TableView
from actionreportdialog_ui import Ui_ActionReportDialog
class TextPopupButton(QtGui.QPushButton):
"""A abstract push button that will show a textedit as popup when you click on it
Intended to be used in the :class:`jukeboxcore.gui.widgetdelegate.WidgetDelegate`.
Subclass it and reimplement :meth:`TextPopupButton.get_popup_text`
"""
def __init__(self, popuptitle, text, parent=None):
"""
:param popuptitle: Title for the popup. shown in the titlebar of the popup
:type popuptitle: str
:param text: Text on the button. Not in the popup.
:type text: str
:param parent: widget parent
:type parent: QtGui.QWidget
:raises: None
"""
super(TextPopupButton, self).__init__(text, parent)
self.popuptitle = popuptitle
self.setAutoFillBackground(True)
self.setText(text)
self.clicked.connect(self.show_popup)
def show_popup(self, *args, **kwargs):
"""Show a popup with a textedit
:returns: None
:rtype: None
:raises: None
"""
self.mw = JB_MainWindow(parent=self, flags=QtCore.Qt.Dialog)
self.mw.setWindowTitle(self.popuptitle)
self.mw.setWindowModality(QtCore.Qt.ApplicationModal)
w = QtGui.QWidget()
self.mw.setCentralWidget(w)
vbox = QtGui.QVBoxLayout(w)
pte = QtGui.QPlainTextEdit()
pte.setPlainText(self.get_popup_text())
vbox.addWidget(pte)
# move window to cursor position
d = self.cursor().pos() - self.mw.mapToGlobal(self.mw.pos())
self.mw.move(d)
self.mw.show()
@abc.abstractmethod
def get_popup_text(self):
"""Return a text for the popup
:returns: some text
:rtype: str
:raises: None
"""
pass
class TracebackButton(TextPopupButton):
"""A push button that will show the traceback of an :class:`jukeboxcore.action.ActionUnit`.
Intended to be used in the :class:`jukeboxcore.gui.widgetdelegate.ActionUnitDelegate`.
"""
def __init__(self, parent=None):
"""Initialize a new TracebackButton
:param parent: widget parent
:type parent: QtGui.QWidget
:raises: None
"""
super(TracebackButton, self).__init__("Traceback", "Show Traceback", parent)
self.actionunit = None # the current action unit
def set_index(self, index):
"""Display the data of the given index
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
item = index.internalPointer()
self.actionunit = item.internal_data()
self.setEnabled(bool(self.actionunit.status.traceback))
@abc.abstractmethod
def get_popup_text(self):
"""Return a text for the popup
:returns: some text
:rtype: str
:raises: None
"""
if self.actionunit:
return self.actionunit.status.traceback
else:
return ""
class MessageButton(TextPopupButton):
"""A push button that will show the message of an :class:`jukeboxcore.action.ActionUnit`.
Intended to be used in the :class:`jukeboxcore.gui.widgetdelegate.ActionUnitDelegate`.
"""
def __init__(self, parent=None):
"""Initialize a new MessageButton
:param parent: widget parent
:type parent: QtGui.QWidget
:raises: None
"""
super(MessageButton, self).__init__("Message", "Show Message", parent)
self.actionunit = None # the current action unit
def set_index(self, index):
"""Display the data of the given index
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
item = index.internalPointer()
self.actionunit = item.internal_data()
self.setEnabled(bool(self.actionunit.status.message))
@abc.abstractmethod
def get_popup_text(self):
"""Return a text for the popup
:returns: some text
:rtype: str
:raises: None
"""
if self.actionunit:
return self.actionunit.status.message
else:
return ""
class ActionUnitTracebackDelegate(WidgetDelegate):
"""A delegate for drawing the tracebackcolumn of a :class:`jukeboxcore.gui.actionreport.ActionItenData`.
"""
def __init__(self, parent=None):
"""
:param parent: the parent object
:type parent: QObject
:raises: None
"""
super(ActionUnitTracebackDelegate, self).__init__(parent)
def create_widget(self, parent=None):
"""Return a widget that should get painted by the delegate
You might want to use this in :meth:`WidgetDelegate.createEditor`
:returns: The created widget | None
:rtype: QtGui.QWidget | None
:raises: None
"""
return TracebackButton(parent)
def set_widget_index(self, index):
"""Set the index for the widget. The widget should retrieve data from the index and display it.
You might want to use the same function as for :meth:`WidgetDelegate.setEditorData`.
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
self.widget.set_index(index)
def create_editor_widget(self, parent, option, index):
"""Return the editor to be used for editing the data item with the given index.
Note that the index contains information about the model being used.
The editor's parent widget is specified by parent, and the item options by option.
:param parent: the parent widget
:type parent: QtGui.QWidget
:param option: the options for painting
:type option: QtGui.QStyleOptionViewItem
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: Widget
:rtype: :class:`QtGui.QWidget`
:raises: None
"""
return self.create_widget(parent)
def setEditorData(self, editor, index):
"""Sets the contents of the given editor to the data for the item at the given index.
Note that the index contains information about the model being used.
:param editor: the editor widget
:type editor: QtGui.QWidget
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
editor.set_index(index)
class ActionUnitMessageDelegate(WidgetDelegate):
"""A delegate for drawing the tracebackcolumn of a :class:`jukeboxcore.gui.actionreport.ActionItenData`.
"""
def __init__(self, parent=None):
"""
:param parent: the parent object
:type parent: QObject
:raises: None
"""
super(ActionUnitMessageDelegate, self).__init__(parent)
def create_widget(self, parent=None):
"""Return a widget that should get painted by the delegate
You might want to use this in :meth:`WidgetDelegate.createEditor`
:returns: The created widget | None
:rtype: QtGui.QWidget | None
:raises: None
"""
return MessageButton(parent)
def set_widget_index(self, index):
"""Set the index for the widget. The widget should retrieve data from the index and display it.
You might want to use the same function as for :meth:`WidgetDelegate.setEditorData`.
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
self.widget.set_index(index)
def create_editor_widget(self, parent, option, index):
"""Return the editor to be used for editing the data item with the given index.
Note that the index contains information about the model being used.
The editor's parent widget is specified by parent, and the item options by option.
:param parent: the parent widget
:type parent: QtGui.QWidget
:param option: the options for painting
:type option: QtGui.QStyleOptionViewItem
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: Widget
:rtype: :class:`QtGui.QWidget`
:raises: None
"""
return self.create_widget(parent)
def setEditorData(self, editor, index):
"""Sets the contents of the given editor to the data for the item at the given index.
Note that the index contains information about the model being used.
:param editor: the editor widget
:type editor: QtGui.QWidget
:param index: the index to paint
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
editor.set_index(index)
class ActionReportDialog(JB_Dialog, Ui_ActionReportDialog):
"""A dialog that can show the result of a :class:`jukeboxcore.action.ActionCollection`
The dialog will ask the user to confirm the report or cancel.
The dialog uses the actionreportdialog.ui for its layout.
"""
def __init__(self, actioncollection, parent=None, flags=0):
"""Construct a new dialog for the given action collection
:param actioncollection: the action collection to report
:type actioncollection: :class:`jukeboxcore.action.ActionCollection`
:param parent: Optional - the parent of the window - default is None
:type parent: QWidget
:param flags: the window flags
:type flags: QtCore.Qt.WindowFlags
:raises: None
"""
super(ActionReportDialog, self).__init__(parent, flags)
self.setupUi(self)
self._actioncollection = actioncollection
self._parent = parent
self._flags = flags
status = self._actioncollection.status()
self.status_lb.setText(status.value)
self.message_lb.setText(status.message)
self.traceback_pte.setPlainText(status.traceback)
self.traceback_pte.setVisible(False)
model = create_action_model(self._actioncollection)
self.actions_tablev = WD_TableView(self)
self.actions_tablev.setModel(model)
self.verticalLayout.insertWidget(1, self.actions_tablev)
self.msgdelegate = ActionUnitMessageDelegate(self)
self.tbdelegate = ActionUnitTracebackDelegate(self)
self.actions_tablev.setItemDelegateForColumn(3, self.msgdelegate)
self.actions_tablev.setItemDelegateForColumn(4, self.tbdelegate)
self.actions_tablev.horizontalHeader().setStretchLastSection(True)
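# A minimal usage sketch (assumptions: a running QApplication exists and
# `collection` is a jukeboxcore.action.ActionCollection that has already been
# executed; JB_Dialog is assumed to behave like a regular QtGui.QDialog):
#
#   dialog = ActionReportDialog(collection)
#   confirmed = dialog.exec_()  # user confirms the report or cancels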
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Use avbin to decode audio and video media.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import struct
import ctypes
import threading
import time
import pyglet
from pyglet import gl
from pyglet.gl import gl_info
from pyglet import image
import pyglet.lib
from pyglet.media import \
MediaFormatException, StreamingSource, VideoFormat, AudioFormat, \
AudioData, MediaEvent, WorkerThread, SourceInfo
from pyglet.compat import asbytes, asbytes_filename
if pyglet.compat_platform.startswith('win') and struct.calcsize('P') == 8:
av = 'avbin64'
else:
av = 'avbin'
av = pyglet.lib.load_library(av)
AVBIN_RESULT_ERROR = -1
AVBIN_RESULT_OK = 0
AVbinResult = ctypes.c_int
AVBIN_STREAM_TYPE_UNKNOWN = 0
AVBIN_STREAM_TYPE_VIDEO = 1
AVBIN_STREAM_TYPE_AUDIO = 2
AVbinStreamType = ctypes.c_int
AVBIN_SAMPLE_FORMAT_U8 = 0
AVBIN_SAMPLE_FORMAT_S16 = 1
AVBIN_SAMPLE_FORMAT_S24 = 2
AVBIN_SAMPLE_FORMAT_S32 = 3
AVBIN_SAMPLE_FORMAT_FLOAT = 4
AVbinSampleFormat = ctypes.c_int
AVBIN_LOG_QUIET = -8
AVBIN_LOG_PANIC = 0
AVBIN_LOG_FATAL = 8
AVBIN_LOG_ERROR = 16
AVBIN_LOG_WARNING = 24
AVBIN_LOG_INFO = 32
AVBIN_LOG_VERBOSE = 40
AVBIN_LOG_DEBUG = 48
AVbinLogLevel = ctypes.c_int
AVbinFileP = ctypes.c_void_p
AVbinStreamP = ctypes.c_void_p
Timestamp = ctypes.c_int64
class AVbinFileInfo(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
('n_streams', ctypes.c_int),
('start_time', Timestamp),
('duration', Timestamp),
('title', ctypes.c_char * 512),
('author', ctypes.c_char * 512),
('copyright', ctypes.c_char * 512),
('comment', ctypes.c_char * 512),
('album', ctypes.c_char * 512),
('year', ctypes.c_int),
('track', ctypes.c_int),
('genre', ctypes.c_char * 32),
]
class _AVbinStreamInfoVideo8(ctypes.Structure):
_fields_ = [
('width', ctypes.c_uint),
('height', ctypes.c_uint),
('sample_aspect_num', ctypes.c_uint),
('sample_aspect_den', ctypes.c_uint),
('frame_rate_num', ctypes.c_uint),
('frame_rate_den', ctypes.c_uint),
]
class _AVbinStreamInfoAudio8(ctypes.Structure):
_fields_ = [
('sample_format', ctypes.c_int),
('sample_rate', ctypes.c_uint),
('sample_bits', ctypes.c_uint),
('channels', ctypes.c_uint),
]
class _AVbinStreamInfoUnion8(ctypes.Union):
_fields_ = [
('video', _AVbinStreamInfoVideo8),
('audio', _AVbinStreamInfoAudio8),
]
class AVbinStreamInfo8(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
('type', ctypes.c_int),
('u', _AVbinStreamInfoUnion8)
]
class AVbinPacket(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
('timestamp', Timestamp),
('stream_index', ctypes.c_int),
('data', ctypes.POINTER(ctypes.c_uint8)),
('size', ctypes.c_size_t),
]
AVbinLogCallback = ctypes.CFUNCTYPE(None,
ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p)
av.avbin_get_version.restype = ctypes.c_int
av.avbin_get_ffmpeg_revision.restype = ctypes.c_int
av.avbin_get_audio_buffer_size.restype = ctypes.c_size_t
av.avbin_have_feature.restype = ctypes.c_int
av.avbin_have_feature.argtypes = [ctypes.c_char_p]
av.avbin_init.restype = AVbinResult
av.avbin_set_log_level.restype = AVbinResult
av.avbin_set_log_level.argtypes = [AVbinLogLevel]
av.avbin_set_log_callback.argtypes = [AVbinLogCallback]
av.avbin_open_filename.restype = AVbinFileP
av.avbin_open_filename.argtypes = [ctypes.c_char_p]
av.avbin_close_file.argtypes = [AVbinFileP]
av.avbin_seek_file.argtypes = [AVbinFileP, Timestamp]
av.avbin_file_info.argtypes = [AVbinFileP, ctypes.POINTER(AVbinFileInfo)]
av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int,
ctypes.POINTER(AVbinStreamInfo8)]
av.avbin_open_stream.restype = ctypes.c_void_p
av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int]
av.avbin_close_stream.argtypes = [AVbinStreamP]
av.avbin_read.argtypes = [AVbinFileP, ctypes.POINTER(AVbinPacket)]
av.avbin_read.restype = AVbinResult
av.avbin_decode_audio.restype = ctypes.c_int
av.avbin_decode_audio.argtypes = [AVbinStreamP,
ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)]
av.avbin_decode_video.restype = ctypes.c_int
av.avbin_decode_video.argtypes = [AVbinStreamP,
ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_void_p]
if True:
# XXX lock all avbin calls. not clear from ffmpeg documentation if this
# is necessary. leaving it on while debugging to rule out the possibility
# of a problem.
def synchronize(func, lock):
def f(*args):
lock.acquire()
result = func(*args)
lock.release()
return result
return f
_avbin_lock = threading.Lock()
for name in dir(av):
if name.startswith('avbin_'):
setattr(av, name, synchronize(getattr(av, name), _avbin_lock))
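# For illustration: after the loop above, every av.avbin_* entry point is
# wrapped, so a call such as av.avbin_read(file, packet) behaves roughly like
#   _avbin_lock.acquire(); result = avbin_read(file, packet); _avbin_lock.release()
# i.e. all avbin calls from the decoder thread and the main thread are
# serialized through one lock.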
def get_version():
return av.avbin_get_version()
class AVbinException(MediaFormatException):
pass
def timestamp_from_avbin(timestamp):
return float(timestamp) / 1000000
def timestamp_to_avbin(timestamp):
return int(timestamp * 1000000)
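# AVbin expresses timestamps in microseconds, so for illustration:
#   timestamp_from_avbin(1500000) -> 1.5   (seconds)
#   timestamp_to_avbin(1.5)       -> 1500000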
class VideoPacket(object):
_next_id = 0
def __init__(self, packet):
self.timestamp = timestamp_from_avbin(packet.timestamp)
self.data = (ctypes.c_uint8 * packet.size)()
self.size = packet.size
ctypes.memmove(self.data, packet.data, self.size)
# Decoded image. 0 == not decoded yet; None == Error or discarded
self.image = 0
self.id = self._next_id
self.__class__._next_id += 1
class AVbinSource(StreamingSource):
def __init__(self, filename, file=None):
if file is not None:
raise NotImplementedError('TODO: Load from file stream')
self._file = av.avbin_open_filename(asbytes_filename(filename))
if not self._file:
raise AVbinException('Could not open "%s"' % filename)
self._video_stream = None
self._video_stream_index = -1
self._audio_stream = None
self._audio_stream_index = -1
file_info = AVbinFileInfo()
file_info.structure_size = ctypes.sizeof(file_info)
av.avbin_file_info(self._file, ctypes.byref(file_info))
self._duration = timestamp_from_avbin(file_info.duration)
self.info = SourceInfo()
self.info.title = file_info.title
self.info.author = file_info.author
self.info.copyright = file_info.copyright
self.info.comment = file_info.comment
self.info.album = file_info.album
self.info.year = file_info.year
self.info.track = file_info.track
self.info.genre = file_info.genre
# Pick the first video and audio streams found, ignore others.
for i in range(file_info.n_streams):
info = AVbinStreamInfo8()
info.structure_size = ctypes.sizeof(info)
av.avbin_stream_info(self._file, i, info)
if (info.type == AVBIN_STREAM_TYPE_VIDEO and
not self._video_stream):
stream = av.avbin_open_stream(self._file, i)
if not stream:
continue
self.video_format = VideoFormat(
width=info.u.video.width,
height=info.u.video.height)
if info.u.video.sample_aspect_num != 0:
self.video_format.sample_aspect = (
float(info.u.video.sample_aspect_num) /
info.u.video.sample_aspect_den)
if _have_frame_rate:
self.video_format.frame_rate = (
float(info.u.video.frame_rate_num) /
info.u.video.frame_rate_den)
self._video_stream = stream
self._video_stream_index = i
elif (info.type == AVBIN_STREAM_TYPE_AUDIO and
info.u.audio.sample_bits in (8, 16) and
info.u.audio.channels in (1, 2) and
not self._audio_stream):
stream = av.avbin_open_stream(self._file, i)
if not stream:
continue
self.audio_format = AudioFormat(
channels=info.u.audio.channels,
sample_size=info.u.audio.sample_bits,
sample_rate=info.u.audio.sample_rate)
self._audio_stream = stream
self._audio_stream_index = i
self._packet = AVbinPacket()
self._packet.structure_size = ctypes.sizeof(self._packet)
self._packet.stream_index = -1
self._events = []
# Timestamp of last video packet added to decoder queue.
self._video_timestamp = 0
self._buffered_audio_data = []
if self.audio_format:
self._audio_buffer = \
(ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()
if self.video_format:
self._video_packets = []
self._decode_thread = WorkerThread()
self._decode_thread.start()
self._condition = threading.Condition()
def __del__(self):
if _debug:
print 'del avbin source'
try:
if self._video_stream:
av.avbin_close_stream(self._video_stream)
if self._audio_stream:
av.avbin_close_stream(self._audio_stream)
av.avbin_close_file(self._file)
except:
pass
# XXX TODO call this / add to source api
def delete(self):
if self.video_format:
self._decode_thread.stop()
def seek(self, timestamp):
if _debug:
print 'AVbin seek', timestamp
av.avbin_seek_file(self._file, timestamp_to_avbin(timestamp))
self._audio_packet_size = 0
del self._events[:]
del self._buffered_audio_data[:]
if self.video_format:
self._video_timestamp = 0
self._condition.acquire()
for packet in self._video_packets:
packet.image = None
self._condition.notify()
self._condition.release()
del self._video_packets[:]
self._decode_thread.clear_jobs()
def _get_packet(self):
# Read a packet into self._packet. Returns True if OK, False if no
# more packets are in stream.
return av.avbin_read(self._file, self._packet) == AVBIN_RESULT_OK
def _process_packet(self):
# Returns (packet_type, packet), where packet_type = 'video' or
# 'audio'; and packet is VideoPacket or AudioData. In either case,
# packet is buffered or queued for decoding; no further action is
# necessary. Returns (None, None) if packet was neither type.
if self._packet.stream_index == self._video_stream_index:
if self._packet.timestamp < 0:
# XXX TODO
# AVbin needs hack to decode timestamp for B frames in
# some containers (OGG?). See
# http://www.dranger.com/ffmpeg/tutorial05.html
# For now we just drop these frames.
return None, None
video_packet = VideoPacket(self._packet)
if _debug:
print 'Created and queued frame %d (%f)' % \
(video_packet.id, video_packet.timestamp)
self._video_timestamp = max(self._video_timestamp,
video_packet.timestamp)
self._video_packets.append(video_packet)
self._decode_thread.put_job(
lambda: self._decode_video_packet(video_packet))
return 'video', video_packet
elif self._packet.stream_index == self._audio_stream_index:
audio_data = self._decode_audio_packet()
if audio_data:
if _debug:
print 'Got an audio packet at', audio_data.timestamp
self._buffered_audio_data.append(audio_data)
return 'audio', audio_data
return None, None
def get_audio_data(self, bytes):
try:
audio_data = self._buffered_audio_data.pop(0)
audio_data_timeend = audio_data.timestamp + audio_data.duration
except IndexError:
audio_data = None
audio_data_timeend = self._video_timestamp + 1
if _debug:
print 'get_audio_data'
have_video_work = False
# Keep reading packets until we have an audio packet and all the
# associated video packets have been enqueued on the decoder thread.
while not audio_data or (
self._video_stream and self._video_timestamp < audio_data_timeend):
if not self._get_packet():
break
packet_type, packet = self._process_packet()
if packet_type == 'video':
have_video_work = True
elif not audio_data and packet_type == 'audio':
audio_data = self._buffered_audio_data.pop(0)
if _debug:
print 'Got requested audio packet at', audio_data.timestamp
audio_data_timeend = audio_data.timestamp + audio_data.duration
if have_video_work:
# Give decoder thread a chance to run before we return this audio
# data.
time.sleep(0)
if not audio_data:
if _debug:
print 'get_audio_data returning None'
return None
while self._events and self._events[0].timestamp <= audio_data_timeend:
event = self._events.pop(0)
if event.timestamp >= audio_data.timestamp:
event.timestamp -= audio_data.timestamp
audio_data.events.append(event)
if _debug:
print 'get_audio_data returning ts %f with events' % \
audio_data.timestamp, audio_data.events
print 'remaining events are', self._events
return audio_data
def _decode_audio_packet(self):
packet = self._packet
size_out = ctypes.c_int(len(self._audio_buffer))
while True:
audio_packet_ptr = ctypes.cast(packet.data, ctypes.c_void_p)
audio_packet_size = packet.size
used = av.avbin_decode_audio(self._audio_stream,
audio_packet_ptr, audio_packet_size,
self._audio_buffer, size_out)
if used < 0:
self._audio_packet_size = 0
break
audio_packet_ptr.value += used
audio_packet_size -= used
if size_out.value <= 0:
continue
# XXX how did this ever work? replaced with copy below
# buffer = ctypes.string_at(self._audio_buffer, size_out)
# XXX to actually copy the data.. but it never used to crash, so
# maybe I'm missing something
buffer = ctypes.create_string_buffer(size_out.value)
ctypes.memmove(buffer, self._audio_buffer, len(buffer))
buffer = buffer.raw
duration = float(len(buffer)) / self.audio_format.bytes_per_second
self._audio_packet_timestamp = \
timestamp = timestamp_from_avbin(packet.timestamp)
return AudioData(buffer, len(buffer), timestamp, duration, [])
def _decode_video_packet(self, packet):
width = self.video_format.width
height = self.video_format.height
pitch = width * 3
buffer = (ctypes.c_uint8 * (pitch * height))()
result = av.avbin_decode_video(self._video_stream,
packet.data, packet.size,
buffer)
if result < 0:
image_data = None
else:
image_data = image.ImageData(width, height, 'RGB', buffer, pitch)
packet.image = image_data
# Notify get_next_video_frame() that another one is ready.
self._condition.acquire()
self._condition.notify()
self._condition.release()
def _ensure_video_packets(self):
'''Process packets until a video packet has been queued (and begun
decoding). Return False if EOS.
'''
if not self._video_packets:
if _debug:
print 'No video packets...'
# Read ahead until we have another video packet
self._get_packet()
packet_type, _ = self._process_packet()
while packet_type and packet_type != 'video':
self._get_packet()
packet_type, _ = self._process_packet()
if not packet_type:
return False
if _debug:
print 'Queued packet', _
return True
def get_next_video_timestamp(self):
if not self.video_format:
return
if self._ensure_video_packets():
if _debug:
print 'Next video timestamp is', self._video_packets[0].timestamp
return self._video_packets[0].timestamp
def get_next_video_frame(self):
if not self.video_format:
return
if self._ensure_video_packets():
packet = self._video_packets.pop(0)
if _debug:
print 'Waiting for', packet
# Block until decoding is complete
self._condition.acquire()
while packet.image == 0:
self._condition.wait()
self._condition.release()
if _debug:
print 'Returning', packet
return packet.image
av.avbin_init()
if pyglet.options['debug_media']:
_debug = True
av.avbin_set_log_level(AVBIN_LOG_DEBUG)
else:
_debug = False
av.avbin_set_log_level(AVBIN_LOG_QUIET)
_have_frame_rate = av.avbin_have_feature(asbytes('frame_rate'))
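# Minimal usage sketch (assumption: 'example.mp4' is a placeholder path and the
# rest of pyglet's media package, imported at the top of this module, is
# available): open a source and pull the first decoded video frame.
if __name__ == '__main__':
    _source = AVbinSource('example.mp4')
    print 'audio format:', _source.audio_format
    print 'video format:', _source.video_format
    print 'next video timestamp:', _source.get_next_video_timestamp()
    print 'first frame:', _source.get_next_video_frame()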
|
|
import os
import sys
import traceback
import re
import time
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.linux as linux
import apibinding.inventory as inventory
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.header.volume as volume_header
import zstackwoodpecker.header.snapshot as sp_header
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
################################################################################
# vdbench_file.py output:
#The first time running vdbench in vm:
#disklist:/dev/disk/by-uuid/$disk_uuid:$disk_size
#
#Add and remove volume:
#add:/dev/disk/by-uuid/$disk_uuid:$disk_size
#remove:/dev/disk/by-uuid/$disk_uuid:$disk_size
#
#Resize volume:
#resize:/dev/disk/by-uuid/$disk_uuid:$disk_size
#
#No volume change:
#same disk
#
#Validate:
#validate successfully
#if validate failed:
#validate failed on $disk_path
#if all disk been removed:
#All old disks have been removed,skip validation
#
#run test:
#generate successfully
#if all disk been removed:
#no disk attached, skip generating
#################################################################################
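# Minimal parsing sketch (the helper below is illustrative only and not part of
# zstackwoodpecker): splits one vdbench result line of the form
# "<label>:<disk_path>:<disk_size>[:<md5>]" into its colon-separated fields.
def _parse_vdbench_result_line(line):
    fields = line.strip().split(':')
    # e.g. ['add', '/dev/disk/by-uuid/1234', '10G'] or
    # ['old disks', '/dev/disk/by-id/wwn-...-part1', '10G', '<md5>']
    return fields if len(fields) >= 3 else None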
class zstack_kvm_vm_attach_volume_checker(checker_header.TestChecker):
'''
Check if volume is really attached to vm inside vm
'''
def check(self):
super(zstack_kvm_vm_attach_volume_checker, self).check()
volume = self.test_obj.volume
vm = self.test_obj.target_vm.vm
if vm.state != "Running":
test_util.test_logger('Check result: Skip attach_volume_checker since VM is not in Running state')
return self.judge(True)
# test_lib.lib_install_testagent_to_vr(vm)
host = test_lib.lib_get_vm_host(vm)
test_lib.lib_install_testagent_to_host(host)
test_lib.lib_set_vm_host_l2_ip(vm)
default_l3_uuid = vm.defaultL3NetworkUuid
# vr = test_lib.lib_find_vr_by_pri_l3(default_l3_uuid)
# nic = test_lib.lib_get_vm_nic_by_vr(vm, vr)
nic = vm.vmNics[0]
command = 'cat /root/result'
cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), command, self.exp_result)
test_util.test_logger("czhou: %s" % cmd_result)
#If it's a virtio-scsi volume, check the wwn in the output
conditions = res_ops.gen_query_conditions('tag', '=', 'capability::virtio-scsi')
conditions = res_ops.gen_query_conditions('resourceUuid', '=', volume.uuid, conditions)
systemtag = res_ops.query_resource(res_ops.SYSTEM_TAG, conditions)
size = str(int(volume.size)/1024/1024)+'M' if int(volume.size)/1024/1024 < 1024 else str(int(volume.size)/1024/1024/1024)+'G'
if isinstance(cmd_result, str) and systemtag:
condition = res_ops.gen_query_conditions("resourceUuid", '=', volume.uuid)
for i in res_ops.query_resource(res_ops.SYSTEM_TAG, condition):
if re.split("::",i.tag)[0] == "kvm":
wwn = re.split("::",i.tag)[2]
for output in cmd_result.splitlines():
if "old disks:/dev/disk/by-id/wwn-"+wwn+"-part1:"+size in output:
disk_md5 = re.split(":",output)[3]
vol_md5 = self.test_obj.get_md5sum()
if disk_md5 == vol_md5:
test_util.test_logger("Checker result: Success to check md5sum of attached virtioscsi volume [%s] in vm " % wwn)
continue
else:
test_util.test_logger("Checker result: Fail to check md5sum of attached virtioscsi volume [%s] in vm " % wwn)
return self.judge(False)
if "new disks:/dev/disk/by-id/wwn-"+wwn+"-part1:"+size in output:
disk_md5 = re.split(":",output)[3]
self.test_obj.set_md5sum(disk_md5)
return self.judge(True)
test_util.test_logger("Checker result: Fail to check wwn of attached virtioscsi volume [%s] in vm" % wwn)
return self.judge(False)
#If it's a virtio-blk volume, we can only check the volume size and 'add' label in the output
if not systemtag:
#Skip virtio-blk check until we have a proper solution
test_util.test_logger("Checker result: Skip to check wwn of attached virtioblk volume [%s] in vm " % cmd_result)
return self.judge(False)
if re.split(":",cmd_result)[0] == "add" and re.split(":",cmd_result)[2] == size:
test_util.test_logger("Checker result: Success to check virtioblk attached volume [%s] in vm" % cmd_result)
return self.judge(True)
if "present disks" in cmd_result and size in cmd_result:
test_util.test_logger("Checker result: Success to attach virtioblk volume [%s] in vm" % cmd_result)
return self.judge(True)
if "present disks" in cmd_result and size not in cmd_result:
test_util.test_logger("Checker result: Success to attach virtioblk volume [%s] in vm" % cmd_result)
return self.judge(False)
test_util.test_logger("Checker result: Fail to check virtioblk attached volume [%s] in vm" % cmd_result)
return self.judge(False)
return self.judge(False)
class zstack_kvm_vm_detach_volume_checker(checker_header.TestChecker):
'''
Check if volume is really detached from vm inside vm
'''
def check(self):
super(zstack_kvm_vm_detach_volume_checker, self).check()
volume = self.test_obj.volume
vm = self.test_obj.target_vm.vm
if vm.state != "Running":
            test_util.test_logger('Check result: Skip detach_volume_checker since VM is not in Running state')
return self.judge(True)
# test_lib.lib_install_testagent_to_vr(vm)
host = test_lib.lib_get_vm_host(vm)
test_lib.lib_install_testagent_to_host(host)
test_lib.lib_set_vm_host_l2_ip(vm)
default_l3_uuid = vm.defaultL3NetworkUuid
# vr = test_lib.lib_find_vr_by_pri_l3(default_l3_uuid)
# nic = test_lib.lib_get_vm_nic_by_vr(vm, vr)
nic = vm.vmNics[0]
command = 'cat /root/result'
cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), command, self.exp_result)
test_util.test_logger("czhou: %s" % cmd_result)
conditions = res_ops.gen_query_conditions('tag', '=', 'capability::virtio-scsi')
conditions = res_ops.gen_query_conditions('resourceUuid', '=', volume.uuid, conditions)
systemtag = res_ops.query_resource(res_ops.SYSTEM_TAG, conditions)
size = str(int(volume.size)/1024/1024)+'M' if int(volume.size)/1024/1024 < 1024 else str(int(volume.size)/1024/1024/1024)+'G'
#If it's a virtio-scsi volume, check the wwn in the output
if isinstance(cmd_result, str) and systemtag:
condition = res_ops.gen_query_conditions("resourceUuid", '=', volume.uuid)
for i in res_ops.query_resource(res_ops.SYSTEM_TAG, condition):
if re.split("::",i.tag)[0] == "kvm":
wwn = re.split("::",i.tag)[2]
if "old disks:/dev/disk/by-id/wwn-"+wwn+"-part1:"+size not in cmd_result and "new disks:/dev/disk/by-id/wwn-"+wwn+"-part1:"+size not in cmd_result:
test_util.test_logger("Checker result: Success to check wwn of detached virtioscsi volume [%s] in vm " % wwn)
return self.judge(True)
test_util.test_logger("Checker result: Fail to check wwn of detached virtioscsi volume [%s] in vm" % wwn)
return self.judge(False)
#If it's a virtio-blk volume, we can only check the volume size and 'remove' label in the output
if isinstance(cmd_result, str) and not systemtag:
#Skip virtio-blk check until we have a proper solution
test_util.test_logger("Checker result: Skip to check wwn of detached virtioblk volume [%s] in vm " % cmd_result)
return self.judge(False)
if re.split(":",cmd_result)[0] == "remove" and re.split(":",cmd_result)[2] == size:
test_util.test_logger("Checker result: Success to check virtioblk detached volume [%s] in vm" % cmd_result)
return self.judge(True)
if "present disks" in cmd_result and size not in cmd_result:
test_util.test_logger("Checker result: Success to detach virtioblk volume [%s] in vm" % cmd_result)
return self.judge(True)
if "present disks" in cmd_result and size in cmd_result:
test_util.test_logger("Checker result: Failed to detach virtioblk volume [%s] in vm" % cmd_result)
return self.judge(False)
test_util.test_logger("Checker result: Fail to check virtioblk detached volume [%s] in vm" % cmd_result)
return self.judge(False)
return self.judge(False)
class zstack_kvm_vm_data_integrity_checker(checker_header.TestChecker):
'''
Check data integrity inside vm using vdbench
'''
def check(self):
super(zstack_kvm_vm_data_integrity_checker, self).check()
if isinstance(self.test_obj, volume_header.TestVolume):
volume = self.test_obj.volume
vm = self.test_obj.target_vm.vm
if isinstance(self.test_obj, vm_header.TestVm):
vm = self.test_obj.vm
if isinstance(self.test_obj, zstack_sp_header.ZstackVolumeSnapshot):
volume_obj = self.test_obj.get_target_volume()
vm = volume_obj.get_target_vm()
if vm.state != "Running":
            test_util.test_logger('Check result: Skip data_integrity_checker since VM is not in Running state')
return self.judge(True)
time.sleep(30)
#test_lib.lib_install_testagent_to_vr(vm)
host = test_lib.lib_get_vm_host(vm)
test_lib.lib_install_testagent_to_host(host)
test_lib.lib_set_vm_host_l2_ip(vm)
default_l3_uuid = vm.defaultL3NetworkUuid
#vr = test_lib.lib_find_vr_by_pri_l3(default_l3_uuid)
#nic = test_lib.lib_get_vm_nic_by_vr(vm, vr)
nic = vm.vmNics[0]
#print partition information
cmd = 'ls -l /dev/disk/by-id/'
cmd_res = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), cmd, self.exp_result)
test_util.test_logger("partition information: %s" % cmd_res)
#exec vdbench
command = 'python /root/vdbench_test.py | tee result'
cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), command, self.exp_result, 360)
test_util.test_logger("czhou: %s" % cmd_result)
if isinstance(cmd_result, str) and "generate successfully" in cmd_result:
test_util.test_logger("Checker result: Success to validate data integrity, output: %s" % cmd_result)
return self.judge(True)
if isinstance(cmd_result, str) and "no disk attached, skip generating" in cmd_result:
test_util.test_logger("Checker result: No validationg and no generating, output: %s" % cmd_result)
return self.judge(True)
#if isinstance(cmd_result, str) and "All old disks have been removed,skip validation" in cmd_result:
# if "generate successfully" in cmd_result or "skip generating" in cmd_result:
# test_util.test_logger("Checker result: Skip validation checker since all disks have been removed")
# return self.judge(True)
# else:
# test_util.test_logger("Checker result: Skip validation checker since all disks have been removed, but generating data failed on volume output: %s" % cmd_result)
# return self.judge(False)
#if isinstance(cmd_result, str) and "validate successfully" in cmd_result:
# if "generate successfully" in cmd_result or "skip generating" in cmd_result:
# test_util.test_logger("Checker result: Success to validate data integrity, output: %s" % cmd_result)
# return self.judge(True)
# else:
# test_util.test_logger("Checker result: Success to validate data integrity, but generating data failed on volume output: %s" % cmd_result)
# return self.judge(False)
#if isinstance(cmd_result, str) and "validate failed on" in cmd_result:
# test_util.test_logger("Checker result: Fail to validate data integrity, output: %s" % cmd_result)
# return self.judge(False)
return self.judge(False)
|
|
from distutils.version import StrictVersion
from datetime import datetime
from markupsafe import Markup
from flask import current_app
# //cdnjs.cloudflare.com/ajax/libs/moment.js/2.27.0/moment-with-locales.min.js
default_moment_version = '2.29.1'
default_moment_sri = ('sha512-LGXaggshOkD/at6PFNcp2V2unf9LzFq6LE+sChH7ceMTDP0'
'g2kn6Vxwgg7wkPP7AAtX+lmPqPdxB47A0Nz0cMQ==')
js_code = '''function flask_moment_render(elem) {{
const timestamp = moment(elem.dataset.timestamp);
const func = elem.dataset.function;
const format = elem.dataset.format;
const timestamp2 = elem.dataset.timestamp2;
const no_suffix = elem.dataset.nosuffix;
const units = elem.dataset.units;
let args = [];
if (format)
args.push(format);
if (timestamp2)
args.push(moment(timestamp2));
if (no_suffix)
args.push(no_suffix);
if (units)
args.push(units);
elem.textContent = timestamp[func].apply(timestamp, args);
elem.classList.remove('flask-moment');
elem.style.display = "";
}}
function flask_moment_render_all() {{
const moments = document.querySelectorAll('.flask-moment');
moments.forEach(function(moment) {{
flask_moment_render(moment);
const refresh = moment.dataset.refresh;
if (refresh && refresh > 0) {{
(function(elem, interval) {{
setInterval(function() {{
flask_moment_render(elem);
}}, interval);
}})(moment, refresh);
}}
}})
}}
document.addEventListener("DOMContentLoaded", flask_moment_render_all);'''
class moment(object):
"""Create a moment object.
:param timestamp: The ``datetime`` object representing the timestamp.
:param local: If ``True``, the ``timestamp`` argument is given in the
local client time. In most cases this argument will be set
to ``False`` and all the timestamps managed by the server
will be in the UTC timezone.
"""
@classmethod
def include_moment(cls, version=default_moment_version, local_js=None,
no_js=None, sri=None, with_locales=True):
"""Include the moment.js library and the supporting JavaScript code
used by this extension.
This function must be called in the ``<head>`` section of the Jinja
template(s) that use this extension.
:param version: The version of moment.js to include.
:param local_js: The URL to import the moment.js library from. Use this
option to import the library from a locally hosted
file.
:param no_js: Just add the supporting code for this extension, without
                      importing the moment.js library. Use this option if
the library is imported elsewhere in the template. The
supporting JavaScript code for this extension is still
included.
:param sri: The SRI hash to use when importing the moment.js library,
or ``None`` if the SRI hash is unknown or disabled.
:param with_locales: If ``True``, include the version of moment.js that
has all the locales.
"""
mjs = ''
if version == default_moment_version and local_js is None and \
with_locales is True and sri is None:
sri = default_moment_sri
if not no_js:
if local_js is not None:
if not sri:
mjs = '<script src="{}"></script>\n'.format(local_js)
else:
mjs = ('<script src="{}" integrity="{}" '
'crossorigin="anonymous"></script>\n').format(
local_js, sri)
elif version is not None:
if with_locales:
js_filename = 'moment-with-locales.min.js' \
if StrictVersion(version) >= StrictVersion('2.8.0') \
else 'moment-with-langs.min.js'
else:
js_filename = 'moment.min.js'
if not sri:
mjs = ('<script src="https://cdnjs.cloudflare.com/ajax/'
'libs/moment.js/{}/{}"></script>\n').format(
version, js_filename)
else:
mjs = ('<script src="https://cdnjs.cloudflare.com/ajax/'
'libs/moment.js/{}/{}" integrity="{}" '
'crossorigin="anonymous"></script>\n').format(
version, js_filename, sri)
        return Markup('{}\n<script>\n{}\n</script>\n'.format(
mjs, cls.flask_moment_js()))
@staticmethod
def locale(language='en', auto_detect=False, customization=None):
"""Configure the moment.js locale.
:param language: The language code.
:param auto_detect: If ``True``, detect the locale from the browser.
:param customization: A dictionary with custom options for the locale,
as needed by the moment.js library.
"""
if auto_detect:
return Markup('<script>\nvar locale = '
'window.navigator.userLanguage || '
'window.navigator.language;\n'
'moment.locale(locale);\n</script>')
if customization:
return Markup(
'<script>\nmoment.locale("{}", {});\n</script>'.format(
language, customization))
return Markup(
'<script>\nmoment.locale("{}");\n</script>'.format(language))
@staticmethod
def flask_moment_js():
"""Return the JavaScript supporting code for this extension.
This method is provided to enable custom configurations that are not
supported by ``include_moment``. The return value of this method is
a string with raw JavaScript code. This code can be added to your own
``<script>`` tag in a template file::
<script>
{{ moment.flask_moment_js() }}
</script>
Alternatively, the code can be returned in a JavaScript endpoint that
can be loaded from the HTML file as an external resource::
@app.route('/flask-moment.js')
def flask_moment_js():
return (moment.flask_moment_js(), 200,
{'Content-Type': 'application/javascript'})
Note: only the code specific to Flask-Moment is included. When using
this method, you must include the moment.js library separately.
"""
default_format = ''
if 'MOMENT_DEFAULT_FORMAT' in current_app.config:
default_format = '\nmoment.defaultFormat = "{}";'.format(
current_app.config['MOMENT_DEFAULT_FORMAT'])
return '''moment.locale("en");{}\n{}'''.format(default_format, js_code)
@staticmethod
def lang(language):
"""Set the language. This is a simpler version of the :func:`locale`
function.
:param language: The language code to use.
"""
return moment.locale(language)
def __init__(self, timestamp=None, local=False):
if timestamp is None:
timestamp = datetime.utcnow()
self.timestamp = timestamp
self.local = local
def _timestamp_as_iso_8601(self, timestamp):
tz = ''
if not self.local:
tz = 'Z'
return timestamp.strftime('%Y-%m-%dT%H:%M:%S' + tz)
def _render(self, func, format=None, timestamp2=None, no_suffix=None,
units=None, refresh=False):
t = self._timestamp_as_iso_8601(self.timestamp)
data_values = 'data-function="{}"'.format(func)
if format:
data_values += ' data-format="{}"'.format(format)
if timestamp2:
data_values += ' data-timestamp2="{}"'.format(timestamp2)
if no_suffix:
data_values += ' data-nosuffix="1"'
if units:
data_values += ' data-units="{}"'.format(units)
return Markup(('<span class="flask-moment" data-timestamp="{}" ' +
'{} data-refresh="{}" ' +
'style="display: none">{}</span>').format(
t, data_values, int(refresh) * 60000, t))
def format(self, fmt=None, refresh=False):
"""Format a moment object with a custom formatting string.
:param fmt: The formatting specification to use, as documented by the
                    ``format()`` function from moment.js.
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("format", format=(fmt or ''), refresh=refresh)
def fromNow(self, no_suffix=False, refresh=False):
"""Render the moment object as a relative time.
This formatting option is often called "time ago", since it renders
the timestamp using friendly text strings such as "2 hours ago" or
"in 3 weeks".
:param no_suffix: if set to ``True``, the time difference does not
include the suffix (the "ago" or similar).
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("fromNow", no_suffix=int(no_suffix),
refresh=refresh)
def fromTime(self, timestamp, no_suffix=False, refresh=False):
"""Render the moment object as a relative time with respect to a
given reference time.
This function maps to the ``from()`` function from moment.js.
:param timestamp: The reference ``datetime`` object.
:param no_suffix: if set to ``True``, the time difference does not
include the suffix (the "ago" or similar).
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("from", timestamp2=self._timestamp_as_iso_8601(
timestamp), no_suffix=int(no_suffix), refresh=refresh)
def toNow(self, no_suffix=False, refresh=False):
"""Render the moment object as a relative time.
This function renders as the reverse time interval of ``fromNow()``.
:param no_suffix: if set to ``True``, the time difference does not
include the suffix (the "ago" or similar).
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("toNow", no_suffix=int(no_suffix), refresh=refresh)
def toTime(self, timestamp, no_suffix=False, refresh=False):
"""Render the moment object as a relative time with respect to a
given reference time.
This function maps to the ``to()`` function from moment.js.
:param timestamp: The reference ``datetime`` object.
:param no_suffix: if set to ``True``, the time difference does not
include the suffix (the "ago" or similar).
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("to", timestamp2=self._timestamp_as_iso_8601(
timestamp), no_suffix=int(no_suffix), refresh=refresh)
def calendar(self, refresh=False):
"""Render the moment object as a relative time, either to current time
or a given reference timestamp.
This function renders relative time using day references such as
tomorrow, next Sunday, etc.
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("calendar", refresh=refresh)
def valueOf(self, refresh=False):
"""Render the moment object as milliseconds from Unix Epoch.
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("valueOf", refresh=refresh)
def unix(self, refresh=False):
"""Render the moment object as seconds from Unix Epoch.
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("unix", refresh=refresh)
def diff(self, timestamp, units, refresh=False):
"""Render the difference between the moment object and the given
timestamp using the provided units.
:param timestamp: The reference ``datetime`` object.
:param units: A time unit such as `years`, `months`, `weeks`, `days`,
`hours`, `minutes` or `seconds`.
:param refresh: If set to ``True``, refresh the timestamp at one
minute intervals. If set to ``False``, background
refreshing is disabled. If set to an integer, the
refresh occurs at the indicated interval, given in
minutes.
"""
return self._render("diff", timestamp2=self._timestamp_as_iso_8601(
timestamp), units=units, refresh=refresh)
class Moment(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'): # pragma: no cover
app.extensions = {}
app.extensions['moment'] = moment
app.context_processor(self.context_processor)
@staticmethod
def context_processor():
return {
'moment': current_app.extensions['moment']
}
def flask_moment_js(self):
return current_app.extensions['moment'].flask_moment_js()
def create(self, timestamp=None):
return current_app.extensions['moment'](timestamp)
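# Minimal usage sketch (the helper below is illustrative and not part of
# Flask-Moment): wires the extension into a throwaway Flask app and renders a
# template that loads moment.js plus a self-refreshing "time ago" span.
def _example_render():
    from flask import Flask, render_template_string
    app = Flask(__name__)
    Moment(app)
    with app.test_request_context():
        return render_template_string(
            '{{ moment.include_moment() }}\n'
            '<p>{{ moment().fromNow(refresh=True) }}</p>')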
|
|
import sys
from enum import Enum
import itertools
import pygame
import snakes.plugins
from sortedcontainers import SortedList, SortedSet, SortedDict
snakes.plugins.load('gv', 'snakes.nets', 'nets')
from nets import *
################################################################
# (c) Copyright 2017, all rights reserved
# Python implementation of the Alpha miner algorithm
# This implementation is inspired by the book
# "Process Mining: Data Science in Action" by Wil van der Aalst
#
#
#
#
#
#################################################################
__author__ = "Bahra Mehdi"
__copyright__ = "Copyright 2017, The learning Project"
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "bahra.mehdi1@gmail.com"
__status__ = "Test"
# Python enum class that represents the possible relations between activities in an event log
class Relations(Enum):
SUCCESSIONS = '>'
RIGHT_CAUSALITY = '->'
LEFT_CAUSALITY = '<-'
PARALLEL = '||'
CHOICES = '#'
# Alpha Miner class
class AlphaMiner:
def __init__(self,Traces):
# Traces within an event log
self.traces = Traces
# set of transition a.k.a activities or T
self.transitions = SortedSet()
# set of initial transitions Ti
self.initial_transitions = SortedSet()
# set of final transitions To
self.final_transitions = SortedSet()
# set of pairs (A,B) Xl
self.pairs = []
# set of maximal pairs (A,B) Yl
self.maxi_pairs = []
# set of p(A,B) between maxi_pairs Pl
self.places = []
# Footprint , relations between activities
self.relations = SortedDict()
#Petri NET
self.PetriNet = None
def getTransitions(self):
        #Lemma 1
for trace in self.traces.values():
for activity in trace:
self.transitions.add(activity)
return self.transitions
def getInitialTransitions(self):
        # Lemma 2
#For each trace get the first activity
#and add it to a set of initial transitions
for trace in self.traces.values():
print(trace[0])
self.initial_transitions.add(trace[0])
        return self.initial_transitions
def getFinalTransitions(self):
        #For each trace get the last activity
        #and add it to the set of final transitions
for trace in self.traces.values():
print(trace[len(trace)-1])
self.final_transitions.add(trace[len(trace)-1])
return self.final_transitions
def extractRelations(self):
        #Extract non-repetitive traces; the alpha miner does not take frequencies into account
nnrep_traces = SortedSet()
for trace in self.traces.values():
nnrep_traces.add("".join(trace))
print(nnrep_traces)
        #Extract relations between each pair of transitions
        #and generate the footprint matrix
for transition1 in self.transitions:
self.relations[transition1] = SortedDict()
for transition2 in self.transitions:
concat = transition1+transition2
print(concat)
relation = None
for trace in nnrep_traces:
if trace.find(concat) >= 0:
#Causality
print(concat)
if relation == Relations.LEFT_CAUSALITY:
relation = Relations.PARALLEL
else:
relation = Relations.RIGHT_CAUSALITY
if trace.find(concat[::-1]) >= 0:
print(concat[::-1])
if relation == Relations.RIGHT_CAUSALITY:
relation = Relations.PARALLEL
else:
relation = Relations.LEFT_CAUSALITY
if relation == None:
relation = Relations.CHOICES
self.relations[transition1][transition2] = relation
return self.relations
def computePairs(self):
        # extract pairs of sets (A, B): activities within each set are pairwise in
        # the choice relation, and every activity in A is directly followed
        # (causality) by every activity in B
        #Lemma 4
pairs_causality = []
pairs_choices = []
pairs = []
#Extract all possible pairs of activity with causality relation
for activity1 ,relations1 in self.relations.items():
for activity2 , relation in relations1.items():
if relation == Relations.RIGHT_CAUSALITY :
pairs_causality.append((activity1,activity2))
if relation == Relations.CHOICES:
if activity1 == activity2:
pairs_choices.append((activity1,))
else:
pairs_choices.append((activity1,activity2))
print(pairs_causality)
pairs= pairs_causality
print(pairs_choices)
        # find all possible sets of activities that are pairwise in the choice
        # relation
i = 0
j = len(pairs_choices)
while i < j :
seti = pairs_choices[i]
for pair in pairs_choices:
union = True
if len(SortedSet(seti).intersection(SortedSet(pair))) != 0:
for e1 in pair:
if union == False:
break
for e2 in seti:
if self.relations[e1][e2] != Relations.CHOICES:
union = False
break
if union :
new_pair = SortedSet(seti) | SortedSet(pair)
if tuple(new_pair) not in pairs_choices:
pairs_choices.append(tuple(new_pair))
j = j + 1
#Reevaluate the length
i = i + 1
print(pairs_choices)
# Union
for pair_choices1 in pairs_choices:
for pair_choices2 in pairs_choices:
relation_between_pair = None
makePair = True
print("pair 1",pair_choices1)
print("pair 2",pair_choices2)
intersection = SortedSet(pair_choices1).intersection(pair_choices2)
pair_choices2 = SortedSet(pair_choices2)
if len(intersection) != 0 :
# remove intersection terms in the second pair
for term in intersection:
pair_choices2.discard(term)
if(len(pair_choices2) == 0):
continue
pair_choices2= tuple(pair_choices2)
print("pair_choices2 with discarded term :",pair_choices2)
for activity1 in pair_choices1:
print(activity1)
if makePair == False:
break
for activity2 in pair_choices2:
print(activity2)
relation = self.relations[activity1][activity2]
if relation_between_pair != None and relation_between_pair != relation:
makePair = False
break
else:
relation_between_pair = relation
if relation != Relations.RIGHT_CAUSALITY:
makePair = False
break
if makePair == True:
print("makepair true")
print(pair_choices1)
print(pair_choices2)
if relation_between_pair == Relations.RIGHT_CAUSALITY:
new_pair = (pair_choices1,pair_choices2)
else:
new_pair = (pair_choices2,pair_choices1)
pairs.append(new_pair)
print("\n")
print("\n")
print(pairs)
self.pairs = pairs
'''
combinations = list(itertools.combinations(list(self.transitions),len(self.transitions)))
possible_successions = SortedSet()
for combination in combinations:
combination = "".join(combination)
possible_successions.add(combination)
print(possible_successions)'''
def extract_maximal_pairs(self):
pos1 =0
pair_appended = []
maxi_pairs = []
for pair1 in self.pairs:
append = True
# flat the pair 1
flat_pair1 = []
for s in pair1:
for e in s:
flat_pair1.append(e)
print("pair1 :",pair1)
print("flat_pair1 :",flat_pair1)
pos2 = 0
for pair2 in self.pairs:
if pos1 != pos2:
flat_pair2 = []
for s in pair2:
for e in s:
flat_pair2.append(e)
print("pair2 :",pair2)
print("flat_pair2 :",flat_pair2)
# flat the pair 1
# flat the pair 2
# check if pair1 issubset of pair 2 or pair 2 is subset of 1
if SortedSet(flat_pair1).issubset(flat_pair2) and SortedSet(flat_pair1)!= SortedSet(flat_pair2):
print("issubset")
append = False
pos2 = pos2 + 1
if append == True:
print("append")
if SortedSet(flat_pair1) not in pair_appended:
maxi_pairs.append(pair1)
pair_appended.append(SortedSet(flat_pair1))
pos1 = pos1 + 1
print(maxi_pairs)
self.maxi_pairs = maxi_pairs
        #Lemma 5
pass
def add_places(self):
        #Lemma 6
# connect initial transition with place
cpt = 0
self.places.append(("P"+str(cpt),self.initial_transitions))
cpt = 1
for pair in self.maxi_pairs:
self.places.append((pair[0],"P"+str(cpt),pair[1]))
cpt+=1
self.places.append((self.final_transitions,"P"+str(cpt)))
print(self.places)
def extract_PetriNet(self):
n = PetriNet('N')
n.add_place(Place('p'+str(0)))
cpt_p = 1
for pair in self.maxi_pairs:
n.add_place(Place('p'+str(cpt_p)))
cpt_p += 1
n.add_place(Place('p'+str(cpt_p)))
for transition in self.transitions:
n.add_transition(Transition(transition))
print(self.initial_transitions)
for transition in self.initial_transitions:
n.add_input('p'+str(0),transition,Value(dot))
cpt_p = 1
for pair in self.maxi_pairs:
#pair[0] produce
#pair[1] consume
for transition in pair[0]:
n.add_output('p'+str(cpt_p), transition,Value(dot))
for transition in pair[1]:
n.add_input('p'+str(cpt_p), transition,Value(dot))
cpt_p+=1
for transition in self.final_transitions:
n.add_output('p'+str(cpt_p),transition,Value(dot))
self.PetriNet = n
def show(self,model = None):
def draw_place (place, attr) :
attr['label'] = place.name.upper()
attr['color'] = '#FF0000'
def draw_transition (trans, attr) :
if str(trans.guard) == 'True' :
attr['label'] = trans.name
else :
attr['label'] = '%s\n%s' % (trans.name, trans.guard)
self.PetriNet.draw(',net-with-colors.png',place_attr=draw_place, trans_attr=draw_transition)
import pygame
pygame.init()
size = width, height = 1200, 682
WHITE = (255, 255, 255)
screen = pygame.display.set_mode(size)
screen.fill(WHITE)
pygame.display.set_caption("petri net alphaminer")
petri_net = pygame.image.load(",net-with-colors.png").convert()
surf = pygame.transform.rotate(petri_net, 90)
screen.blit(surf, (50, 0))
pygame.display.flip()
        done = False
        while not done:
            for e in pygame.event.get():
                if e.type == pygame.QUIT or (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):
                    done = True
                    break
        pygame.quit()
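# Minimal usage sketch (the two traces below are illustrative, not from a real
# event log): runs the miner steps in order and builds the resulting Petri net.
if __name__ == '__main__':
    example_traces = {1: ['a', 'b', 'd'], 2: ['a', 'c', 'd']}
    miner = AlphaMiner(example_traces)
    miner.getTransitions()
    miner.getInitialTransitions()
    miner.getFinalTransitions()
    miner.extractRelations()
    miner.computePairs()
    miner.extract_maximal_pairs()
    miner.add_places()
    miner.extract_PetriNet()
    # miner.show()  # renders the net to a PNG and displays it in a pygame window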
|
|
"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts (Python2 only)
Functions for reading and writing raw Type 1 data:
read(path)
reads any Type 1 font file, returns the raw data and a type indicator:
'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed
to by 'path'.
Raises an error when the file does not contain valid Type 1 data.
write(path, data, kind='OTHER', dohex=False)
writes raw Type 1 data to the file pointed to by 'path'.
'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'.
'dohex' is a flag which determines whether the eexec encrypted
part should be written as hexadecimal or binary, but only if kind
is 'OTHER'.
"""
from fontTools.misc.py23 import bytechr, byteord, bytesjoin
from fontTools.misc import eexec
from fontTools.misc.macCreatorType import getMacCreatorAndType
import os
import re
__author__ = "jvr"
__version__ = "1.0b2"
DEBUG = 0
try:
try:
from Carbon import Res
except ImportError:
import Res # MacPython < 2.2
except ImportError:
haveMacSupport = 0
else:
haveMacSupport = 1
class T1Error(Exception): pass
class T1Font(object):
"""Type 1 font class.
    Uses a minimal interpreter that supports just about enough PS to parse
Type 1 fonts.
"""
def __init__(self, path, encoding="ascii", kind=None):
if kind is None:
self.data, _ = read(path)
elif kind == "LWFN":
self.data = readLWFN(path)
elif kind == "PFB":
self.data = readPFB(path)
elif kind == "OTHER":
self.data = readOther(path)
else:
raise ValueError(kind)
self.encoding = encoding
def saveAs(self, path, type, dohex=False):
write(path, self.getData(), type, dohex)
def getData(self):
# XXX Todo: if the data has been converted to Python object,
# recreate the PS stream
return self.data
def getGlyphSet(self):
"""Return a generic GlyphSet, which is a dict-like object
mapping glyph names to glyph objects. The returned glyph objects
have a .draw() method that supports the Pen protocol, and will
have an attribute named 'width', but only *after* the .draw() method
has been called.
In the case of Type 1, the GlyphSet is simply the CharStrings dict.
"""
return self["CharStrings"]
def __getitem__(self, key):
if not hasattr(self, "font"):
self.parse()
return self.font[key]
def parse(self):
from fontTools.misc import psLib
from fontTools.misc import psCharStrings
self.font = psLib.suckfont(self.data, self.encoding)
charStrings = self.font["CharStrings"]
lenIV = self.font["Private"].get("lenIV", 4)
assert lenIV >= 0
subrs = self.font["Private"]["Subrs"]
for glyphName, charString in charStrings.items():
charString, R = eexec.decrypt(charString, 4330)
charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:],
subrs=subrs)
for i in range(len(subrs)):
charString, R = eexec.decrypt(subrs[i], 4330)
subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
del self.data
# low level T1 data read and write functions
def read(path, onlyHeader=False):
"""reads any Type 1 font file, returns raw data"""
_, ext = os.path.splitext(path)
ext = ext.lower()
creator, typ = getMacCreatorAndType(path)
if typ == 'LWFN':
return readLWFN(path, onlyHeader), 'LWFN'
if ext == '.pfb':
return readPFB(path, onlyHeader), 'PFB'
else:
return readOther(path), 'OTHER'
def write(path, data, kind='OTHER', dohex=False):
assertType1(data)
kind = kind.upper()
try:
os.remove(path)
except os.error:
pass
err = 1
try:
if kind == 'LWFN':
writeLWFN(path, data)
elif kind == 'PFB':
writePFB(path, data)
else:
writeOther(path, data, dohex)
err = 0
finally:
if err and not DEBUG:
try:
os.remove(path)
except os.error:
pass
# -- internal --
LWFNCHUNKSIZE = 2000
HEXLINELENGTH = 80
def readLWFN(path, onlyHeader=False):
"""reads an LWFN font file, returns raw data"""
from fontTools.misc.macRes import ResourceReader
reader = ResourceReader(path)
try:
data = []
for res in reader.get('POST', []):
code = byteord(res.data[0])
if byteord(res.data[1]) != 0:
raise T1Error('corrupt LWFN file')
if code in [1, 2]:
if onlyHeader and code == 2:
break
data.append(res.data[2:])
elif code in [3, 5]:
break
elif code == 4:
with open(path, "rb") as f:
data.append(f.read())
elif code == 0:
pass # comment, ignore
else:
raise T1Error('bad chunk code: ' + repr(code))
finally:
reader.close()
data = bytesjoin(data)
assertType1(data)
return data
def readPFB(path, onlyHeader=False):
"""reads a PFB font file, returns raw data"""
data = []
with open(path, "rb") as f:
while True:
if f.read(1) != bytechr(128):
raise T1Error('corrupt PFB file')
code = byteord(f.read(1))
if code in [1, 2]:
chunklen = stringToLong(f.read(4))
chunk = f.read(chunklen)
assert len(chunk) == chunklen
data.append(chunk)
elif code == 3:
break
else:
raise T1Error('bad chunk code: ' + repr(code))
if onlyHeader:
break
data = bytesjoin(data)
assertType1(data)
return data
def readOther(path):
"""reads any (font) file, returns raw data"""
with open(path, "rb") as f:
data = f.read()
assertType1(data)
chunks = findEncryptedChunks(data)
data = []
for isEncrypted, chunk in chunks:
if isEncrypted and isHex(chunk[:4]):
data.append(deHexString(chunk))
else:
data.append(chunk)
return bytesjoin(data)
# file writing tools
def writeLWFN(path, data):
# Res.FSpCreateResFile was deprecated in OS X 10.5
Res.FSpCreateResFile(path, "just", "LWFN", 0)
resRef = Res.FSOpenResFile(path, 2) # write-only
try:
Res.UseResFile(resRef)
resID = 501
chunks = findEncryptedChunks(data)
for isEncrypted, chunk in chunks:
if isEncrypted:
code = 2
else:
code = 1
while chunk:
res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
res.AddResource('POST', resID, '')
chunk = chunk[LWFNCHUNKSIZE - 2:]
resID = resID + 1
res = Res.Resource(bytechr(5) + '\0')
res.AddResource('POST', resID, '')
finally:
Res.CloseResFile(resRef)
def writePFB(path, data):
chunks = findEncryptedChunks(data)
with open(path, "wb") as f:
for isEncrypted, chunk in chunks:
if isEncrypted:
code = 2
else:
code = 1
f.write(bytechr(128) + bytechr(code))
f.write(longToString(len(chunk)))
f.write(chunk)
f.write(bytechr(128) + bytechr(3))
def writeOther(path, data, dohex=False):
chunks = findEncryptedChunks(data)
with open(path, "wb") as f:
hexlinelen = HEXLINELENGTH // 2
for isEncrypted, chunk in chunks:
if isEncrypted:
code = 2
else:
code = 1
if code == 2 and dohex:
while chunk:
f.write(eexec.hexString(chunk[:hexlinelen]))
f.write(b'\r')
chunk = chunk[hexlinelen:]
else:
f.write(chunk)
# decryption tools
EEXECBEGIN = b"currentfile eexec"
# The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to
# follow eexec
EEXECEND = re.compile(b'(0[ \t\r\n]*){512}', flags=re.M)
EEXECINTERNALEND = b"currentfile closefile"
EEXECBEGINMARKER = b"%-- eexec start\r"
EEXECENDMARKER = b"%-- eexec end\r"
_ishexRE = re.compile(b'[0-9A-Fa-f]*$')
def isHex(text):
return _ishexRE.match(text) is not None
def decryptType1(data):
chunks = findEncryptedChunks(data)
data = []
for isEncrypted, chunk in chunks:
if isEncrypted:
if isHex(chunk[:4]):
chunk = deHexString(chunk)
decrypted, R = eexec.decrypt(chunk, 55665)
decrypted = decrypted[4:]
if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
raise T1Error("invalid end of eexec part")
decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r'
data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
else:
if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN:
data.append(chunk[:-len(EEXECBEGIN)-1])
else:
data.append(chunk)
return bytesjoin(data)
def findEncryptedChunks(data):
chunks = []
while True:
eBegin = data.find(EEXECBEGIN)
if eBegin < 0:
break
eBegin = eBegin + len(EEXECBEGIN) + 1
endMatch = EEXECEND.search(data, eBegin)
if endMatch is None:
raise T1Error("can't find end of eexec part")
eEnd = endMatch.start()
cypherText = data[eBegin:eEnd + 2]
if isHex(cypherText[:4]):
cypherText = deHexString(cypherText)
plainText, R = eexec.decrypt(cypherText, 55665)
eEndLocal = plainText.find(EEXECINTERNALEND)
if eEndLocal < 0:
raise T1Error("can't find end of eexec part")
chunks.append((0, data[:eBegin]))
chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1]))
data = data[eEnd:]
chunks.append((0, data))
return chunks
def deHexString(hexstring):
return eexec.deHexString(bytesjoin(hexstring.split()))
# Type 1 assertion
_fontType1RE = re.compile(br"/FontType\s+1\s+def")
def assertType1(data):
for head in [b'%!PS-AdobeFont', b'%!FontType1']:
if data[:len(head)] == head:
break
else:
raise T1Error("not a PostScript font")
if not _fontType1RE.search(data):
raise T1Error("not a Type 1 font")
if data.find(b"currentfile eexec") < 0:
raise T1Error("not an encrypted Type 1 font")
# XXX what else?
return data
# pfb helpers
def longToString(long):
s = b""
for i in range(4):
s += bytechr((long & (0xff << (i * 8))) >> i * 8)
return s
def stringToLong(s):
if len(s) != 4:
raise ValueError('string must be 4 bytes long')
l = 0
for i in range(4):
l += byteord(s[i]) << (i * 8)
return l
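# Minimal usage sketch ("Example.pfb" is a placeholder path): read raw Type 1
# data, write it back out, then parse the glyph set through T1Font.
if __name__ == '__main__':
    data, kind = read("Example.pfb")
    write("Example-copy.pfb", data, kind)
    font = T1Font("Example.pfb")
    glyphNames = sorted(font.getGlyphSet().keys())
    print(glyphNames[:10])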
|
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ConnectLogs(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'failures': 'list[ConnectLog]',
'logs': 'list[ConnectLog]',
'total_records': 'str',
'type': 'str'
}
attribute_map = {
'failures': 'failures',
'logs': 'logs',
'total_records': 'totalRecords',
'type': 'type'
}
def __init__(self, failures=None, logs=None, total_records=None, type=None): # noqa: E501
"""ConnectLogs - a model defined in Swagger""" # noqa: E501
self._failures = None
self._logs = None
self._total_records = None
self._type = None
self.discriminator = None
if failures is not None:
self.failures = failures
if logs is not None:
self.logs = logs
if total_records is not None:
self.total_records = total_records
if type is not None:
self.type = type
@property
def failures(self):
"""Gets the failures of this ConnectLogs. # noqa: E501
An array of containing failure information from the Connect failure log. # noqa: E501
:return: The failures of this ConnectLogs. # noqa: E501
:rtype: list[ConnectLog]
"""
return self._failures
@failures.setter
def failures(self, failures):
"""Sets the failures of this ConnectLogs.
An array of containing failure information from the Connect failure log. # noqa: E501
:param failures: The failures of this ConnectLogs. # noqa: E501
:type: list[ConnectLog]
"""
self._failures = failures
@property
def logs(self):
"""Gets the logs of this ConnectLogs. # noqa: E501
A complex type containing Connect log information. It is divided into two sections, one for regular logs and one for Connect failures. # noqa: E501
:return: The logs of this ConnectLogs. # noqa: E501
:rtype: list[ConnectLog]
"""
return self._logs
@logs.setter
def logs(self, logs):
"""Sets the logs of this ConnectLogs.
A complex type containing Connect log information. It is divided into two sections, one for regular logs and one for Connect failures. # noqa: E501
:param logs: The logs of this ConnectLogs. # noqa: E501
:type: list[ConnectLog]
"""
self._logs = logs
@property
def total_records(self):
"""Gets the total_records of this ConnectLogs. # noqa: E501
# noqa: E501
:return: The total_records of this ConnectLogs. # noqa: E501
:rtype: str
"""
return self._total_records
@total_records.setter
def total_records(self, total_records):
"""Sets the total_records of this ConnectLogs.
# noqa: E501
:param total_records: The total_records of this ConnectLogs. # noqa: E501
:type: str
"""
self._total_records = total_records
@property
def type(self):
"""Gets the type of this ConnectLogs. # noqa: E501
# noqa: E501
:return: The type of this ConnectLogs. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ConnectLogs.
# noqa: E501
:param type: The type of this ConnectLogs. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConnectLogs, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConnectLogs):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
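# Minimal usage sketch (the values below are illustrative placeholders, not real
# Connect log data): build a ConnectLogs container and serialize it.
if __name__ == '__main__':
    example = ConnectLogs(total_records='1', type='event')
    print(example.to_dict())
    print(example == ConnectLogs(total_records='1', type='event'))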
|
|
from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.proto.api import v2c
from pysnmp import debug
import os
import argparse
import time
import string
####### The SNMP Agent Daemon #######
def agent(verbose,quiet,server_ip0,server_port0,
snmp_ver0,community0,
authpriv0,v3auth0,v3priv0,
user0,authkey0,privkey0,filepath0,
engineid0='8000000001020304'):
if verbose==True:
debug.setLogger(debug.Debug('all'))
server_port0=int(server_port0)
    # Create SNMP engine with auto-generated engineID and pre-bound
# to socket transport dispatcher
snmpEngine=engine.SnmpEngine()
#print type(engineid3), engineid3
config.addSocketTransport(
snmpEngine,udp.domainName,
udp.UdpTransport().openServerMode((server_ip0, server_port0))
)
# This if tree sorts out the command line options into pysnmp commands.
########################## SNMP VERSION ONE/TWO ###########################
if snmp_ver0=='1':
config.CommunityData(community0, 1)
if snmp_ver0=='2':
config.CommunityData(community0, 2)
######################### SNMP VERSION THREE IF TREE ######################
if snmp_ver0=='3' and authpriv0=='00':
config.addV3User(
snmpEngine, user0,
config.NoPrivProtocol,
config.NoAuthProtocol
)
if snmp_ver0=='3' and authpriv0=='10':
if v3auth0=='MD5':
config.addV3User(
snmpEngine,user0,
config.usmHMACMD5AuthProtocol, authkey0,
config.NoAuthProtocol
)
if v3auth0=='SHA':
config.addV3User(
snmpEngine,user0,
config.usmHMACSHAAuthProtocol, authkey0,
config.NoAuthProtocol)
############## SNMPV3 WITH MD5 AUTH AND PRIV ###############
if snmp_ver0=='3' and authpriv0=='11':
if v3auth0=='MD5' and v3priv0=='DES':
config.addV3User(
snmpEngine,user0,
config.usmHMACMD5AuthProtocol, authkey0,
config.usmDESPrivProtocol, privkey0
)
if v3auth0=='MD5' and v3priv0=='3DES':
config.addV3User(
snmpEngine,user0,
config.usmHMACMD5AuthProtocol, authkey0,
config.usm3DESEDEPrivProtocol, privkey0
)
if v3auth0=='MD5' and v3priv0=='AES128':
config.addV3User(
snmpEngine,user0,
config.usmHMACMD5AuthProtocol, authkey0,
config.usmAesCfb128Protocol, privkey0
)
if v3auth0=='MD5' and v3priv0=='AES192':
config.addV3User(
snmpEngine,user0,
config.usmHMACMD5AuthProtocol, authkey0,
config.usmAesCfb192Protocol, privkey0
)
if v3auth0=='MD5' and v3priv0=='AES256':
config.addV3User(
snmpEngine,user0,
config.usmHMACMD5AuthProtocol, authkey0,
config.usmAesCfb256Protocol, privkey0
)
#### SHA AUTH ###
if v3auth0=='SHA' and v3priv0=='DES':
config.addV3User(
snmpEngine,user0,
config.usmHMACSHAAuthProtocol, authkey0,
config.usmDESPrivProtocol, privkey0
)
if v3auth0=='SHA' and v3priv0=='3DES':
config.addV3User(
snmpEngine,user0,
config.usmHMACSHAAuthProtocol, authkey0,
config.usm3DESEDEPrivProtocol, privkey0
)
if v3auth0=='SHA' and v3priv0=='AES128':
config.addV3User(
snmpEngine,user0,
config.usmHMACSHAAuthProtocol, authkey0,
config.usmAesCfb128Protocol, privkey0
)
if v3auth0=='SHA' and v3priv0=='AES192':
config.addV3User(
snmpEngine,user0,
config.usmHMACSHAAuthProtocol, authkey0,
config.usmAesCfb192Protocol, privkey0
)
if v3auth0=='SHA' and v3priv0=='AES256':
config.addV3User(
snmpEngine,user0,
config.usmHMACSHAAuthProtocol, authkey0,
config.usmAesCfb256Protocol, privkey0
)
# Callback function for receiving notifications
def cbFun(snmpEngine,
stateReference,
contextEngineId, contextName,
varBinds,cbCtx):
saveit=open(filepath0,'a')
saveit.write("----------------------------------- \n")
output1=string.join(('Notification received:',
str(time.strftime("%d/%m/%Y-%H:%M:%S")),'\n'
'ContextEngineId:',contextEngineId.prettyPrint(),'\n'
'ContextName:',contextName.prettyPrint(),'\n'))
if quiet!=True:
print output1
saveit.write(output1)
for name, val in varBinds:
output2='%s = %s \n' % (name.prettyPrint(), val.prettyPrint())
saveit.write(output2)
if quiet!=True:
print output2
saveit.write('\n')
saveit.close()
# Register SNMP Application at the SNMP engine
ntfrcv.NotificationReceiver(snmpEngine, cbFun)
print "Starting the pysnmp reciever agent."
# this job would never finish
snmpEngine.transportDispatcher.jobStarted(1)
# Run I/O dispatcher which would receive queries and send confirmations
try:
snmpEngine.transportDispatcher.runDispatcher()
except:
snmpEngine.transportDispatcher.closeDispatcher()
raise
#snmptrap -v 3 -a SHA -A authkey1 -u user -l authPriv -x AES -X privkey1 -L o: 10.5.1.156 163 1.3.6.1.6.3.1.1.5.1
if __name__ == '__main__':
    parser=argparse.ArgumentParser(description='This is a cross-platform SNMP v1, 2c and 3 receiver. The syntax is similar to net-snmp. 99 percent of the work should be credited to the pysnmp project.')
    parser.add_argument('--verbose','--verb','-V',action='store_true',
                        required=False,help='Verbose debug output (-v is reserved for the SNMP version).')
    parser.add_argument('-L',dest='server_ip',action='store',
                        required=True,default='127.0.0.1',help='Local SNMP receiver IP.')
    parser.add_argument('-p',dest='server_port',action='store',
                        required=False,default='162',help='Local SNMP receiver port. Default UDP 162.')
parser.add_argument('-v',dest='version',action='store',choices=['1', '2c', '3'],
required=True,help='SNMP version: 1,2c or 3')
parser.add_argument('-c',dest='community',action='store',
required=False,default='public',help='Community for v1 and v2c')
parser.add_argument('-l',dest='authpriv',action='store',choices=['00','10','11'],
required=True,help='Enter 11 for AuthPriv or 00 for noAuthnoPriv')
parser.add_argument('-a',dest='auth_hash',action='store',choices=['MD5','SHA'],
required=True,help='Hash type: MD5 or SHA')
    parser.add_argument('-x',dest='priv_enc',action='store',choices=['DES','3DES','AES','AES128','AES192','AES256'],
                        required=True,help='Priv encryption: DES, 3DES, AES128, AES192 or AES256 (AES is treated as AES128)')
parser.add_argument('-u',dest='user',action='store',
required=True,help='Username')
parser.add_argument('-A',dest='authkey',action='store',
required=True,help='Authentication hash key')
parser.add_argument('-X',dest='privkey',action='store',
required=True,help='Priv encryption key')
parser.add_argument('-e',dest='engineid',action='store',
required=False,help='SNMP engine id')
parser.add_argument('-f',dest='filepath',action='store',
required=True,help='File location for storing SNMP trap events.')
parser.add_argument('-q','--quiet',dest='quiet',action='store_true',required=False,help='Disable noisy output.')
args=parser.parse_args()
    # Default settings.
    if args.server_port=="":
        args.server_port=162
    if args.priv_enc=="AES":
        args.priv_enc="AES128"
#print(args.verbose,args.server_ip,args.server_port,args.version,
# args.community,args.authpriv,args.auth_hash,args.priv_enc,args.user,
# args.authkey,args.privkey,args.filepath,args.engineid)
agent(args.verbose,args.quiet,args.server_ip,args.server_port,args.version,args.community,args.authpriv,
args.auth_hash,args.priv_enc,args.user,args.authkey,
args.privkey,args.filepath,args.engineid,)
# snmptrap -v 3 -a SHA -A authkey1 -u user -l authPriv -x AES -X privkey1 -L o: 10.5.1.156 163 1.3.6.1.6.3.1.1.5.1
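# A usage sketch for this receiver (hypothetical script name; flags mirror the
# argparse options above -- note that -l, -a, -x, -u, -A, -X and -f are marked
# required, so they must be supplied even for v1/v2c runs):
#   python snmp_receiver.py -L 0.0.0.0 -p 162 -v 3 -l 11 -a SHA -x AES128 \
#       -u user -A authkey1 -X privkey1 -f /tmp/traps.log
#   python snmp_receiver.py -L 0.0.0.0 -v 2c -c public -l 00 -a MD5 -x DES \
#       -u unused -A unused -X unused -f /tmp/traps.log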
|
|
import sys
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.importlib import import_module
import mock
from oscar.test.testcases import WebTestCase
from oscar.test import factories
from . import CheckoutMixin
# Python 3 compat
try:
from imp import reload
except ImportError:
pass
def reload_url_conf():
# Reload URLs to pick up the overridden settings
if settings.ROOT_URLCONF in sys.modules:
reload(sys.modules[settings.ROOT_URLCONF])
import_module(settings.ROOT_URLCONF)
@override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True)
class TestIndexView(CheckoutMixin, WebTestCase):
is_anonymous = True
def setUp(self):
reload_url_conf()
super(TestIndexView, self).setUp()
def test_redirects_customers_with_empty_basket(self):
response = self.get(reverse('checkout:index'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_with_invalid_basket(self):
# Add product to basket but then remove its stock so it is not
# purchasable.
product = factories.create_product(num_in_stock=1)
self.add_product_to_basket(product)
product.stockrecords.all().update(num_in_stock=0)
response = self.get(reverse('checkout:index'))
self.assertRedirectUrlName(response, 'basket:summary')
@override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True)
class TestShippingAddressView(CheckoutMixin, WebTestCase):
is_anonymous = True
def setUp(self):
reload_url_conf()
super(TestShippingAddressView, self).setUp()
def test_redirects_customers_with_empty_basket(self):
response = self.get(reverse('checkout:shipping-address'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_who_have_skipped_guest_form(self):
self.add_product_to_basket()
response = self.get(reverse('checkout:shipping-address'))
self.assertRedirectUrlName(response, 'checkout:index')
def test_redirects_customers_whose_basket_doesnt_require_shipping(self):
product = self.create_digital_product()
self.add_product_to_basket(product)
self.enter_guest_details()
response = self.get(reverse('checkout:shipping-address'))
self.assertRedirectUrlName(response, 'checkout:shipping-method')
def test_redirects_customers_with_invalid_basket(self):
# Add product to basket but then remove its stock so it is not
# purchasable.
product = factories.create_product(num_in_stock=1)
self.add_product_to_basket(product)
self.enter_guest_details()
product.stockrecords.all().update(num_in_stock=0)
response = self.get(reverse('checkout:shipping-address'))
self.assertRedirectUrlName(response, 'basket:summary')
@override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True)
class TestShippingMethodView(CheckoutMixin, WebTestCase):
is_anonymous = True
def setUp(self):
reload_url_conf()
super(TestShippingMethodView, self).setUp()
def test_redirects_customers_with_empty_basket(self):
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_with_invalid_basket(self):
product = factories.create_product(num_in_stock=1)
self.add_product_to_basket(product)
self.enter_guest_details()
self.enter_shipping_address()
product.stockrecords.all().update(num_in_stock=0)
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_who_have_skipped_guest_form(self):
self.add_product_to_basket()
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'checkout:index')
def test_redirects_customers_whose_basket_doesnt_require_shipping(self):
product = self.create_digital_product()
self.add_product_to_basket(product)
self.enter_guest_details()
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'checkout:payment-method')
def test_redirects_customers_who_have_skipped_shipping_address_form(self):
self.add_product_to_basket()
self.enter_guest_details()
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'checkout:shipping-address')
@mock.patch('oscar.apps.checkout.views.Repository')
def test_redirects_customers_when_no_shipping_methods_available(self, mock_repo):
self.add_product_to_basket()
self.enter_guest_details()
self.enter_shipping_address()
# Ensure no shipping methods available
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = []
response = self.get(reverse('checkout:shipping-address'))
self.assertIsOk(response)
@mock.patch('oscar.apps.checkout.views.Repository')
def test_redirects_customers_when_only_one_shipping_methods_available(self, mock_repo):
self.add_product_to_basket()
self.enter_guest_details()
self.enter_shipping_address()
# Ensure one shipping method available
method = mock.MagicMock()
method.code = 'm'
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = [method]
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'checkout:payment-method')
@mock.patch('oscar.apps.checkout.views.Repository')
def test_shows_form_when_multiple_shipping_methods_available(self, mock_repo):
self.add_product_to_basket()
self.enter_guest_details()
self.enter_shipping_address()
# Ensure multiple shipping methods available
method = mock.MagicMock()
method.code = 'm'
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = [method, method]
response = self.get(reverse('checkout:shipping-method'))
self.assertIsOk(response)
@override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True)
class TestPaymentMethodView(CheckoutMixin, WebTestCase):
is_anonymous = True
def setUp(self):
reload_url_conf()
super(TestPaymentMethodView, self).setUp()
def test_redirects_customers_with_empty_basket(self):
response = self.get(reverse('checkout:payment-method'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_with_invalid_basket(self):
product = factories.create_product(num_in_stock=1)
self.add_product_to_basket(product)
self.enter_guest_details()
self.enter_shipping_address()
product.stockrecords.all().update(num_in_stock=0)
response = self.get(reverse('checkout:payment-method'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_who_have_skipped_guest_form(self):
self.add_product_to_basket()
response = self.get(reverse('checkout:payment-method'))
self.assertRedirectUrlName(response, 'checkout:index')
def test_redirects_customers_who_have_skipped_shipping_address_form(self):
self.add_product_to_basket()
self.enter_guest_details()
response = self.get(reverse('checkout:payment-method'))
self.assertRedirectUrlName(response, 'checkout:shipping-address')
def test_redirects_customers_who_have_skipped_shipping_method_step(self):
self.add_product_to_basket()
self.enter_guest_details()
self.enter_shipping_address()
response = self.get(reverse('checkout:payment-method'))
self.assertRedirectUrlName(response, 'checkout:shipping-method')
@override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True)
class TestPaymentDetailsView(CheckoutMixin, WebTestCase):
is_anonymous = True
def setUp(self):
reload_url_conf()
super(TestPaymentDetailsView, self).setUp()
def test_redirects_customers_with_empty_basket(self):
response = self.get(reverse('checkout:payment-details'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_with_invalid_basket(self):
product = factories.create_product(num_in_stock=1)
self.add_product_to_basket(product)
self.enter_guest_details()
self.enter_shipping_address()
product.stockrecords.all().update(num_in_stock=0)
response = self.get(reverse('checkout:payment-details'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_redirects_customers_who_have_skipped_guest_form(self):
self.add_product_to_basket()
response = self.get(reverse('checkout:payment-details'))
self.assertRedirectUrlName(response, 'checkout:index')
def test_redirects_customers_who_have_skipped_shipping_address_form(self):
self.add_product_to_basket()
self.enter_guest_details()
response = self.get(reverse('checkout:payment-details'))
self.assertRedirectUrlName(response, 'checkout:shipping-address')
def test_redirects_customers_who_have_skipped_shipping_method_step(self):
self.add_product_to_basket()
self.enter_guest_details()
self.enter_shipping_address()
response = self.get(reverse('checkout:payment-details'))
self.assertRedirectUrlName(response, 'checkout:shipping-method')
@override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True)
class TestPlacingOrder(CheckoutMixin, WebTestCase):
is_anonymous = True
def setUp(self):
reload_url_conf()
super(TestPlacingOrder, self).setUp()
def test_saves_guest_email_with_order(self):
self.add_product_to_basket()
self.enter_guest_details('hello@egg.com')
self.enter_shipping_address()
page = self.get(reverse('checkout:shipping-method')).follow().follow()
preview = page.click(linkid="view_preview")
thank_you = preview.forms['place_order_form'].submit().follow()
order = thank_you.context['order']
self.assertEqual('hello@egg.com', order.guest_email)
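# For reference, the flow these tests exercise is: basket -> checkout:index
# (guest email) -> checkout:shipping-address -> checkout:shipping-method ->
# checkout:payment-method -> checkout:payment-details -> preview ->
# place order -> thank-you, with each view redirecting back to the earliest
# step whose preconditions are not yet met.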
|
|
#!/usr/bin/env python
import logging
import random
import numpy.random
from mann import agent
import mann.helper
import mann.lens_in_writer
logger = logging.getLogger(__name__)
def setup_logger(fh, formatter):
    """Attach an already-built handler and formatter to the module-level logger."""
    logger.setLevel(logging.DEBUG)
    # fh.setLevel(logging.DEBUG)
    fh.setLevel(logging.CRITICAL)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.debug('Setup logger in agent_lens_recurrent.py')
    return logger
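# A minimal usage sketch for setup_logger (hypothetical log-file name); the
# caller builds the handler and formatter, and this module only attaches them
# and sets the levels:
#     fh = logging.FileHandler('mann_run.log')
#     formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
#     setup_logger(fh, formatter)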
class LensAgentRecurrent(agent.LensAgent):
agent_count = 0
def __init__(self, num_state_vars):
"""
:parm num_state_vars: Total number of processing units in the agent
positive + negative banks
:type num_state_vars: int
"""
assert isinstance(num_state_vars, int),\
'num_state_vars needs to be an int'
assert num_state_vars % 2 == 0,\
'num_state_vars needs to be an even value'
self._agent_id = LensAgentRecurrent.agent_count
LensAgentRecurrent.agent_count += 1
self._agent_type = "_".join([type(self).__name__, 'attitude'])
self._state = [0] * num_state_vars
self._temp_new_state = None
self._len_per_bank = int(len(self.state) / 2)
self._predecessors = []
self._num_update = 0
def __hash__(self):
return(hash(self.agent_id))
    def __eq__(self, other):
        return self.agent_id == other.agent_id
def calc_new_state_values_rps_1(self, num_predecessors_pick):
"""Calculate new state values from 1 random predecessor implementation
"""
predecessors_picked = self.pick_random_predecessor(
num_predecessors_pick)
def calculate_new_state_values(self,
pick_method='random_predecessor_single',
**kwargs):
"""Calculate new state values
:param pick_method: how should influencing agents be picked
:type pick_method: str
:param **kwargs: keyword arguments to be passed into different
calculate_new_state_values pick_method implementations
:returns: New state values
:rtype: tuple
"""
assert pick_method in ['random_predecessor_single'],\
'predecessor pick method not in list of known methods'
new_state_values = None
if pick_method == 'random_predecessor_single':
new_state_values = self.calc_new_state_values_rps_1(1)
return tuple(new_state_values)
def create_weight_file(self, weight_in_file_path, weight_directory,
ex_file_path, **kwargs):
"""Creates the weights for agent_lens_recurrent
This involves creating an .ex file (Typically Infl.ex)
calling lens (which will generate weights,
read in the .ex file, and train)
"""
logger.debug("creating weight file")
padded_agent_number = self.get_padded_agent_id()
# write a LENS ex file before calling lens to create weights
# number of predecessors
np = len(self.predecessors)
logger.debug("Number of predecessors: {}".format(str(np)))
self.write_lens_ex_file(
ex_file_path,
list_to_write_into_string=self.sample_predecessor_values(np))
logger.debug('Calling lens from agent_lens_recurrent.create_weight_file')
self.call_lens(lens_in_file_dir=weight_in_file_path,
lens_env={'a': padded_agent_number,
'bm': kwargs['between_mean'],
'bs': kwargs['between_sd'],
'wm': kwargs['within_mean'],
'ws': kwargs['within_sd'],
'cs': kwargs['clamp_strength']})
        logger.debug('Finished calling lens from agent_lens_recurrent.create_weight_file')
# def get_new_state_values_from_out_file(self, file_dir, agent_type,
# column=0):
# """Get new state values from .out file
# :param file_dir: file directory of .out file
# :type file_dir: str
# :parm agent_type: agent type
# :type agent_type: str
# :parm column: column in the .out file to get new values from
# :type column: int
# typically agent_type is type(AGENT).__name__
# :returns: new state values
# :rtype: tuple
# """
# """Get new state values from .out file_d
# :returns: new state values
# :rtype: tuple
# """
# # creates a list and returns a tuple
# list_of_new_state = []
# read_file_path = file_dir
# with open(read_file_path, 'r') as f:
# start_bank1, end_bank1, start_bank2, end_bank2 = \
# self._start_end_update_out(f, self.agent_type)
# for line_idx, line in enumerate(f):
# # print(line)
# line_num = line_idx + 1 # python starts from line 0
# if start_bank1 <= line_num <= end_bank1 or \
# start_bank2 <= line_num <= end_bank2:
# # in a line that I want to save information for
# col = line.strip().split(' ')[column]
# list_of_new_state.append(float(col))
# # print('list of new state', list_of_new_state)
# return tuple(list_of_new_state)
def _pick_self(self):
lens_in_writer_helper = mann.lens_in_writer.LensInWriterHelper()
lens_ex_file_strings = []
agent_for_update = "{}-1".format(self.agent_id)
agent_for_update_ex_str = \
lens_in_writer_helper.clean_agent_state_in_file(
agent_for_update,
mann.helper.convert_list_to_delim_str(self.state, delim=' '))
lens_ex_file_strings.append(agent_for_update_ex_str)
return(lens_ex_file_strings)
def _pick_network(self, n):
"""Picks n from the predecessors and returns a list, lens_ex_file_string
where each element in the list is the example case used to write an .ex
LENS file
"""
predecessors_picked = random.sample(self.predecessors, n)
logger.debug('predecessors_picked: {}'.format(predecessors_picked))
lens_in_writer_helper = mann.lens_in_writer.LensInWriterHelper()
lens_ex_file_strings = []
lens_ex_file_string_self_1 = self._pick_self()
# agent_for_update = "{}-1".format(self.agent_id)
# agent_for_update_ex_str = \
# lens_in_writer_helper.clean_agent_state_in_file(
# agent_for_update,
# mann.helper.convert_list_to_delim_str(self.state, delim=' '))
# lens_ex_file_strings.append(agent_for_update_ex_str)
for predecessor in predecessors_picked:
predecessor_ex_str = \
lens_in_writer_helper.clean_agent_state_in_file(
str(predecessor.agent_id),
mann.helper.convert_list_to_delim_str(
predecessor.state,
delim=' '))
lens_ex_file_strings.append(predecessor_ex_str)
# print(lens_ex_file_strings)
lens_ex_file_string_self_1.extend(lens_ex_file_strings)
return(lens_ex_file_string_self_1)
def _pick_manual_predecessor_inputs(self, manual_predecessor_inputs, n):
"""Pick manually entered predecessor inputs
"""
lens_ex_file_string_self_1 = self._pick_self()
predecessors_picked = manual_predecessor_inputs[
numpy.random.choice(manual_predecessor_inputs.shape[0],
size=n,
replace=False),
:]
logger.debug('manual_predecessors_picked: {}'.
format(predecessors_picked))
lens_ex_file_strings = []
lens_in_writer_helper = mann.lens_in_writer.LensInWriterHelper()
for idx, predecessor in enumerate(predecessors_picked):
predecessor_ex_str = \
lens_in_writer_helper.clean_agent_state_in_file(
str(idx) + "_manual",
mann.helper.convert_list_to_delim_str(
predecessor,
delim=' '))
lens_ex_file_strings.append(predecessor_ex_str)
lens_ex_file_string_self_1.extend(lens_ex_file_strings)
return(lens_ex_file_string_self_1)
def write_lens_ex_file(self, file_to_write,
string_to_write=None,
list_to_write_into_string=None):
"""Takes a string or list and writes an .ex file for lens
"""
print("-"*80)
print("string", string_to_write)
print("list", list_to_write_into_string)
with open(file_to_write, 'w') as f:
if string_to_write is None and list_to_write_into_string is not None:
# passed in a list of stings to write and not a full string
ex_file_strings = '\n'.join(list_to_write_into_string)
logger.debug('writing ex file {}:\n{}\n{}\n{}'.format(
file_to_write,
'*' * 80,
ex_file_strings,
'*' * 80))
f.write(ex_file_strings)
elif string_to_write is not None and list_to_write_into_string is None:
# passed in just a string to directly write
logger.debug('writing ex file {}:\n{}\n{}\n{}'.format(
file_to_write,
'*' * 80,
string_to_write,
'*' * 80))
f.write(string_to_write)
            else:
                s = "Unknown combination of strings or list passed"
                logger.fatal(s)
                raise ValueError(s)
def sample_predecessor_values(self, n, manual_predecessor_inputs=None):
"""Returns a list of strings that represent the inputs of n predecessors
Each element of the string will have the agent number, and a string
representation of the selected agent's activation values
"""
        if n > len(self.predecessors):
            raise ValueError("n is greater than number of predecessors")
# manual_predecessor_inputs = None
if manual_predecessor_inputs is not None:
logger.debug('Picking from manual_predecessor_inputs')
lens_ex_file_strings = self._pick_manual_predecessor_inputs(
manual_predecessor_inputs, n)
else:
logger.debug('Picking from self.predecessors')
lens_ex_file_strings = self._pick_network(n)
return(lens_ex_file_strings)
def _update_random_n(self, update_type, n, manual_predecessor_inputs,
**kwargs):
"""Uses `n` neighbors to update
"""
lens_ex_file_strings = self.sample_predecessor_values(
n,
manual_predecessor_inputs=manual_predecessor_inputs)
# manual_predecessor_inputs = None
# if manual_predecessor_inputs is not None:
# logger.debug('Picking from manual_predecessor_inputs')
# lens_ex_file_strings = self._pick_manual_predecessor_inputs(
# manual_predecessor_inputs, n)
# else:
# logger.debug('Picking from self.predecessors')
# lens_ex_file_strings = self._pick_network(n)
ex_file_strings = '\n'.join(lens_ex_file_strings)
ex_file_path = kwargs['lens_parameters']['ex_file_path']
self.write_lens_ex_file(ex_file_path, string_to_write=ex_file_strings)
# with open(ex_file_path, 'w') as f:
# f.write(ex_file_strings)
print('kwargs: ', kwargs['lens_parameters'])
print(kwargs['lens_parameters']['between_mean'])
lens_in_file_path = kwargs['lens_parameters']['in_file_path']
self.call_lens(lens_in_file_path,
lens_env={'a': self.get_padded_agent_id(),
'bm': kwargs['lens_parameters']['between_mean'],
'bs': kwargs['lens_parameters']['between_sd'],
'wm': kwargs['lens_parameters']['within_mean'],
'ws': kwargs['lens_parameters']['within_sd'],
'cs': kwargs['lens_parameters']['clamp_strength']})
if update_type == 'sequential':
new_state_path = kwargs['lens_parameters']['new_state_path']
new_state = self.get_new_state_values_from_out_file(new_state_path)
self.state = new_state
else:
raise ValueError('Only implemented sequential updating so far')
def update_agent_state(self, update_type, update_algorithm,
manual_predecessor_inputs, **kwargs):
"""Updates the agent
:param update_type: Can be either 'simultaneous' or 'sequential'
:type update_type: str
:param update_algorithm: 'random_1', 'random_all'
:type update_algorithm: str
"""
if self.has_predecessor():
if update_algorithm == 'random_1':
self._update_random_n(update_type, 1,
manual_predecessor_inputs, **kwargs)
elif update_algorithm == 'random_all':
if manual_predecessor_inputs is not None:
n = len(manual_predecessor_inputs)
elif manual_predecessor_inputs is None:
n = len(self.predecessors)
else:
raise ValueError
self._update_random_n(update_type, n,
manual_predecessor_inputs, **kwargs)
else:
raise ValueError("update algorithm unknown")
else:
            logger.debug('Agent {} has no predecessors.'.
                         format(self.agent_id))
@property
def agent_id(self):
return self._agent_id
@agent_id.setter
def agent_id(self, value):
try:
self._agent_id
raise mann.agent.AssignAgentIdError
        except AttributeError:
            # an unset _agent_id raises AttributeError, so assignment is allowed
if value < 0:
raise ValueError("Agent ID cannot be less than 0")
else:
self._agent_id = value
# LensAgentRecurrent.agent_count += 1
    @property
    def len_per_bank(self):
        return self._len_per_bank
    @len_per_bank.setter
    def len_per_bank(self, value):
        self._len_per_bank = value
@property
def agent_type(self):
return self._agent_type
@agent_type.setter
def agent_type(self, value):
self._agent_type = value
@property
def state(self):
return self._state
@state.setter
def state(self, new_state_values):
        print('len new state values: {}'.format(len(new_state_values)))
        print('len old state values: {}'.format(len(self.state)))
assert len(new_state_values) == len(self.state)
self._state = new_state_values[:]
# @property
# def temp_new_state(self):
# return self._temp_new_state
# @temp_new_state.setter
# def temp_new_state(self, temp_state_values):
# assert len(temp_state_values) == len(self.state)
# self._temp_new_state = temp_state_values[:]
@property
def predecessors(self):
return self._predecessors
@predecessors.setter
def predecessors(self, predecessors_list):
self._predecessors = predecessors_list
@property
def num_update(self):
return self._num_update
@num_update.setter
def num_update(self, value):
        if value == self.num_update:
            raise ValueError("Number update cannot be equal to the current count")
        elif value < self.num_update:
            raise ValueError(
                "Number update cannot be lower than the current count")
else:
self._num_update = value
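# A minimal, hypothetical demo of the positive/negative bank layout and the
# state setter's length check (assumes the mann package imports cleanly; no
# LENS binary is needed for this part):
if __name__ == '__main__':
    demo_agent = LensAgentRecurrent(4)   # two units per bank
    print(demo_agent.agent_id, demo_agent.agent_type)
    print(demo_agent.state)              # [0, 0, 0, 0]
    demo_agent.state = [1, 0, 1, 0]      # setter copies the list and checks its length
    print(demo_agent.state)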
|
|
"""
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Feb 4, 2011.
"""
from datetime import datetime
from datetime import timedelta
from pyramid.compat import iteritems_
import pytest
from everest.querying.operators import UnaryOperator
from everest.querying.specifications import ConjunctionFilterSpecification
from everest.querying.specifications import DisjunctionFilterSpecification
from everest.querying.specifications import FilterSpecification
from everest.querying.specifications import FilterSpecificationGenerator
from everest.querying.specifications import NegationFilterSpecification
from everest.querying.specifications import asc
from everest.querying.specifications import cntd
from everest.querying.specifications import cnts
from everest.querying.specifications import desc
from everest.querying.specifications import ends
from everest.querying.specifications import eq
from everest.querying.specifications import ge
from everest.querying.specifications import gt
from everest.querying.specifications import le
from everest.querying.specifications import lt
from everest.querying.specifications import rng
from everest.querying.specifications import starts
__docformat__ = 'reStructuredText en'
__all__ = ['TestFilterSpecification',
]
TEXT_VALUE = 'Beta-2'
GREATER_THAN_TEXT_VALUE = 'Gamma-3'
LESS_THAN_TEXT_VALUE = 'Alpha-1'
TEXT_VALUE_LIST = [LESS_THAN_TEXT_VALUE, TEXT_VALUE,
GREATER_THAN_TEXT_VALUE]
NUMBER_VALUE = 40
GREATER_THAN_NUMBER_VALUE = NUMBER_VALUE + 1
LESS_THAN_NUMBER_VALUE = NUMBER_VALUE - 1
DATE_VALUE = datetime(1970, 1, 1)
GREATER_THAN_DATE_VALUE = DATE_VALUE + timedelta(1)
LESS_THAN_DATE_VALUE = DATE_VALUE - timedelta(1)
LIST_VALUES = [1, 2, 3, 4, 5]
class AlwaysTrueOperator(UnaryOperator):
name = 'true'
literal = 'T'
@staticmethod
def apply(arg): # pylint: disable=W0613
return True
class AlwaysTrueFilterSpecification(FilterSpecification):
operator = AlwaysTrueOperator
def __init__(self):
FilterSpecification.__init__(self)
def is_satisfied_by(self, candidate):
return self.operator.apply(candidate)
def accept(self, visitor):
pass
class AlwaysFalseOperator(UnaryOperator):
name = 'false'
literal = 'F'
@staticmethod
def apply(arg): # pylint: disable=W0613
return False
class AlwaysFalseFilterSpecification(FilterSpecification):
operator = AlwaysFalseOperator
def is_satisfied_by(self, candidate):
return False
def accept(self, visitor):
pass
class SpecificationCandidate(object):
def __str__(self):
attrs = ['%s: %s' % (k, getattr(self, k))
for k in self.__dict__
if not k.startswith('_')]
return 'Candidate -> %s' % ', '.join(attrs)
    @classmethod
    def make_instance(cls, **attributes):
        cand = cls()
for attr_name, attr_value in iteritems_(attributes):
setattr(cand, attr_name, attr_value)
return cand
@pytest.fixture
def specification_candidate_factory():
return SpecificationCandidate.make_instance
@pytest.fixture
def specification_candidate(specification_candidate_factory): #pylint: disable=W0621
return specification_candidate_factory(text_attr=TEXT_VALUE,
number_attr=NUMBER_VALUE,
date_attr=DATE_VALUE,
list_attr=LIST_VALUES)
class TestFilterSpecification(object):
def test_basics(self, filter_specification_factory):
spec = filter_specification_factory.create_equal_to('foo', 'bar')
assert spec == spec
spec_other_attr = \
filter_specification_factory.create_equal_to('bar', 'bar')
assert spec != spec_other_attr
spec_other_value = \
filter_specification_factory.create_equal_to('foo', 'bar1')
assert spec != spec_other_value
str_str = '<%s op_name:' % spec.__class__.__name__
assert str(spec)[:len(str_str)] == str_str
@pytest.mark.parametrize('value,attr,outcome',
[(TEXT_VALUE, 'text_attr', True),
(GREATER_THAN_TEXT_VALUE, 'text_attr', False),
(NUMBER_VALUE, 'number_attr', True),
(GREATER_THAN_NUMBER_VALUE, 'number_attr',
False),
(DATE_VALUE, 'date_attr', True),
(GREATER_THAN_DATE_VALUE, 'date_attr', False),
])
def test_equal_to(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_equal_to(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr,outcome',
[(LESS_THAN_TEXT_VALUE, 'text_attr', True),
(TEXT_VALUE, 'text_attr', False),
(LESS_THAN_NUMBER_VALUE, 'number_attr', True),
(NUMBER_VALUE, 'number_attr', False),
(LESS_THAN_DATE_VALUE, 'date_attr', True),
(DATE_VALUE, 'date_attr', False),
])
def test_greater_than(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_greater_than(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr,outcome',
[(GREATER_THAN_TEXT_VALUE, 'text_attr', True),
(TEXT_VALUE, 'text_attr', False),
(GREATER_THAN_NUMBER_VALUE, 'number_attr',
True),
(NUMBER_VALUE, 'number_attr', False),
(GREATER_THAN_DATE_VALUE, 'date_attr', True),
(DATE_VALUE, 'date_attr', False),
])
def test_less_than(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_less_than(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr,outcome',
[(LESS_THAN_TEXT_VALUE, 'text_attr', True),
(TEXT_VALUE, 'text_attr', True),
(GREATER_THAN_TEXT_VALUE, 'text_attr', False),
(LESS_THAN_NUMBER_VALUE, 'number_attr', True),
(NUMBER_VALUE, 'number_attr', True),
(GREATER_THAN_NUMBER_VALUE, 'number_attr',
False),
(LESS_THAN_DATE_VALUE, 'date_attr', True),
(DATE_VALUE, 'date_attr', True),
(GREATER_THAN_DATE_VALUE, 'date_attr', False),
])
def test_greater_than_or_equal_to(self,
filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = \
filter_specification_factory.create_greater_than_or_equal_to(attr,
value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr,outcome',
[(GREATER_THAN_TEXT_VALUE, 'text_attr', True),
(TEXT_VALUE, 'text_attr', True),
(LESS_THAN_TEXT_VALUE, 'text_attr', False),
(GREATER_THAN_NUMBER_VALUE, 'number_attr',
True),
(NUMBER_VALUE, 'number_attr', True),
(LESS_THAN_NUMBER_VALUE, 'number_attr', False),
(GREATER_THAN_DATE_VALUE, 'date_attr', True),
(DATE_VALUE, 'date_attr', True),
(LESS_THAN_DATE_VALUE, 'date_attr', False),
])
def test_less_than_or_equal_to(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = \
filter_specification_factory.create_less_than_or_equal_to(attr,
value)
assert spec.is_satisfied_by(specification_candidate) is outcome
def test_in_range_basics(self, filter_specification_factory):
spec = filter_specification_factory.create_in_range('foo',
('bar0', 'bar1'))
assert spec.from_value == 'bar0'
assert spec.to_value == 'bar1'
assert spec == spec
spec_other_value = \
filter_specification_factory.create_in_range('foo',
('bar0', 'bar2'))
assert spec != spec_other_value
spec_other_attr = \
filter_specification_factory.create_in_range('bar',
('bar0', 'bar1'))
assert spec != spec_other_attr
@pytest.mark.parametrize('value1,value2,attr,outcome',
[(LESS_THAN_TEXT_VALUE, GREATER_THAN_TEXT_VALUE,
'text_attr', True),
(GREATER_THAN_TEXT_VALUE, LESS_THAN_TEXT_VALUE,
'text_attr', False),
(LESS_THAN_NUMBER_VALUE,
GREATER_THAN_NUMBER_VALUE, 'number_attr',
True),
(GREATER_THAN_DATE_VALUE,
LESS_THAN_DATE_VALUE, 'date_attr', False),
(LESS_THAN_DATE_VALUE, GREATER_THAN_DATE_VALUE,
'date_attr', True),
(GREATER_THAN_DATE_VALUE, LESS_THAN_DATE_VALUE,
'date_attr', False),
])
def test_in_range(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value1, value2, outcome):
spec = filter_specification_factory.create_in_range(attr,
(value1, value2))
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr,outcome',
[(TEXT_VALUE[0], 'text_attr', True),
(GREATER_THAN_TEXT_VALUE[0], 'text_attr',
False),
(LIST_VALUES[0], 'list_attr', True),
(LIST_VALUES[-1], 'list_attr', False),
])
def test_starts_with(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_starts_with(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr',
[(NUMBER_VALUE, 'number_attr'),
(DATE_VALUE, 'date_attr')
])
def test_starts_with_raises(self,
filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value):
spec = filter_specification_factory.create_starts_with(attr, value)
with pytest.raises(TypeError):
spec.is_satisfied_by(specification_candidate)
@pytest.mark.parametrize('value,attr,outcome',
[(TEXT_VALUE[-1], 'text_attr', True),
(GREATER_THAN_TEXT_VALUE[-1], 'text_attr',
False),
(LIST_VALUES[-1], 'list_attr', True),
(LIST_VALUES[0], 'list_attr', False),
])
def test_ends_with(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_ends_with(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr',
[(NUMBER_VALUE, 'number_attr'),
(DATE_VALUE, 'date_attr')
])
def test_ends_with_raises(self,
filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value):
spec = filter_specification_factory.create_ends_with(attr, value)
with pytest.raises(TypeError):
spec.is_satisfied_by(specification_candidate)
@pytest.mark.parametrize('value,attr,outcome',
[(TEXT_VALUE[2], 'text_attr', True),
(GREATER_THAN_TEXT_VALUE[-1], 'text_attr',
False),
(LIST_VALUES[2], 'list_attr', True),
(-1, 'list_attr', False),
])
def test_contains(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_contains(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr',
[(NUMBER_VALUE, 'number_attr'),
(DATE_VALUE, 'date_attr'),
])
def test_contains_raises(self,
filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value):
spec = filter_specification_factory.create_contains(attr, value)
with pytest.raises(TypeError):
spec.is_satisfied_by(specification_candidate)
@pytest.mark.parametrize('value,attr,outcome',
[(TEXT_VALUE_LIST, 'text_attr', True),
(LIST_VALUES, 'text_attr', False),
])
def test_contained(self, filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value, outcome):
spec = filter_specification_factory.create_contained(attr, value)
assert spec.is_satisfied_by(specification_candidate) is outcome
@pytest.mark.parametrize('value,attr',
[(NUMBER_VALUE, 'number_attr'),
(DATE_VALUE, 'date_attr'),
])
def test_contained_raises(self,
filter_specification_factory,
specification_candidate, # pylint: disable=W0621
attr, value):
spec = filter_specification_factory.create_contained(attr, value)
with pytest.raises(TypeError):
spec.is_satisfied_by(specification_candidate)
def test_conjunction_basics(self, filter_specification_factory):
always_true_spec = AlwaysTrueFilterSpecification()
always_false_spec = AlwaysFalseFilterSpecification()
spec = \
filter_specification_factory.create_conjunction(always_true_spec,
always_false_spec)
assert spec == spec
other_spec = \
filter_specification_factory.create_conjunction(always_false_spec,
always_true_spec)
assert spec != other_spec
str_str = '<%s left_spec:' % spec.__class__.__name__
assert str(spec)[:len(str_str)] == str_str
@pytest.mark.parametrize('left_spec,right_spec,outcome',
[(AlwaysTrueFilterSpecification(),
AlwaysTrueFilterSpecification(), True),
(AlwaysFalseFilterSpecification(),
AlwaysTrueFilterSpecification(), False),
(AlwaysTrueFilterSpecification(),
AlwaysFalseFilterSpecification(), False),
(AlwaysFalseFilterSpecification(),
AlwaysFalseFilterSpecification(), False),
])
def test_conjunction(self, filter_specification_factory,
specification_candidate_factory, #pylint: disable=W0621
left_spec, right_spec, outcome):
spec = filter_specification_factory.create_conjunction(left_spec,
right_spec)
cand = specification_candidate_factory()
assert spec.is_satisfied_by(cand) is outcome
@pytest.mark.parametrize('left_spec,right_spec,outcome',
[(AlwaysFalseFilterSpecification(),
AlwaysTrueFilterSpecification(), True),
(AlwaysTrueFilterSpecification(),
AlwaysFalseFilterSpecification(), True),
(AlwaysTrueFilterSpecification(),
AlwaysTrueFilterSpecification(), True),
(AlwaysFalseFilterSpecification(),
AlwaysFalseFilterSpecification(), False),
])
def test_disjunction(self, filter_specification_factory,
specification_candidate_factory, #pylint: disable=W0621
left_spec, right_spec, outcome):
spec = filter_specification_factory.create_disjunction(left_spec,
right_spec)
cand = specification_candidate_factory()
assert spec.is_satisfied_by(cand) is outcome
def test_negation_basics(self, filter_specification_factory):
af_spec = AlwaysFalseFilterSpecification()
spec = filter_specification_factory.create_negation(af_spec)
assert spec == spec
at_spec = AlwaysTrueFilterSpecification()
other_spec = filter_specification_factory.create_negation(at_spec)
assert spec != other_spec
str_str = '<%s wrapped_spec:' % spec.__class__.__name__
assert str(spec)[:len(str_str)] == str_str
@pytest.mark.parametrize('wrapped_spec,outcome',
[(AlwaysFalseFilterSpecification(), True),
(AlwaysTrueFilterSpecification(), False)
])
def test_negation(self, filter_specification_factory,
specification_candidate_factory, #pylint: disable=W0621
wrapped_spec, outcome):
spec = filter_specification_factory.create_negation(wrapped_spec)
cand = specification_candidate_factory()
assert spec.is_satisfied_by(cand) is outcome
def test_order_basics(self, order_specification_factory):
spec = order_specification_factory.create_ascending('foo')
str_str = '<%s attr_name:' % spec.__class__.__name__
assert str(spec)[:len(str_str)] == str_str
@pytest.mark.parametrize('attr',
['number_attr', 'text_attr'])
def test_order_ascending(self, order_specification_factory,
specification_candidate_factory, #pylint: disable=W0621
attr):
spec = order_specification_factory.create_ascending(attr)
first_candidate = \
specification_candidate_factory(number_attr=0, text_attr='a')
second_candidate = \
specification_candidate_factory(number_attr=1, text_attr='b')
assert spec.attr_name == attr
assert not spec.eq(first_candidate, second_candidate)
assert spec.ne(first_candidate, second_candidate)
assert spec.lt(first_candidate, second_candidate)
assert not spec.ge(first_candidate, second_candidate)
assert spec.le(first_candidate, second_candidate)
assert not spec.gt(first_candidate, second_candidate)
@pytest.mark.parametrize('attr',
['number_attr', 'text_attr'])
def test_order_descending(self, order_specification_factory,
specification_candidate_factory, #pylint: disable=W0621
attr):
spec = order_specification_factory.create_descending(attr)
first_candidate = \
specification_candidate_factory(number_attr=0, text_attr='a')
second_candidate = \
specification_candidate_factory(number_attr=1, text_attr='b')
assert spec.attr_name == attr
assert not spec.eq(first_candidate, second_candidate)
assert spec.ne(first_candidate, second_candidate)
assert not spec.lt(first_candidate, second_candidate)
assert spec.ge(first_candidate, second_candidate)
assert not spec.le(first_candidate, second_candidate)
assert spec.gt(first_candidate, second_candidate)
def test_order_natural(self, order_specification_factory,
specification_candidate_factory): #pylint: disable=W0621
text_spec = order_specification_factory.create_natural('text_attr')
first_candidate = \
specification_candidate_factory(number_attr=0, text_attr='a10')
second_candidate = \
specification_candidate_factory(number_attr=1, text_attr='a9')
assert not text_spec.lt(first_candidate, second_candidate)
number_spec = \
order_specification_factory.create_natural('number_attr')
assert number_spec.lt(first_candidate, second_candidate)
def test_order_conjunction(self, order_specification_factory,
specification_candidate_factory): #pylint: disable=W0621
text_spec = order_specification_factory.create_natural('text_attr')
number_spec = \
order_specification_factory.create_natural('number_attr')
conj_spec = \
order_specification_factory.create_conjunction(number_spec,
text_spec)
first_candidate = \
specification_candidate_factory(number_attr=0, text_attr='a')
second_candidate = \
specification_candidate_factory(number_attr=0, text_attr='b')
str_str = '<%s left_spec:' % conj_spec.__class__.__name__
assert str(conj_spec)[:len(str_str)] == str_str
assert conj_spec.lt(first_candidate, second_candidate)
assert conj_spec.le(first_candidate, second_candidate)
assert not conj_spec.eq(first_candidate, second_candidate)
assert conj_spec.cmp(first_candidate, second_candidate) == -1
inv_conj_spec = \
order_specification_factory.create_conjunction(text_spec,
number_spec)
assert inv_conj_spec.lt(first_candidate, second_candidate)
assert inv_conj_spec.le(first_candidate, second_candidate)
assert not inv_conj_spec.eq(first_candidate, second_candidate)
assert inv_conj_spec.cmp(first_candidate, second_candidate) == -1
class TestSpecificationGenerator(object):
@pytest.mark.parametrize('attrs,generator',
[(dict(number_attr=NUMBER_VALUE,
text_attr=TEXT_VALUE), eq),
(dict(text_attr=TEXT_VALUE[0]), starts),
(dict(text_attr=TEXT_VALUE[-1]), ends),
(dict(number_attr=NUMBER_VALUE + 1), lt),
(dict(number_attr=NUMBER_VALUE), le),
(dict(number_attr=NUMBER_VALUE - 1), gt),
(dict(number_attr=NUMBER_VALUE), ge),
(dict(text_attr=TEXT_VALUE[1:2]), cnts),
(dict(text_attr=TEXT_VALUE), cntd),
(dict(number_attr=(NUMBER_VALUE - 1,
NUMBER_VALUE + 1)), rng),
])
def test_plain_generators(self, class_configurator,
specification_candidate, attrs, #pylint: disable=W0621
generator):
class_configurator.begin()
try:
spec = generator(**attrs)
if len(attrs) > 1:
assert isinstance(spec, ConjunctionFilterSpecification)
assert spec.is_satisfied_by(specification_candidate)
finally:
class_configurator.end()
@pytest.mark.parametrize('attrs,generator,outcome',
[(('number_attr', 'text_attr'), asc, True),
(('number_attr', 'text_attr'), desc, False),
])
def test_order_generators(self, class_configurator,
specification_candidate_factory, #pylint: disable=W0621
attrs, generator, outcome):
first_candidate = \
specification_candidate_factory(number_attr=NUMBER_VALUE,
text_attr=TEXT_VALUE)
second_candidate = \
specification_candidate_factory(number_attr=NUMBER_VALUE,
text_attr=GREATER_THAN_TEXT_VALUE)
class_configurator.begin()
try:
spec = generator(*attrs)
assert spec.lt(first_candidate, second_candidate) is outcome
finally:
class_configurator.end()
def test_instantiating_generator(self, filter_specification_factory,
specification_candidate): #pylint: disable=W0621
gen = FilterSpecificationGenerator(filter_specification_factory)
spec = gen.lt(number_attr=NUMBER_VALUE + 1) \
& gen.gt(number_attr=NUMBER_VALUE - 1)
assert spec.is_satisfied_by(specification_candidate)
def test_generator_or(self, specification_candidate): #pylint: disable=W0621
spec = lt(number_attr=NUMBER_VALUE + 1) \
| gt(number_attr=NUMBER_VALUE + 1)
assert spec.is_satisfied_by(specification_candidate)
def test_and(self, specification_candidate): #pylint: disable=W0621
spec = eq(number_attr=NUMBER_VALUE) & eq(text_attr=TEXT_VALUE)
assert isinstance(spec, ConjunctionFilterSpecification)
assert spec.is_satisfied_by(specification_candidate)
def test_or(self, specification_candidate): #pylint: disable=W0621
spec = eq(number_attr=NUMBER_VALUE - 1) | eq(text_attr=TEXT_VALUE)
assert isinstance(spec, DisjunctionFilterSpecification)
assert spec.is_satisfied_by(specification_candidate)
def test_not(self, specification_candidate): #pylint: disable=W0621
spec = ~eq(number_attr=NUMBER_VALUE - 1)
assert isinstance(spec, NegationFilterSpecification)
assert spec.is_satisfied_by(specification_candidate)
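# Composition summary, as exercised by test_and/test_or/test_not above (a
# sketch, not additional test code):
#     eq(a=1) & eq(b=2)   -> ConjunctionFilterSpecification
#     eq(a=1) | eq(b=2)   -> DisjunctionFilterSpecification
#     ~eq(a=1)            -> NegationFilterSpecification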
|
|
#!/usr/bin/env python
################################################################################
# Copyright (c) 2016 Thomas McAllen #
# All Rights Reserved. #
# #
# #
# DISCLAIMER #
# #
# Please note: All scripts/tools in this repo are released for use "AS #
# IS" without any warranties of any kind, including, but not limited to #
# their installation, use, or performance. I disclaim any and all #
# warranties, either express or implied, including but not limited to #
# any warranty of noninfringement, merchantability, and/ or fitness for #
# a particular purpose. I do not warrant that the technology will #
# meet your requirements, that the operation thereof will be #
# uninterrupted or error-free, or that any errors will be corrected. #
# #
# Any use of these scripts and tools is at your own risk. There is no #
# guarantee that they have been through thorough testing in a #
# comparable environment and I am not responsible for any damage #
# or data loss incurred with their use. #
# #
# You are responsible for reviewing and testing any scripts you run #
# thoroughly before use in any non-testing environment. #
# #
# #
# LICENSE #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
################################################################################
#version = 1.0.1-alpha
#revdate = 2016-08-26
import acitoolkit.acitoolkit as aci
import collections, sys
class QueryFexPortProfile():
@classmethod
def find_bindings(self, session, node, fex, fexprof):
self.fexprof = fexprof
self.node = node
self.fex = fex
#query static path/extpath bindings
self.mo_query_prefix_a = '/api/node/class/fvRsPathAtt.json?query-target-filter=and(wcard(fvRsPathAtt.tDn,"paths-'
self.mo_query_suffix_a1 = '/extpaths-'
self.mo_query_suffix_a2 = '"))'
self.mo_query_url_a = self.mo_query_prefix_a + self.node + self.mo_query_suffix_a1 + self.fex + self.mo_query_suffix_a2
self.ret = session.get(self.mo_query_url_a)
self.binding_list = []
self.node_data = self.ret.json()['imdata']
#query fex profile port ids
self.mo_query_prefix_b = '/api/node/mo/uni/infra/fexprof-'
self.mo_query_suffix_b = '.json?query-target=subtree&target-subtree-class=infraPortBlk'
self.mo_query_url_b = self.mo_query_prefix_b + self.fexprof + self.mo_query_suffix_b
self.ret2 = session.get(self.mo_query_url_b)
self.node_data2 = self.ret2.json()['imdata']
#query static extprotpath bindings
self.mo_query_prefix_c = '/api/node/class/fvRsPathAtt.json?query-target-filter=and(wcard(fvRsPathAtt.tDn,"/extprotpaths-'
self.mo_query_suffix_c = '"))'
self.mo_query_url_c = self.mo_query_prefix_c + self.fex + self.mo_query_suffix_c
self.ret3 = session.get(self.mo_query_url_c)
self.node_data3 = self.ret3.json()['imdata']
self.port_list_vpc = []
for self.c in self.node_data2:
self.port_list_vpc.append((self.c['infraPortBlk']['attributes']['dn'].split('-')[2],
self.c['infraPortBlk']['attributes']['toPort']))
#get bindings & epg via paths/extpaths
for self.x in self.node_data:
self.tDn = self.x['fvRsPathAtt']['attributes']['tDn'].rpartition('/')[2].rpartition(']')[0]
self.dnEPG = self.x['fvRsPathAtt']['attributes']['dn'].split('/')[3].rpartition('epg-')[2]
#add individual port
if self.tDn.isdigit():
if self.tDn not in self.binding_list:
self.binding_list.append(("eth" + self.fex + "/1/" + self.tDn, self.dnEPG))
#add vpc port
if not self.tDn.isdigit():
self.tDn_vpc = self.tDn.rpartition('[')[2]
for self.x, self.y in self.port_list_vpc:
#match vpc name fvRsPathAtt[tDn] from bindings w/ infraPortBlk[dn] from FP
if self.tDn_vpc == self.x:
#add epg fvRsPathAtt[dn] & intf infraPortBlk[toPort](not acct fromPort vpc range)
self.binding_list.append(("eth" + self.fex + "/1/" + self.y, self.dnEPG))
#get bindings & epg via extprotpaths
for self.x in self.node_data3:
self.tDn = self.x['fvRsPathAtt']['attributes']['tDn'].rpartition('/')[2].rpartition(']')[0]
self.dnEPG = self.x['fvRsPathAtt']['attributes']['dn'].split('/')[3].rpartition('epg-')[2]
#add individual port
if self.tDn.isdigit():
if self.tDn not in self.binding_list:
self.binding_list.append(("eth" + self.fex + "/1/" + self.tDn, self.dnEPG))
#add vpc port
if not self.tDn.isdigit():
self.tDn_vpc = self.tDn.rpartition('[')[2]
for self.x, self.y in self.port_list_vpc:
#match vpc name fvRsPathAtt[tDn] from bindings w/ infraPortBlk[dn] from fp
if self.tDn_vpc == self.x:
#add epg fvRsPathAtt[dn] & intf infraPortBlk[toPort](not acct fromPort vpc range)
self.binding_list.append(("eth" + self.fex + "/1/" + self.y, self.dnEPG))
return self.binding_list
@classmethod
def find_interfaces(self, session, node, fex):
self.node = node
self.fex = fex
#query fex port ids
self.mo_query_prefix = '/api/node/class/topology/pod-1/node-'
self.mo_query_suffix1 = '/l1PhysIf.json?query-target-filter=and(wcard(l1PhysIf.id,"eth'
self.mo_query_suffix2 = '"))'
self.mo_query_url = self.mo_query_prefix + self.node + self.mo_query_suffix1 + self.fex + self.mo_query_suffix2
self.ret = session.get(self.mo_query_url)
self.intf_list = []
self.node_data = self.ret.json()['imdata']
for self.x in self.node_data:
self.intf_list.append(self.x['l1PhysIf']['attributes']['id'])
return self.intf_list
@classmethod
def find_hports(self, session, fexprof, accportprof):
self.fexprof = fexprof
self.accportprof = accportprof
self.mo_query_subprefix = '/api/node/mo/uni/infra/fexprof-'
self.mo_query_subsuffix = '.json?query-target=subtree&target-subtree-class='
self.mo_query_prefix = self.mo_query_subprefix + self.fexprof + self.mo_query_subsuffix
#query fex profile port attributes
self.mo_query_url = self.mo_query_prefix + 'infraHPortS'
self.ret = session.get(self.mo_query_url)
self.node_data = self.ret.json()['imdata']
        #query fex profile port ids
        self.mo_query_url2 = self.mo_query_prefix + 'infraPortBlk'
        self.ret2 = session.get(self.mo_query_url2)
        self.node_data2 = self.ret2.json()['imdata']
        self.port_list = []
for self.a, self.b in zip(self.node_data, self.node_data2):
self.port_list.append((self.a['infraHPortS']['attributes']['name'],
self.a['infraHPortS']['attributes']['descr'][:50].strip(),
self.b['infraPortBlk']['attributes']['fromPort'],
self.b['infraPortBlk']['attributes']['toPort']))
return self.port_list
@classmethod
def find_l1ports(self, session, node):
self.node = node
#query node intf attributes
self.mo_query_prefix = '/api/node/class/topology/pod-1/node-'
self.mo_query_suffix = '/l1PhysIf.json?rsp-subtree=children&rsp-subtree-class=ethpmPhysIf'
self.mo_query_url = self.mo_query_prefix + self.node + self.mo_query_suffix
self.ret = session.get(self.mo_query_url)
if self.ret.json()['totalCount'] == "1":
print "\n no node or entries found for " + self.node
sys.exit(0)
self.node_data = self.ret.json()['imdata']
self.l1port_list = []
for self.x in self.node_data:
self.l1port_list.append((self.x['l1PhysIf']['attributes']['id'],
self.x['l1PhysIf']['attributes']['adminSt'],
self.x['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operSt'],
self.x['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operSpeed'],
self.x['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operDuplex']))
return self.l1port_list
class QueryAccessPortProfile():
@classmethod
def find_bindings(self, session, node):
self.node = node
#query static path bindings
self.mo_query_prefix = '/api/node/class/fvRsPathAtt.json?query-target-filter=and(wcard(fvRsPathAtt.tDn,"paths-'
self.mo_query_suffix = '/pathep-"))'
self.mo_query_url = self.mo_query_prefix + self.node + self.mo_query_suffix
self.ret = session.get(self.mo_query_url)
self.binding_list = []
self.node_data = self.ret.json()['imdata']
#get bindings & epg via paths
for self.x in self.node_data:
self.tDn = self.x['fvRsPathAtt']['attributes']['tDn'].rpartition('/')[2].rpartition(']')[0]
self.dnEPG = self.x['fvRsPathAtt']['attributes']['dn'].split('/')[3].rpartition('epg-')[2]
#add individual port
if self.tDn.isdigit():
#de-dup
if self.tDn not in self.binding_list:
self.binding_list.append(("eth1/" + self.tDn, self.dnEPG))
#add vpc port in-prog
return self.binding_list
@classmethod
def find_interfaces(self, session, node):
self.node = node
#query node port ids
self.mo_query_prefix = '/api/node/class/topology/pod-1/node-'
self.mo_query_suffix = '/l1PhysIf.json?query-target-filter=and(wcard(l1PhysIf.id,"eth1/"))'
self.mo_query_url = self.mo_query_prefix + self.node + self.mo_query_suffix
self.ret = session.get(self.mo_query_url)
self.intf_list = []
self.node_data = self.ret.json()['imdata']
for self.x in self.node_data:
self.intf_list.append(self.x['l1PhysIf']['attributes']['id'])
return self.intf_list
@classmethod
def find_hports(self, session, accportprof):
self.accportprof = accportprof
self.mo_query_subprefix = '/api/node/mo/uni/infra/accportprof-'
self.mo_query_subsuffix = '.json?query-target=subtree&target-subtree-class='
self.mo_query_prefix = self.mo_query_subprefix + self.accportprof + self.mo_query_subsuffix
#query access port profile attributes
self.mo_query_url = self.mo_query_prefix + 'infraHPortS'
self.ret = session.get(self.mo_query_url)
self.node_data = self.ret.json()['imdata']
        #query access port profile port blocks
        self.mo_query_url2 = self.mo_query_prefix + 'infraPortBlk'
        self.ret2 = session.get(self.mo_query_url2)
        self.node_data2 = self.ret2.json()['imdata']
        self.port_list = []
for self.a, self.b in zip(self.node_data, self.node_data2):
self.port_list.append((self.a['infraHPortS']['attributes']['name'],
self.a['infraHPortS']['attributes']['descr'][:50].strip(),
self.b['infraPortBlk']['attributes']['fromPort'],
self.b['infraPortBlk']['attributes']['toPort']))
return self.port_list
@classmethod
def find_l1ports(self, session, node):
self.node = node
#query node intf attributes
self.mo_query_prefix = '/api/node/class/topology/pod-1/node-'
self.mo_query_suffix = '/l1PhysIf.json?rsp-subtree=children&rsp-subtree-class=ethpmPhysIf'
self.mo_query_url = self.mo_query_prefix + self.node + self.mo_query_suffix
self.ret = session.get(self.mo_query_url)
if self.ret.json()['totalCount'] == "1":
print "\n no node or entries found for " + self.node
sys.exit(0)
self.node_data = self.ret.json()['imdata']
self.l1port_list = []
for self.x in self.node_data:
self.l1port_list.append((self.x['l1PhysIf']['attributes']['id'],
self.x['l1PhysIf']['attributes']['adminSt'],
self.x['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operSt'],
self.x['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operSpeed'],
self.x['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operDuplex']))
return self.l1port_list
def main():
description = 'Logs onto the APIC and displays static binding paths.'
creds = aci.Credentials('apic', description)
creds.add_argument('-n' , '--node' , type=str, help='Node(leaf) id e.g. 123' , default=None)
creds.add_argument('-f' , '--fex' , type=str, help='Fex id e.g. 321' , default=None)
creds.add_argument('-fp' , '--fexprof' , type=str, help='Fex profile e.g. FP321' , default=None)
creds.add_argument('-app', '--accportprof' , type=str, help='Interface/port profile e.g. APP123', default=None)
args = creds.get()
session = aci.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
sys.exit(0)
if args.fexprof:
if not args.node or not args.fex:
print "\n Must enter node and fex along with the fex profile"
sys.exit(0)
compareFexProf = QueryFexPortProfile()
l1prt_list = compareFexProf.find_l1ports (session, args.node)
interface_list = compareFexProf.find_interfaces(session, args.node, args.fex)
hport_list = compareFexProf.find_hports (session, args.fexprof, args.accportprof)
binding_list = compareFexProf.find_bindings (session, args.node, args.fex, args.fexprof)
elif args.accportprof:
if not args.node:
print "\n Must enter node along with the access port profile"
sys.exit(0)
compareAccessProf = QueryAccessPortProfile()
l1prt_list = compareAccessProf.find_l1ports (session, args.node)
binding_list = compareAccessProf.find_bindings (session, args.node)
interface_list = compareAccessProf.find_interfaces(session, args.node)
hport_list = compareAccessProf.find_hports (session, args.accportprof)
if not args.fexprof and not args.accportprof:
print "\n Must enter either combination (1) --node --accportprof (2) --node --fex --fexprof"
sys.exit(0)
intf_dict = {}
for interfaces in interface_list:
intf_dict[interfaces] = []
binding = ''
none = 'none'
for interfaces in interface_list:
#total bindings per intf
bindingct = sum(x == interfaces for x, _ in binding_list)
for binding, _ in binding_list:
if interfaces == binding:
bindings = ' bindings(%s)' % (bindingct)
intf_dict[interfaces].append('%-15s' % (bindings))
break
if interfaces != binding:
intf_dict[interfaces].append(' %-11s ' % (none))
for k, v in intf_dict.iteritems():
#id, adminst, operst, operspd, operdplx
for a, b, c, d, e in l1prt_list :
if k == a:
intf_dict[k].append(b)
intf_dict[k].append(c)
intf_dict[k].append('%-10s' % (d))
intf_dict[k].append(e)
for k, v in intf_dict.iteritems():
#name, desc, fromprt, toprt
for w, x, y, z in hport_list:
#leaf
if k.count('/') == 1:
#add name & description to interface/range
if int(k.split('/')[1]) in range(int(y), int(z)+1):
print "----------->" + w
intf_dict[k].append(' %-24s' % (w))
intf_dict[k].append(x)
#fex
if k.count('/') == 2:
#add name & description to interface/range
if int(k.split('/')[2]) in range(int(y), int(z)+1):
intf_dict[k].append(' %-24s' % (w))
intf_dict[k].append(x)
if args.node and args.fex:
display_output = collections.OrderedDict(sorted(intf_dict.items(), key=lambda (key, value): int(key.split('/')[2])))
if args.node and not args.fex:
display_output = collections.OrderedDict(sorted(intf_dict.items(), key=lambda (key, value): int(key.split('/')[1])))
print "\n"
print " " + "-"*137
print " Port Bindings AdmnSt OperSt OperSpd OperDplx Name Description"
print " " + "-"*137
for k,v in display_output.items():
print " %-12s %-15s" % (k, "".join(word.ljust(8) for word in v))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
|
import StringIO
from novaclient.v1_1 import servers
from tests import utils
from tests.v1_1 import fakes
cs = fakes.FakeClient()
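# Note: cs is a fake novaclient; assert_called() in the tests below verifies the HTTP method
# and path recorded by the fake client for the most recent request.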
class ServersTest(utils.TestCase):
def test_list_servers(self):
sl = cs.servers.list()
cs.assert_called('GET', '/servers/detail')
[self.assertTrue(isinstance(s, servers.Server)) for s in sl]
def test_list_servers_undetailed(self):
sl = cs.servers.list(detailed=False)
cs.assert_called('GET', '/servers')
[self.assertTrue(isinstance(s, servers.Server)) for s in sl]
def test_get_server_details(self):
s = cs.servers.get(1234)
cs.assert_called('GET', '/servers/1234')
self.assertTrue(isinstance(s, servers.Server))
self.assertEqual(s.id, 1234)
self.assertEqual(s.status, 'BUILD')
def test_create_server(self):
s = cs.servers.create(
name="My server",
image=1,
flavor=1,
meta={'foo': 'bar'},
userdata="hello moto",
key_name="fakekey",
files={
'/etc/passwd': 'some data', # a file
'/tmp/foo.txt': StringIO.StringIO('data'), # a stream
}
)
cs.assert_called('POST', '/servers')
self.assertTrue(isinstance(s, servers.Server))
def test_create_server_userdata_file_object(self):
s = cs.servers.create(
name="My server",
image=1,
flavor=1,
meta={'foo': 'bar'},
userdata=StringIO.StringIO('hello moto'),
files={
'/etc/passwd': 'some data', # a file
'/tmp/foo.txt': StringIO.StringIO('data'), # a stream
},
)
cs.assert_called('POST', '/servers')
self.assertTrue(isinstance(s, servers.Server))
def test_update_server(self):
s = cs.servers.get(1234)
# Update via instance
s.update(name='hi')
cs.assert_called('PUT', '/servers/1234')
s.update(name='hi')
cs.assert_called('PUT', '/servers/1234')
# Silly, but not an error
s.update()
# Update via manager
cs.servers.update(s, name='hi')
cs.assert_called('PUT', '/servers/1234')
def test_delete_server(self):
s = cs.servers.get(1234)
s.delete()
cs.assert_called('DELETE', '/servers/1234')
cs.servers.delete(1234)
cs.assert_called('DELETE', '/servers/1234')
cs.servers.delete(s)
cs.assert_called('DELETE', '/servers/1234')
def test_delete_server_meta(self):
s = cs.servers.delete_meta(1234, ['test_key'])
cs.assert_called('DELETE', '/servers/1234/metadata/test_key')
def test_set_server_meta(self):
s = cs.servers.set_meta(1234, {'test_key': 'test_value'})
cs.assert_called('POST', '/servers/1234/metadata',
                 {'metadata': {'test_key': 'test_value'}})
def test_find(self):
s = cs.servers.find(name='sample-server')
cs.assert_called('GET', '/servers/detail')
self.assertEqual(s.name, 'sample-server')
# Find with multiple results arbitrarily returns the first item
s = cs.servers.find(flavor={"id": 1, "name": "256 MB Server"})
sl = cs.servers.findall(flavor={"id": 1, "name": "256 MB Server"})
self.assertEqual(sl[0], s)
self.assertEqual([s.id for s in sl], [1234, 5678])
def test_reboot_server(self):
s = cs.servers.get(1234)
s.reboot()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.reboot(s, type='HARD')
cs.assert_called('POST', '/servers/1234/action')
def test_rebuild_server(self):
s = cs.servers.get(1234)
s.rebuild(image=1)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.rebuild(s, image=1)
cs.assert_called('POST', '/servers/1234/action')
s.rebuild(image=1, password='5678')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.rebuild(s, image=1, password='5678')
cs.assert_called('POST', '/servers/1234/action')
def test_resize_server(self):
s = cs.servers.get(1234)
s.resize(flavor=1)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.resize(s, flavor=1)
cs.assert_called('POST', '/servers/1234/action')
def test_confirm_resized_server(self):
s = cs.servers.get(1234)
s.confirm_resize()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.confirm_resize(s)
cs.assert_called('POST', '/servers/1234/action')
def test_revert_resized_server(self):
s = cs.servers.get(1234)
s.revert_resize()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.revert_resize(s)
cs.assert_called('POST', '/servers/1234/action')
def test_migrate_server(self):
s = cs.servers.get(1234)
s.migrate()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.migrate(s)
cs.assert_called('POST', '/servers/1234/action')
def test_add_fixed_ip(self):
s = cs.servers.get(1234)
s.add_fixed_ip(1)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.add_fixed_ip(s, 1)
cs.assert_called('POST', '/servers/1234/action')
def test_remove_fixed_ip(self):
s = cs.servers.get(1234)
s.remove_fixed_ip('10.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.remove_fixed_ip(s, '10.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
def test_add_floating_ip(self):
s = cs.servers.get(1234)
s.add_floating_ip('11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.add_floating_ip(s, '11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
f = cs.floating_ips.list()[0]
cs.servers.add_floating_ip(s, f)
cs.assert_called('POST', '/servers/1234/action')
s.add_floating_ip(f)
cs.assert_called('POST', '/servers/1234/action')
def test_remove_floating_ip(self):
s = cs.servers.get(1234)
s.remove_floating_ip('11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.remove_floating_ip(s, '11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
f = cs.floating_ips.list()[0]
cs.servers.remove_floating_ip(s, f)
cs.assert_called('POST', '/servers/1234/action')
s.remove_floating_ip(f)
cs.assert_called('POST', '/servers/1234/action')
def test_rescue(self):
s = cs.servers.get(1234)
s.rescue()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.rescue(s)
cs.assert_called('POST', '/servers/1234/action')
def test_unrescue(self):
s = cs.servers.get(1234)
s.unrescue()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.unrescue(s)
cs.assert_called('POST', '/servers/1234/action')
def test_get_console_output_without_length(self):
success = 'foo'
s = cs.servers.get(1234)
s.get_console_output()
self.assertEqual(s.get_console_output(), success)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.get_console_output(s)
self.assertEqual(cs.servers.get_console_output(s), success)
cs.assert_called('POST', '/servers/1234/action')
def test_get_console_output_with_length(self):
success = 'foo'
s = cs.servers.get(1234)
s.get_console_output(length=50)
self.assertEqual(s.get_console_output(length=50), success)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.get_console_output(s, length=50)
self.assertEqual(cs.servers.get_console_output(s, length=50), success)
cs.assert_called('POST', '/servers/1234/action')
def test_get_server_actions(self):
s = cs.servers.get(1234)
actions = s.actions()
self.assertTrue(actions is not None)
cs.assert_called('GET', '/servers/1234/actions')
actions_from_manager = cs.servers.actions(1234)
self.assertTrue(actions_from_manager is not None)
cs.assert_called('GET', '/servers/1234/actions')
self.assertEqual(actions, actions_from_manager)
def test_get_server_diagnostics(self):
s = cs.servers.get(1234)
diagnostics = s.diagnostics()
self.assertTrue(diagnostics is not None)
cs.assert_called('GET', '/servers/1234/diagnostics')
diagnostics_from_manager = cs.servers.diagnostics(1234)
self.assertTrue(diagnostics_from_manager is not None)
cs.assert_called('GET', '/servers/1234/diagnostics')
self.assertEqual(diagnostics, diagnostics_from_manager)
def test_get_vnc_console(self):
s = cs.servers.get(1234)
s.get_vnc_console('fake')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.get_vnc_console(s, 'fake')
cs.assert_called('POST', '/servers/1234/action')
def test_create_image(self):
s = cs.servers.get(1234)
s.create_image('123')
cs.assert_called('POST', '/servers/1234/action')
s.create_image('123', {})
cs.assert_called('POST', '/servers/1234/action')
cs.servers.create_image(s, '123')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.create_image(s, '123', {})
cs.assert_called('POST', '/servers/1234/action')
|
|
from __future__ import print_function
from builtins import map
from builtins import range
import sys
import os
from flask_testing import TestCase
from flask import request
from flask import url_for, Flask
import unittest
import requests
from requests.exceptions import ReadTimeout
import time
from object_service import app
import json
import httpretty
import timeout_decorator
import mock
import pytest
class TestConfig(TestCase):
'''Check if config has necessary entries'''
def create_app(self):
'''Create the wsgi application'''
app_ = app.create_app()
return app_
def test_config_values(self):
'''Check if all required config variables are there'''
required = ["OBJECTS_SIMBAD_TAP_URL",
"OBJECTS_SIMBAD_TAP_URL_CDS",
"OBJECTS_NED_URL",
"OBJECTS_NED_OBJSEARCH",
"OBJECTS_SIMBAD_MAX_RADIUS",
"OBJECTS_NED_MAX_RADIUS",
"OBJECTS_CACHE_TIMEOUT",
"OBJECTS_DEFAULT_RADIUS",
"OBJECTS_SIMBAD_MAX_NUMBER",
"OBJECTS_NED_URL",
"OBJECTS_SOLRQUERY_URL",
"API_URL"
]
missing = [x for x in required if x not in self.app.config.keys()]
self.assertTrue(len(missing) == 0)
class TestDataRetrieval(TestCase):
'''Check if methods return expected results'''
def create_app(self):
'''Create the wsgi application'''
app_ = app.create_app()
return app_
@httpretty.activate
def test_get_simbad_identifiers(self):
'''Test to see if retrieval of SIMBAD identifiers method behaves as expected'''
from object_service.SIMBAD import get_simbad_data
objects = ['Andromeda','LMC']
mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"]]}
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
result = get_simbad_data(objects, 'objects')
expected = {'data': {u'LMC': {'id': '3133169', 'canonical': u'LMC'}, u'ANDROMEDA': {'id': '1575544', 'canonical': u'ANDROMEDA'}}}
self.assertEqual(result, expected)
@httpretty.activate
def test_get_simbad_objects(self):
'''Test to see if retrieval of SIMBAD objects method behaves as expected'''
from object_service.SIMBAD import get_simbad_data
identifiers = ["3133169", "1575544"]
mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"]]}
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
result = get_simbad_data(identifiers, 'identifiers')
expected = {'data': {u'3133169': {'id': '3133169', 'canonical': u'LMC'}, u'1575544': {'id': '1575544', 'canonical': u'ANDROMEDA'}}}
self.assertEqual(result, expected)
@httpretty.activate
def test_get_ned_objects(self):
'''Test to see if retrieval of NED objects method behaves as expected'''
from object_service.NED import get_ned_data
identifiers = ["LMC"]
mockdata = {u'NameResolver': u'NED-Egret', u'Copyright': u'(C) 2017 California Institute of Technology',
u'Preferred': {u'Name': u'Large Magellanic Cloud'},
u'ResultCode': 3, u'StatusCode': 100}
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
result = get_ned_data(identifiers, 'identifiers')
expected = {'skipped': [], 'data': {u'LMC': {'id': 'LMC', 'canonical': u'Large Magellanic Cloud'}}}
self.assertEqual(result, expected)
@httpretty.activate
def test_get_ned_objects_unknown_object(self):
'''Test to see if retrieval of NED objects method behaves as expected'''
from object_service.NED import get_ned_data
identifiers = list(map(str, range(4)))
def get_mock_data(v, status_code=100):
mockdata = {u'NameResolver': u'NED-Egret',
u'Copyright': u'(C) 2017 California Institute of Technology',
u'Preferred': {u'Name': u'FOO BAR'}}
try:
mockdata['ResultCode'] = int(v)
except:
mockdata['ResultCode'] = 0
mockdata['StatusCode'] = status_code
return mockdata
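# Note: in these mocks only a ResultCode of 3 (with StatusCode 100) is treated as a successful
# resolution; identifiers yielding other codes are expected to end up in the 'skipped' list.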
def request_callback(request, uri, headers):
data = request.body
v = json.loads(request.body)["name"]["v"]
try:
return (200, headers, json.dumps(get_mock_data(v)))
except:
return (200, headers, json.dumps(get_mock_data('0')))
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body=request_callback)
result = get_ned_data(identifiers, 'identifiers')
expected = {'data': {'3': {'canonical': u'FOO BAR', 'id': '3'}},
'skipped': ['0','1','2']}
self.assertEqual(result, expected)
@httpretty.activate
def test_get_ned_objects_unsuccessful(self):
'''Test to see if retrieval of NED objects method behaves as expected'''
from object_service.NED import get_ned_data
identifiers = ["LMC"]
mockdata = {u'NameResolver': u'NED-Egret', u'Copyright': u'(C) 2017 California Institute of Technology',
u'Preferred': {u'Name': u'Large Magellanic Cloud'},
u'ResultCode': 0, u'StatusCode': 300}
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
result = get_ned_data(identifiers, 'identifiers')
expected = {'data': {}, 'skipped': ['LMC']}
self.assertEqual(result, expected)
@httpretty.activate
def test_get_ned_objects_unexpected_resultcode(self):
'''Test to see if retrieval of NED objects method behaves as expected'''
from object_service.NED import get_ned_data
identifiers = ["LMC"]
mockdata = {u'NameResolver': u'NED-Egret', u'Copyright': u'(C) 2017 California Institute of Technology',
u'Preferred': {u'Name': u'Large Magellanic Cloud'},
u'ResultCode': 10, u'StatusCode': 100}
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
result = get_ned_data(identifiers, 'identifiers')
expected = {'data': {}, 'skipped': ['LMC']}
self.assertEqual(result, expected)
@httpretty.activate
def test_get_ned_objects_service_error(self):
'''Test to see if retrieval of NED objects method behaves as expected'''
from object_service.NED import get_ned_data
identifiers = ["LMC"]
mockdata = {u'NameResolver': u'NED-Egret', u'Copyright': u'(C) 2017 California Institute of Technology',
u'Preferred': {u'Name': u'Large Magellanic Cloud'},
u'ResultCode': 10, u'StatusCode': 100}
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=500,
body='%s'%json.dumps(mockdata))
result = get_ned_data(identifiers, 'identifiers')
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED returned status 500'}
self.assertEqual(result, expected)
@httpretty.activate
def test_do_ned_query(self):
'''Test to see if single NED object lookup behaves'''
from object_service.NED import do_ned_object_lookup
identifier = "LMC"
mockdata = {u'NameResolver': u'NED-Egret', u'Copyright': u'(C) 2017 California Institute of Technology',
u'Preferred': {u'Name': u'Large Magellanic Cloud'},
u'ResultCode': 3, u'StatusCode': 100}
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=500,
body='%s'%json.dumps(mockdata))
result = do_ned_object_lookup(QUERY_URL, identifier)
expected = {"Error": "Unable to get results!", "Error Info": "NED returned status 500"}
self.assertEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.post')
def test_do_ned_lookup_readtimeout(self, mocked_post):
'''Test to see if single NED object lookup throws proper exception at timeout'''
from object_service.NED import do_ned_object_lookup
mocked_post.side_effect = ReadTimeout('Connection timed out.')
self.app.config['OBJECTS_NED_TIMEOUT'] = 0.1
QUERY_URL = "http://aaaa.org"
result = do_ned_object_lookup(QUERY_URL, "bar")
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED request timed out: Connection timed out.'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.post')
def test_do_ned_query_identifiers_readtimeout(self, mocked_post):
'''Test to see if single NED query throws proper exception at timeout'''
from object_service.NED import get_ned_data
mocked_post.side_effect = ReadTimeout('Connection timed out.')
identifiers = ['FOO_BAR']
self.app.config['OBJECTS_NED_TIMEOUT'] = 0.1
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
result = get_ned_data(identifiers, "identifiers")
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED request timed out: Connection timed out.'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.post')
def test_do_ned_query_objects_readtimeout(self, mocked_post):
'''Test to see if single NED query throws proper exception at timeout'''
from object_service.NED import get_ned_data
mocked_post.side_effect = ReadTimeout('Connection timed out.')
identifiers = ['FOO_BAR']
self.app.config['OBJECTS_NED_TIMEOUT'] = 0.1
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
result = get_ned_data(identifiers, "objects")
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED request timed out: Connection timed out.'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.post')
def test_do_ned_lookup_exception(self, mocked_post):
'''Test to see if single NED object lookup throws proper exception on failure'''
from object_service.NED import do_ned_object_lookup
mocked_post.side_effect = Exception('Oops! Something went boink!')
self.app.config['OBJECTS_NED_TIMEOUT'] = 0.1
QUERY_URL = "http://aaaa.org"
result = do_ned_object_lookup(QUERY_URL, "bar")
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED request failed (Oops! Something went boink!)'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.post')
def test_do_ned_query_identifiers_exception(self, mocked_post):
'''Test to see if single NED query throws proper exception on failure'''
from object_service.NED import get_ned_data
mocked_post.side_effect = Exception('Oops! Something went boink!')
identifiers = ['FOO_BAR']
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
result = get_ned_data(identifiers, "identifiers")
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED request failed (Oops! Something went boink!)'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.post')
def test_do_ned_query_objects_exception(self, mocked_post):
'''Test to see if single NED query throws proper exception on failure'''
from object_service.NED import get_ned_data
mocked_post.side_effect = Exception('Oops! Something went boink!')
identifiers = ['FOO_BAR']
QUERY_URL = self.app.config.get('OBJECTS_NED_URL')
result = get_ned_data(identifiers, "objects")
expected = {'Error': 'Unable to get results!', 'Error Info': 'NED request failed (Oops! Something went boink!)'}
self.assertDictEqual(result, expected)
def test_ned_simple_query(self):
'''Test to see if the "simple" input type works properly'''
from object_service.NED import get_ned_data
identifiers = ['NGC_1234','Abell_678']
result = get_ned_data(identifiers, 'simple')
expected = {'data': {'Abell_678': {'canonical': 'Abell 678', 'id': 'Abell_678'},
'NGC_1234': {'canonical': 'NGC 1234', 'id': 'NGC_1234'}},
'skipped':[]
}
self.assertDictEqual(result, expected)
def test_ned_unknown_inputtype(self):
'''Test to see if unknown input type works properly'''
from object_service.NED import get_ned_data
identifiers = ['NGC_1234','Abell_678']
result = get_ned_data(identifiers, 'foo')
expected = {'Error': 'Unable to get results!', 'Error Info': 'Unknown input type specified!'}
self.assertDictEqual(result, expected)
def test_simbad_unknown_inputtype(self):
'''Test to see if unknown input type works properly'''
from object_service.SIMBAD import get_simbad_data
identifiers = ['NGC_1234','Abell_678']
result = get_simbad_data(identifiers, 'foo')
expected = {'Error': 'Unable to get results!', 'Error Info': 'Unknown input type specified!'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.SIMBAD.current_app.client.post')
def test_get_simbad_objects_timeout(self, mocked_post):
'''Test to see if retrieval of SIMBAD objects method behaves as expected'''
from object_service.SIMBAD import get_simbad_data
mocked_post.side_effect = ReadTimeout('Connection timed out.')
identifiers = ["3133169", "1575544"]
self.app.config['OBJECTS_SIMBAD_TIMEOUT'] = 1
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
result = get_simbad_data(identifiers, 'identifiers')
expected = {'Error': 'Unable to get results!', 'Error Info': 'SIMBAD request timed out: Connection timed out.'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.SIMBAD.current_app.client.post')
def test_get_simbad_position_query_timeout(self, mocked_post):
'''Test to see if SIMBAD position query method behaves as expected'''
from object_service.SIMBAD import simbad_position_query
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astropy import units as u
mocked_post.side_effect = ReadTimeout('Connection timed out.')
identifiers = ["3133169", "1575544"]
self.app.config['OBJECTS_SIMBAD_TIMEOUT'] = 1
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
c = SkyCoord("0.1 0.1", unit=(u.deg, u.deg))
r = Angle('0.1 degrees')
result = simbad_position_query(c, r)
expected = {'Error Info': 'SIMBAD request timed out: Connection timed out.', 'Error': 'Unable to get results!'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.get')
def test_get_ned_position_query_timeout(self, mocked_post):
'''Test to see if NED position query method behaves as expected'''
from object_service.NED import ned_position_query
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astropy import units as u
mocked_post.side_effect = ReadTimeout('Connection timed out.')
identifiers = ["3133169", "1575544"]
self.app.config['OBJECTS_SIMBAD_TIMEOUT'] = 1
QUERY_URL = self.app.config.get('OBJECTS_NED_OBJSEARCH')
c = SkyCoord("11.1 11.1", unit=(u.deg, u.deg))
r = Angle('0.1 degrees')
result = ned_position_query(c, r)
expected = {'Error Info': 'NED cone search timed out: Connection timed out.', 'Error': 'Unable to get results!'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.NED.current_app.client.get')
def test_get_ned_position_query_exception(self, mocked_post):
'''Test to see if NED position query method behaves as expected'''
from object_service.NED import ned_position_query
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astropy import units as u
mocked_post.side_effect = Exception('Houston, we have a problem!')
identifiers = ["3133169", "1575544"]
self.app.config['OBJECTS_SIMBAD_TIMEOUT'] = 1
QUERY_URL = self.app.config.get('OBJECTS_NED_OBJSEARCH')
c = SkyCoord("11.1 11.1", unit=(u.deg, u.deg))
r = Angle('0.1 degrees')
result = ned_position_query(c, r)
expected = {'Error Info': 'NED cone search failed (Houston, we have a problem!)', 'Error': 'Unable to get results!'}
self.assertDictEqual(result, expected)
@mock.patch('object_service.SIMBAD.current_app.client.post')
def test_get_simbad_objects_exception(self, mocked_post):
'''Test to see if retrieval of SIMBAD objects method behaves as expected'''
from object_service.SIMBAD import get_simbad_data
mocked_post.side_effect = Exception('Oops! Something went boink!')
identifiers = ["3133169", "1575544"]
self.app.config['OBJECTS_SIMBAD_TIMEOUT'] = 1
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
result = get_simbad_data(identifiers, 'identifiers')
expected = {'Error': 'Unable to get results!', 'Error Info': 'SIMBAD request failed (not timeout): Oops! Something went boink!'}
self.assertDictEqual(result, expected)
@httpretty.activate
def test_do_cone_search(self):
'''Test to see if SIMBAD cone search method behaves as expected'''
from object_service.utils import parse_position_string
from object_service.SIMBAD import simbad_position_query
pstring = "80.89416667 -69.75611111:0.166666"
mockdata = {"data":[["2003A&A...405..111G"],["2011AcA....61..103G"]]}
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
# First parse the position string and see if we get the expected results back
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], ['80.8942','-69.7561',0.166666])
# Next query with this positional information
result = simbad_position_query(coords, radius)
expected = [u'2011AcA....61..103G', u'2003A&A...405..111G']
for b in expected:
self.assertIn(b, result)
@mock.patch('object_service.SIMBAD.current_app.client.post')
def test_do_cone_search_exception(self, mocked_post):
'''Test to see if SIMBAD cone search method behaves as expected'''
from object_service.SIMBAD import simbad_position_query
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astropy import units as u
mocked_post.side_effect = Exception('Oops! Something went boink!')
pstring = "80.89416667 -69.75611111:0.166666"
mockdata = {"data":[["2003A&A...405..111G"],["2011AcA....61..103G"]]}
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
# First parse the position string and see if we get the expected results back
c = SkyCoord("80.89416667 -69.7561111", unit=(u.deg, u.deg))
r = Angle('0.2 degrees')
result = simbad_position_query(c, r)
expected = {'Error Info': 'SIMBAD request failed (not timeout): Oops! Something went boink!', 'Error': u'Unable to get results!'}
self.assertDictEqual(result, expected)
@httpretty.activate
def test_do_cone_search_malformed_response(self):
'''Test to see if SIMBAD cone search method behaves as expected'''
from object_service.SIMBAD import simbad_position_query
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astropy import units as u
mockdata = {"foo":"bar"}
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body='%s'%json.dumps(mockdata))
# First parse the position string and see if we get the expected results back
c = SkyCoord("80.89416667 -69.7561111", unit=(u.deg, u.deg))
r = Angle('0.2 degrees')
result = simbad_position_query(c, r)
expected = {'Error Info': 'Unable to retrieve SIMBAD identifiers from SIMBAD response (no "data" key)!', 'Error': 'Unable to get results!'}
self.assertDictEqual(result, expected)
def test_parse_position_string_default_radius(self):
'''Test to see if SIMBAD cone search method interprets position string correctly'''
from object_service.utils import parse_position_string
from object_service.utils import IncorrectPositionFormatError
pstring = "80.89416667 -69.75611111:0.166666"
# Get the value of the default radius
default_radius = self.app.config.get('OBJECTS_DEFAULT_RADIUS')
# First parse the position string and see if we get the expected results back
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'80.8942', u'-69.7561', 0.166666])
# An invalid radius results in the default radius
pstring = "80.89416667 -69.75611111:1 2 3 4"
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'80.8942', u'-69.7561', default_radius])
# Check if the hms to decimal conversion works as expected
pstring = "80.89416667 -69.75611111:1 30"
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'80.8942', u'-69.7561', 1.5])
# No radius in input string results in default radius
pstring = "80.89416667 -69.75611111"
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'80.8942', u'-69.7561', default_radius])
# Invalid hms string results in default radius
pstring = "80.89416667 -69.75611111:a b"
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'80.8942', u'-69.7561', default_radius])
#
pstring = "80.89416667 -69.75611111:a"
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'80.8942', u'-69.7561', default_radius])
# There has to be RA and DEC
pstring = "80.89416667"
error = ''
try:
result = parse_position_string(pstring)
except IncorrectPositionFormatError:
error = 'Incorrect Position Format'
self.assertEqual(error, 'Incorrect Position Format')
# Check position strings of the format "hh mm ss [+-]dd mm ss"
pstring = "18 04 20.99 -29 31 08.9"
coords, radius = parse_position_string(pstring)
RA, DEC = coords.to_string('decimal').split()
self.assertEqual([RA, DEC, radius.degree], [u'271.087', u'-29.5191', default_radius])
def test_cleanup_object_names_simbad(self):
'''Test to see if SIMBAD cleans up object string correctly'''
from object_service.SIMBAD import cleanup_object_name
# The function should remove catalogue prefixes
cats = ['NAME','*','V*','SV*']
objects = ["%s foobar"%c for c in cats]
result = list(set([cleanup_object_name(o) for o in objects]))
self.assertEqual(result, ['foobar'])
@httpretty.activate
def test_tap_verification(self):
'''Test if verification of TAP service works'''
from object_service.SIMBAD import verify_tap_service
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"]]}
def request_callback(request, uri, headers):
data = request.body
status = 200
return (status, headers, '%s'%json.dumps(mockdata))
# Mock the response
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body=request_callback)
v = verify_tap_service()
self.assertEqual(v, QUERY_URL)
@httpretty.activate
def test_tap_verification_switch(self):
'''Test if verification of TAP service works: switch to CDS mirror on error'''
from object_service.SIMBAD import verify_tap_service
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"]]}
def request_callback(request, uri, headers):
data = request.body
status = 500
return (status, headers, '%s'%json.dumps(mockdata))
# Mock the response
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body=request_callback)
v = verify_tap_service()
expected = self.app.config.get('OBJECTS_SIMBAD_TAP_URL_CDS')
self.assertEqual(v, expected)
@httpretty.activate
def test_tap_verification_switch_2(self):
'''Test if verification of TAP service works: switch to CDS mirror on no data'''
from object_service.SIMBAD import verify_tap_service
QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
emptydata= {"data":[]}
def request_callback(request, uri, headers):
data = request.body
status = 200
return (status, headers, '%s'%json.dumps(emptydata))
# Mock the response
httpretty.register_uri(
httpretty.POST, QUERY_URL,
content_type='application/json',
status=200,
body=request_callback)
v = verify_tap_service()
expected = self.app.config.get('OBJECTS_SIMBAD_TAP_URL_CDS')
self.assertEqual(v, expected)
@timeout_decorator.timeout(2)
def timeout(s):
time.sleep(s)
return s
class TestTimeOut(TestCase):
'''Check if the timeout decorator works as expected'''
def create_app(self):
'''Create the wsgi application'''
_app = app.create_app()
return _app
def test_timeout(self):
'''Test if timeout decorator works properly'''
try:
res = timeout(1)
except timeout_decorator.timeout_decorator.TimeoutError:
res = 'timeout'
self.assertEqual(res, 1)
try:
res = timeout(3)
except timeout_decorator.timeout_decorator.TimeoutError:
res = 'timeout'
self.assertEqual(res, 'timeout')
if __name__ == '__main__':
unittest.main()
|
|
"""
MIT License
Copyright (c) 2017 Code Society
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import threading
import schedule
import time
import traceback
import numpy as np
import logging
import pandas as pd
from datetime import datetime, timedelta
from friartuck.Robinhood import Robinhood
from friartuck.quote_source import FriarTuckQuoteSource
from friartuck import utc_to_local
from collections.abc import Iterable
from threading import Thread
log = logging.getLogger("friar_tuck")
class FriarContext:
def __init__(self):
self.is_market_open = False
self.account = None
self.portfolio = None
def __str__(self):
return str(self.__dict__)
class Security:
def __init__(self, symbol, simple_name, min_tick_size=None, is_tradeable=False, security_type=None, security_detail=None):
self.symbol = symbol
self.simple_name = simple_name
self.min_tick_size = min_tick_size
self.is_tradeable = is_tradeable
self.security_type = security_type
self.security_detail = {}  # this is the raw hash (dict) of instrument details
if security_detail:
self.security_detail = security_detail
def price_convert_up_by_tick_size(self, price):
if not self.min_tick_size or self.min_tick_size == 0.0:
return price
return round(np.math.ceil(price / self.min_tick_size) * self.min_tick_size, 7)
def price_convert_down_by_tick_size(self, price):
if not self.min_tick_size or self.min_tick_size == 0.0:
return price
return round(np.math.floor(price / self.min_tick_size) * self.min_tick_size, 7)
def __str__(self):
return str(self.__dict__)
class FriarData:
def __init__(self, friar_tuck_live):
self.friar_tuck_live = friar_tuck_live
"""
Params:
Security security[1...n]: can be a list
Int bar_count: Number of quote records to return
String frequency: 1m|1h|1d
String field[1...n]: None=All, possible fields ["open","high","low","close","volume","price"]
"""
def history(self, security, bar_count=1, frequency="1d", field=None):
return self.friar_tuck_live.history(security, bar_count, frequency, field)
"""
Params:
Security security[1...n]: can be a list
String field[1...n]: None=All, possible fields ["open","high","low","close","volume","price"]
"""
def current(self, security, field=None):
return self.friar_tuck_live.current(security, field)
def can_trade(self, security):
return self.friar_tuck_live.can_trade(security)
class OrderType:
def __init__(self, price=None, stop_price=None):
self.price = price
self.stop_price = stop_price
def __str__(self):
return str(self.__dict__)
def is_market_order(self):
if self.price or self.stop_price:
return False
return True
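# Usage sketch (illustrative prices): OrderType() -> market order, OrderType(price=10.50) -> limit,
# OrderType(stop_price=9.75) -> stop, OrderType(price=10.50, stop_price=9.75) -> stop-limit
# (see order_shares() below, which maps price/stop_price onto the order type and trigger).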
class Order:
def __init__(self, id):
self.id = id
"""
Integer: The status of the order.
0 = Open
1 = Filled
2 = Cancelled
3 = Rejected
4 = Held
"""
self.status = None
self.created = None
self.updated = None
self.stop = None
self.limit = None
self.amount = 0
self.symbol = None
self.filled = 0
self.commission = 0
self.rejected_reason = None
self.time_in_force = None
def __str__(self):
return str(self.__dict__)
class Position:
def __init__(self, amount=0, cost_basis=0, last_sale_price=0, created=None, updated=None):
self.amount = amount
self.cost_basis = cost_basis
self.last_sale_price = last_sale_price
self.created = created
self.updated = updated
self.day_cost_basis = 0.0
self.day_amount = 0
def __str__(self):
return str(self.__dict__)
class Portfolio:
def __init__(self):
self.capital_used = 0
self.cash = 0
self.pnl = 0
self.positions = {}
self.portfolio_value = 0
self.positions_value = 0
self.returns = 0
self.starting_cash = 0
self.start_date = None
def __str__(self):
return str(self.__dict__)
class Account:
def __init__(self):
self.accrued_interest = 0
self.available_funds = 0
self.buying_power = 0
self.cushion = 0
self.day_trades_remaining = 0
self.equity_with_loan = 0
self.excess_liquidity = 0
self.initial_margin_requirement = 0
self.leverage = 0
self.maintenance_margin_requirement = 0
self.net_leverage = 0
self.net_liquidation = 0
self.settled_cash = 0
self.total_positions_value = 0
self.unallocated_margin_cash = 0
def __str__(self):
return str(self.__dict__)
def synchronized_method(method):
outer_lock = threading.Lock()
lock_name = "__" + method.__name__ + "_lock" + "__"
def sync_method(self, *args, **kws):
with outer_lock:
if not hasattr(self, lock_name): setattr(self, lock_name, threading.Lock())
lock = getattr(self, lock_name)
with lock:
return method(self, *args, **kws)
return sync_method
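# Usage sketch: decorate an instance method (e.g. FriarTuckLive.current below) to serialize
# concurrent calls on a per-instance lock that is created lazily on first invocation.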
class FriarTuckLive:
config = None
context = None
active_algo = None
_active_datetime = None
is_market_open = False
# Protection from abuse
_fetched_securities_cache = {}
_next_data_reloadable_time = datetime.now()
_initialized = False
_market_closed_lastupdate = False
_starting_cash = None
_start_date = datetime.now()
_current_security_bars = {}
_security_last_known_price = {}
_order_status_map = {"confirmed": 0, "partially_filled": 0, "filled": 1, "cancelled": 2, "rejected": 3, "queued": 4, "unconfirmed": 4, "failed": 5}
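# Note: "failed" maps to status 5, which is not listed in the Order.status docstring above.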
# def __init__(self, user_name, password, data_frequency="1h"):
def __init__(self, config, data_frequency="1h"):
self.config = config
if not self._initialized:
self.run_thread = None
self.engine_running = False
self._stop_engine = False  # flag renamed so it does not shadow the stop_engine() method
self._data_frequency = data_frequency
self._active_datetime = datetime.now()
# self._active_datetime = temp_datetime.replace(second=0, microsecond=0)
# self.long_only=False
self.quote_source = FriarTuckQuoteSource(config)
self.context = FriarContext()
self.rh_session = Robinhood()
self.rh_session.login(username=config.get('LOGIN', 'username'), password=config.get('LOGIN', 'password'))
self.friar_data = FriarData(self)
def set_active_algo(self, active_algo):
self._current_security_bars = {}
self._load_all_data()
self.active_algo = active_algo
self.active_algo.initialize(self.context, self.friar_data)
def get_datetime(self):
return self._active_datetime
# run_engine() starts the scheduler loop on a daemon thread and returns immediately;
# the caller must keep the process alive, otherwise the engine dies with it
def run_engine(self):
self._stop_engine = False
if not self._initialized:
self._time_interval_processor()
self._initialized = True
if not self.run_thread or not self.run_thread.is_alive():
self.run_thread = Thread(target=self.run_scheduler, args=('schedule_maintainer',))
self.run_thread.setDaemon(True)
self.run_thread.start()
def stop_engine(self):
if not self.run_thread or not self.run_thread.is_alive():
return
self._stop_engine = True
def run_scheduler(self, name):
self.engine_running = True
log.info("**** running - %s" % name)
while 1:
schedule.run_pending()
time.sleep(1)
if self._stop_engine:
break
self.engine_running = False
log.info("**** exiting - %s" % name)
def history(self, security, bar_count=1, frequency="1d", field=None, since_last_quote_time=None):
symbol_map = security_to_symbol_map(security)
quotes = self.quote_source.fetch_quotes(symbol=symbol_map.keys(), bar_count=bar_count, frequency=frequency, field=field, market_open=self.is_market_open, since_last_quote_time=since_last_quote_time)
if not isinstance(security, Iterable):
return quotes[security.symbol]
sec_quotes = {}
for sym in quotes:
sec_quotes[symbol_map[sym]] = quotes[sym]
return sec_quotes
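# Note: history() returns the quote data directly when a single Security is passed,
# or a dict keyed by Security when an iterable of securities is passed.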
def can_trade(self, security):
return security.is_tradeable
@synchronized_method
def current(self, security, field, since_last_quote_time=None):
now_secs = datetime.now().second
if now_secs < 10:
# we need to wait 10 seconds after the minute to load current data... this is so the source can be ready.
time.sleep(10 - now_secs)
if not isinstance(security, Iterable):
if security not in self._current_security_bars:
security_bars = self.history(security, bar_count=1, frequency=self._data_frequency, field=None, since_last_quote_time=since_last_quote_time)
# log.info(security_bars)
self._current_security_bars[security] = security_bars
if self._current_security_bars[security] is None or self._current_security_bars[security].empty:
quote_date = datetime.now()
quote_date = quote_date.replace(second=0, microsecond=0)
self._current_security_bars[security] = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
data={'price': float("nan"),
'open': float("nan"),
'high': float("nan"),
'low': float("nan"),
'close': float("nan"),
'volume': int(0)})
# print("price %s " % self._current_security_bars[security].iloc[-1]["price"])
if self._current_security_bars[security] is not None: # and (not self._current_security_bars[security].empty or self._current_security_bars[security].iloc[-1]["price"] == float["nan"]):
last_price_list = self.rh_session.get_quote_list(security.symbol, 'symbol,last_trade_price,bid_price,bid_size,ask_price,ask_size')
if last_price_list and len(last_price_list) > 0:
self._current_security_bars[security]["price"] = float(last_price_list[0][1])
self._current_security_bars[security]["bid_price"] = float(last_price_list[0][2])
self._current_security_bars[security]["bid_size"] = float(last_price_list[0][3])
self._current_security_bars[security]["ask_price"] = float(last_price_list[0][4])
self._current_security_bars[security]["ask_size"] = float(last_price_list[0][5])
else:
# self._current_security_bars[security]["price"] = float("nan")
self._current_security_bars[security]["bid_price"] = float("nan")
self._current_security_bars[security]["bid_size"] = float("nan")
self._current_security_bars[security]["ask_price"] = float("nan")
self._current_security_bars[security]["ask_size"] = float("nan")
if not field:
return self._current_security_bars[security].iloc[-1]
# log.info("security_bars(%s): %s" % (security.symbol, self._current_security_bars[security]))
return self._current_security_bars[security].iloc[-1][field]
else:
symbol_list_map = {}
return_bars = {}
for sec in security:
symbol_list_map[sec.symbol] = sec
if sec not in self._current_security_bars:
security_bars = self.history(sec, bar_count=1, frequency=self._data_frequency, field=None, since_last_quote_time=since_last_quote_time)
if security_bars is None:
quote_date = datetime.now()
quote_date = quote_date.replace(second=0, microsecond=0)
security_bars = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
data={'price': float("nan"),
'open': float("nan"),
'high': float("nan"),
'low': float("nan"),
'close': float("nan"),
'volume': int(0)})
self._current_security_bars[sec] = security_bars
if self._current_security_bars[sec] is not None: # and (not self._current_security_bars[sec].empty or self._current_security_bars[sec].iloc[-1]["price"] == float["nan"]):
last_price_list = self.rh_session.get_quote_list(sec.symbol, 'symbol,last_trade_price,bid_price,bid_size,ask_price,ask_size')
if last_price_list and len(last_price_list) > 0:
if sec in self._current_security_bars:
self._current_security_bars[sec]["price"] = float(last_price_list[0][1])
self._current_security_bars[sec]["bid_price"] = float(last_price_list[0][2])
self._current_security_bars[sec]["bid_size"] = float(last_price_list[0][3])
self._current_security_bars[sec]["ask_price"] = float(last_price_list[0][4])
self._current_security_bars[sec]["ask_size"] = float(last_price_list[0][5])
if not field:
return_bars[sec] = self._current_security_bars[sec].iloc[-1]
else:
return_bars[sec] = self._current_security_bars[sec].iloc[-1][field]
return return_bars
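# Note: current() mirrors history(): a single Security yields the latest bar (or a single
# field value), while an iterable yields a dict of the same keyed by Security.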
def get_order(self, id):
if not id:
return
url = self.rh_session.endpoints['orders'] + id + "/"
order_data = self.rh_session.get_url_content_json(url)
return self._build_order_object(order_data, symbol=None)
def get_open_orders(self, security=None):
open_orders = {}
order_data = self.rh_session.order_history()
# log.info("order_data: %s" % order_data)
if order_data and "results" in order_data:
for result in order_data["results"]:
status = self._order_status_map[result["state"]]
if status not in [0, 4]:
# not open order
continue
instrument = self.rh_session.get_url_content_json(result["instrument"])
symbol = instrument["symbol"]
if security and security.symbol != symbol:
# not for the security desired
continue
order = self._build_order_object(result, symbol)
if symbol not in open_orders:
open_orders[symbol] = []
open_orders[symbol].append(order)
if security:
if security.symbol in open_orders:
return open_orders[security.symbol]
return []
return open_orders
def get_last_filled_buy_order(self, security):
return self._get_last_filled_order_by_side(security=security)["buy"]
def get_last_filled_sell_order(self, security):
return self._get_last_filled_order_by_side(security=security)["sell"]
def get_last_filled_orders_by_side(self, security):
return self._get_last_filled_order_by_side(security=security)
def _get_last_filled_order_by_side(self, security):
latest_buy_order = None
latest_sell_order = None
order_data = self.rh_session.order_history()
# log.info("order_data: %s" % order_data)
if order_data and "results" in order_data:
for result in order_data["results"]:
status = self._order_status_map[result["state"]]
if status not in [1]: # or result["side"] != side:
# not a filled order
continue
instrument = self.rh_session.get_url_content_json(result["instrument"])
symbol = instrument["symbol"]
if security and security.symbol != symbol:
# not for the security desired
continue
if result["side"] == "buy":
if not latest_buy_order:
latest_buy_order = self._build_order_object(result, symbol)
continue
updated = utc_to_local(datetime.strptime(result["updated_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
if latest_buy_order.updated > updated:
continue
latest_buy_order = self._build_order_object(result, symbol)
if result["side"] == "sell":
if not latest_sell_order:
latest_sell_order = self._build_order_object(result, symbol)
continue
updated = utc_to_local(datetime.strptime(result["updated_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
if latest_sell_order.updated > updated:
continue
latest_sell_order = self._build_order_object(result, symbol)
return {"sell": latest_sell_order, "buy": latest_buy_order}
def cancel_order(self, order_id):
url = self.rh_session.endpoints['orders'] + order_id + "/"
order_data = self.rh_session.get_url_content_json(url)
status = self._order_status_map[order_data["state"]]
if status not in [0, 4]:
log.info("order is not open, no need to cancel: %s" % order_data)
return
if order_data and "cancel" in order_data and order_data["cancel"]:
self.rh_session.post_url_content_json(order_data["cancel"])
def order_value(self, security, amount, order_type=None, time_in_force='gfd'):
if order_type and order_type.stop_price:
price = order_type.stop_price
else:
price = self.current(security, "price")
shares = int(amount / price)
return self.order_shares(security, shares, order_type, time_in_force)
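# Example (illustrative): order_value(security, 1000.0) computes shares = int(1000.0 / price),
# using the stop price if one is given or the current price otherwise, then delegates to
# order_shares(); a negative amount would place a sell.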
def order_shares(self, security, shares, order_type=None, time_in_force='gfd'):
if not order_type:
# Since an order type was not passed lets use MarketOrder
order_type = OrderType()
trigger = 'immediate'
if order_type.stop_price:
trigger = "stop"
tran_type = 'market'
if order_type.price:
tran_type = "limit"
transaction = "buy"
if shares < 0:
transaction = "sell"
price = order_type.price
if shares > 0 and order_type.stop_price and not price:
# price = security.price_convert_up_by_tick_size(order_type.stop_price + (order_type.stop_price * 0.05)) # Complying with Robinhood 5% collared
price = security.price_convert_up_by_tick_size(order_type.stop_price) # Complying with Robinhood 5% collared
if price:
price = round(price, 2)
order_data = self.rh_session.place_order(security.security_detail, quantity=np.abs([shares])[0],
price=price, stop_price=order_type.stop_price,
transaction=transaction, trigger=trigger, order=tran_type,
time_in_force=time_in_force)
if order_data and "reject_reason" in order_data and order_data["reject_reason"]:
log.warning("Appears the order was rejected: %s" % order_data["reject_reason"])
if order_data:
return order_data['id']
return None
# def _set_long_only(self):
# self.long_only=True
def _build_order_object(self, result, symbol=None):
status = self._order_status_map[result["state"]]
# log.debug(result)
if not symbol:
instrument = self.rh_session.get_url_content_json(result["instrument"])
symbol = instrument["symbol"]
order = Order(result["id"])
order.status = status
order.created = utc_to_local(datetime.strptime(result["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
order.updated = utc_to_local(datetime.strptime(result["updated_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
order.stop = None
order.limit = None
if result["trigger"] == "stop":
order.stop = float(result["stop_price"])
if result["type"] == "limit":
order.limit = float(result["price"])
order.amount = int(float(result["quantity"]))
if result["side"] == "sell":
order.amount = -order.amount
order.symbol = symbol
order.filled = int(float(result["cumulative_quantity"]))
if result["side"] == "sell":
order.filled = -order.filled
order.commission = float(result["fees"])
order.rejected_reason = result["reject_reason"]
order.time_in_force = result["time_in_force"]
return order
def fetch_and_build_security(self, symbol, sec_detail=None):
if symbol in self._fetched_securities_cache:
return self._fetched_securities_cache[symbol]
if not sec_detail:
sec_details = self.rh_session.instruments(symbol)
if sec_details and len(sec_details) > 0:
for result in sec_details:
if result['symbol'] == symbol:
sec_detail = result
break
if not sec_detail:
return None
# sec_detail = sec_details[0]
symbol = sec_detail['symbol']
is_tradeable = sec_detail['tradeable']
sec_type = sec_detail['type']
simple_name = sec_detail['simple_name']
min_tick_size = None
if "min_tick_size" in sec_detail and sec_detail['min_tick_size']:
min_tick_size = float(sec_detail['min_tick_size'])
sec = Security(symbol, simple_name, min_tick_size, is_tradeable, sec_type, sec_detail)
self._fetched_securities_cache[symbol] = sec
return sec
def _time_interval_processor(self):
now = datetime.now()
if now.weekday() not in [5, 6]:
# if now.weekday() not in [6]:
log.debug("In time interval processor")
temp_datetime = datetime.now()
self._active_datetime = temp_datetime.replace(second=0, microsecond=0)
market_open_temp = self.is_market_open
self._current_security_bars = {}
if not self._load_all_data():
# Data load failed... we can't go further until we get fresh data; bad things can happen if the algo operates on stale data.
# Schedule a data reload again in 1 minute.
log.debug("Data retrieval was abnormal; we'll check again next minute")
_set_trigger_timer(minute_interval=1, callback_function=self._time_interval_processor)
return schedule.CancelJob
# update context status
self.context.is_market_open = self.is_market_open
if (not self._initialized or not market_open_temp) and self.is_market_open:
# if market was not open and is now open... initialize algo
try:
if hasattr(self.active_algo, 'on_market_open'):
self.active_algo.on_market_open(self.context, self.friar_data)
# self.active_algo.handle_data(self.context, self.friar_data)
except Exception as inst:
log.error("Error occurred while invoking initialize: %s " % inst)
traceback.print_exc()
if self._data_frequency == "1d":
# with this frequency, it's a market-closed last update whenever this method is called
self._market_closed_lastupdate = True
elif self._data_frequency == "1h":
minutes_after_open_time = self.market_opens_at + timedelta(hours=1)
minutes_after_open_time = minutes_after_open_time.replace(minute=0, second=0, microsecond=0)
elif self._data_frequency == "15m":
minutes_after_open_time = self.market_opens_at + timedelta(minutes=15)
minutes_after_open_time = minutes_after_open_time.replace(second=0, microsecond=0)
elif self._data_frequency == "5m":
minutes_after_open_time = self.market_opens_at + timedelta(minutes=5)
minutes_after_open_time = minutes_after_open_time.replace(second=0, microsecond=0)
else:
minutes_after_open_time = self.market_opens_at + timedelta(minutes=1) # Adding one more call
if market_open_temp and not self.is_market_open:
# If market used to be open and at this update is now closed, we want to call handle_data one more time
self._market_closed_lastupdate = True # we want the algo to be called one more time.
current_time = datetime.now()
try:
if (self.is_market_open and current_time >= minutes_after_open_time) or self._market_closed_lastupdate: # current_time < minutes_after_close_time:
self.active_algo.handle_data(self.context, self.friar_data)
if self._market_closed_lastupdate:
self._market_closed_lastupdate = False
except Exception as e:
log.error("Error occurred while invoking handle_data: %s " % e)
traceback.print_exc()
if self._data_frequency == "1d":
direct_time = self.market_closes_at
if datetime.now() >= direct_time:
market_info = self.rh_session.get_url_content_json(self.market_info["next_open_hours"])
direct_time = utc_to_local(datetime.strptime(market_info["closes_at"], "%Y-%m-%dT%H:%M:%SZ"))
direct_time = direct_time + timedelta(minutes=5) # wait 5 minutes after market closes
elif self._data_frequency == "1h":
if not self.is_market_open and datetime.now() < self.market_opens_at:
direct_time = self.market_opens_at
elif not self.is_market_open and datetime.now() > self.market_closes_at:
market_info = self.rh_session.get_url_content_json(self.market_info["next_open_hours"])
direct_time = utc_to_local(datetime.strptime(market_info["opens_at"], "%Y-%m-%dT%H:%M:%SZ"))
else:
direct_time = datetime.now() + timedelta(hours=1) # update every hour
direct_time = direct_time.replace(minute=0, second=0, microsecond=0)
elif self._data_frequency == "15m":
if not self.is_market_open and datetime.now() < self.market_opens_at:
direct_time = self.market_opens_at
elif not self.is_market_open and datetime.now() > self.market_closes_at:
market_info = self.rh_session.get_url_content_json(self.market_info["next_open_hours"])
direct_time = utc_to_local(datetime.strptime(market_info["opens_at"], "%Y-%m-%dT%H:%M:%SZ"))
else:
now = datetime.now()
multiples = int(now.minute / 15)
diff = now.minute - (multiples * 15)
direct_time = now + timedelta(minutes=(15 - diff))
# direct_time = datetime.now() + timedelta(minutes=15) # update every 15 minutes
direct_time = direct_time.replace(second=0, microsecond=0)
elif self._data_frequency == "5m":
if not self.is_market_open and datetime.now() < self.market_opens_at:
direct_time = self.market_opens_at
elif not self.is_market_open and datetime.now() > self.market_closes_at:
market_info = self.rh_session.get_url_content_json(self.market_info["next_open_hours"])
direct_time = utc_to_local(datetime.strptime(market_info["opens_at"], "%Y-%m-%dT%H:%M:%SZ"))
else:
now = datetime.now()
multiples = int(now.minute / 5)
diff = now.minute - (multiples * 5)
direct_time = now + timedelta(minutes=(5 - diff))
# direct_time = datetime.now() + timedelta(minutes=5) # update every 5 minutes
direct_time = direct_time.replace(second=0, microsecond=0)
else:
direct_time = datetime.now() + timedelta(minutes=1) # update every minute
direct_time = direct_time.replace(second=0, microsecond=0)
# log.debug("Interval Processing Done - Next Trigger %s" % direct_time)
_set_trigger_timer(callback_function=self._time_interval_processor, direct_time=direct_time)
return schedule.CancelJob
def _load_all_data(self):
current_time = datetime.now()
if self._next_data_reloadable_time > current_time:
return True
try:
self.__load_market_info()
except Exception as e:
log.error("Error occurred while loading market info: %s " % e)
traceback.print_exc()
if not self.market_opens_at or not self.market_closes_at:
return False
try:
self.__load_profile_info()
except Exception as e:
log.error("Error occurred while loading profile info: %s " % e)
traceback.print_exc()
return False
after_close_time = self.market_closes_at + timedelta(minutes=60) # 1 hour after close
before_open_time = self.market_opens_at - timedelta(minutes=120) # 2 hours before open
if current_time > after_close_time or current_time < before_open_time:
# we are in after hours, we don't want to tax the rh server, lets load less frequently
self._next_data_reloadable_time = datetime.now() + timedelta(
hours=1) # Can't reload more than once within 1 hour
self._next_data_reloadable_time = self._next_data_reloadable_time.replace(minute=0, second=0, microsecond=0)
else:
self._next_data_reloadable_time = datetime.now() + timedelta(
seconds=10) # Can't reload more than once within 10 seconds
return True
def __load_market_info(self):
market_info = self.rh_session.market_info()
if "opens_at" not in market_info or not market_info["opens_at"] or "closes_at" not in market_info or not market_info["closes_at"]:
market_info = self.rh_session.get_url_content_json(market_info["next_open_hours"])
self.market_info = market_info
# self.market_opens_at = datetime.now().replace(hour=8, minute=30, second=0, microsecond=0) # utc_to_local(datetime.strptime(market_info["opens_at"], "%Y-%m-%dT%H:%M:%SZ"))
self.market_opens_at = utc_to_local(datetime.strptime(market_info["opens_at"], "%Y-%m-%dT%H:%M:%SZ"))
self.market_closes_at = utc_to_local(datetime.strptime(market_info["closes_at"], "%Y-%m-%dT%H:%M:%SZ"))
current_time = datetime.now()
if (current_time >= self.market_opens_at) and (current_time < self.market_closes_at):
self.is_market_open = True
else:
self.is_market_open = False
log.debug("market opens_at=%s, closes_at=%s, now=%s, is_market_open=%s" % (self.market_opens_at, self.market_closes_at, current_time, self.is_market_open))
def __load_profile_info(self):
pos_infos = self.rh_session.positions()
port_info = self.rh_session.portfolios()
acct_info = self.rh_session.get_account()
# log.info("pos_infos:%s" % pos_infos)
# log.info("account_info:%s" % acct_info)
# log.info("port_info:%s" % port_info)
unsettled_funds = float(acct_info["unsettled_funds"])
market_value = float(port_info["market_value"])
equity = float(port_info["equity"])
yesterday_equity = float(port_info["equity_previous_close"])
uncleared_deposits = float(acct_info["uncleared_deposits"])
cash_held_for_orders = float(acct_info["cash_held_for_orders"])
cash = float(acct_info["cash"])
total_cash = cash + unsettled_funds
portfolio_value = equity
buying_power = equity-market_value-cash_held_for_orders
if not self._starting_cash:
self._starting_cash = portfolio_value
if not self._start_date:
self._start_date = datetime.now()
returns = 0
if self._starting_cash and self._starting_cash > 0:
returns = (portfolio_value - self._starting_cash) / self._starting_cash
long_position_value = 0
short_position_value = 0
unrealized_pl = 0
positions = {}
# log.info("pos: %s" % pos_infos["results"])
if pos_infos and pos_infos["results"]:
for result in pos_infos["results"]:
amount = int(float(result["quantity"]))
if amount == 0:
continue
log.info("pos_infos:%s" % result)
instrument = self.rh_session.get_url_content_json(result["instrument"])
symbol = instrument["symbol"]
security = self.fetch_and_build_security(symbol, sec_detail=instrument)
# last_price = self.current(security, field="price")
last_price = float(self.rh_session.last_trade_price(symbol)[0][0])
log.debug(last_price)
# if not last_price:
# Lets try again
# last_price = self.current(security, field="price")
if not last_price and security in self._security_last_known_price:
last_price = self._security_last_known_price[security]
self._security_last_known_price[security] = last_price
created = utc_to_local(datetime.strptime(result["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
updated = utc_to_local(datetime.strptime(result["updated_at"], "%Y-%m-%dT%H:%M:%S.%fZ"))
cost_basis = float(result["average_buy_price"])
position = Position(amount, cost_basis, last_price, created, updated)
if "intraday_quantity" in result:
position.day_amount = int(float(result["intraday_quantity"]))
if "intraday_average_buy_price" in result:
position.day_cost_basis = float(result["intraday_average_buy_price"])  # a price, not a share count
positions[security] = position
# position_value = position_value+(cost_basis*amount)
if amount > 0:
unrealized_pl = unrealized_pl + ((last_price * amount) - (cost_basis * amount))
long_position_value = long_position_value + (cost_basis * amount)
else:
unrealized_pl = unrealized_pl + ((cost_basis * abs(amount)) - (last_price * abs(amount)))
short_position_value = short_position_value + (cost_basis * abs(amount))
pnl = equity-uncleared_deposits-yesterday_equity # unrealized_pl + unsettled_funds
leverage = 0
net_leverage = 0
if portfolio_value > 0:
leverage = (long_position_value + short_position_value) / portfolio_value
net_leverage = market_value / portfolio_value
portfolio = Portfolio()
portfolio.capital_used = abs(short_position_value - long_position_value)
portfolio.cash = total_cash
portfolio.pnl = pnl
portfolio.positions = positions
portfolio.portfolio_value = portfolio_value
portfolio.positions_value = market_value
portfolio.returns = returns
portfolio.starting_cash = self._starting_cash
portfolio.start_date = self._start_date
self.context.portfolio = portfolio
account = Account()
# account.accrued_interest=acct_info
account.available_funds = portfolio_value
account.buying_power = buying_power
account.cushion = total_cash / portfolio_value
account.day_trades_remaining = float("inf")
account.equity_with_loan = portfolio_value
account.excess_liquidity = port_info["excess_margin"]
account.initial_margin_requirement = float(acct_info["margin_balances"]["margin_limit"]) if "margin_balances" in acct_info and "margin_limit" in acct_info["margin_balances"] else 0
account.leverage = leverage
account.maintenance_margin_requirement = portfolio_value-float(port_info["excess_margin"])
account.net_leverage = net_leverage
account.net_liquidation = portfolio_value
account.settled_cash = cash
account.total_positions_value = market_value
if "unallocated_margin_cash" in acct_info:
account.unallocated_margin_cash = float(acct_info["unallocated_margin_cash"])
self.context.account = account
def _set_trigger_timer(callback_function, minute_interval=None, direct_time=None):
log.debug("setting trigger direct_time=%s, minute_interval= %s " % (direct_time, minute_interval))
if not minute_interval and not direct_time:
log.error("Bad trigger timer request... one of the following is required (minute_interval, direct_time)")
return
if direct_time:
dt = direct_time
else:
dt = datetime.now() + timedelta(minutes=minute_interval) # update every minute
dt = dt.replace(second=0, microsecond=0)
str_time = dt.strftime("%H:%M")
schedule.every().day.at(str_time).do(callback_function)
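# Usage sketch (hypothetical callback and times, not part of the original module):
# schedule a retry a couple of minutes from now, or fire at an explicit datetime.
# Callbacks in this module return schedule.CancelJob so the daily job created by
# schedule.every().day.at(...) only fires once.
#
#   _set_trigger_timer(minute_interval=2, callback_function=reload_data)
#   _set_trigger_timer(direct_time=market_closes_at, callback_function=on_close)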
def security_to_symbol_map(security):
if not isinstance(security, list):
return {security.symbol: security}
symbols = {}
for sec in security:
symbols[sec.symbol] = sec
return symbols
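# Usage sketch (hypothetical securities): security_to_symbol_map accepts either a
# single Security or a list and returns a dict keyed by ticker symbol, which is
# convenient when matching quote data back to Security objects.
#
#   sec_map = security_to_symbol_map([aapl_security, msft_security])
#   if "AAPL" in sec_map:
#       security = sec_map["AAPL"]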
|
|
import json
import logging
from datetime import date, timedelta
from django.conf import settings
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.views.decorators.http import require_GET
from tower import ugettext as _
from kitsune.announcements.views import user_can_announce
from kitsune.dashboards import PERIODS
from kitsune.dashboards.readouts import (
l10n_overview_rows, kb_overview_rows, READOUTS, L10N_READOUTS,
CONTRIBUTOR_READOUTS)
from kitsune.dashboards.utils import render_readouts, get_locales_by_visit
from kitsune.products.models import Product
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import smart_int
from kitsune.wiki.config import CATEGORIES
from kitsune.wiki.models import Locale
log = logging.getLogger('k.dashboards')
def _kb_readout(request, readout_slug, readouts, locale=None, mode=None,
product=None):
"""Instantiate and return the readout with the given slug.
Raise Http404 if there is no such readout.
"""
if readout_slug not in readouts:
raise Http404
return readouts[readout_slug](request, locale=locale, mode=mode,
product=product)
def _kb_detail(request, readout_slug, readouts, main_view_name,
main_dash_title, locale=None, product=None):
"""Show all the rows for the given KB article statistics table."""
return render(request, 'dashboards/kb_detail.html', {
'readout': _kb_readout(request, readout_slug, readouts, locale,
product=product),
'locale': locale,
'main_dash_view': main_view_name,
'main_dash_title': main_dash_title,
'product': product,
'products': Product.objects.filter(visible=True)})
@require_GET
def contributors_detail(request, readout_slug):
"""Show all the rows for the given contributor dashboard table."""
product = _get_product(request)
return _kb_detail(request, readout_slug, CONTRIBUTOR_READOUTS,
'dashboards.contributors', _('Knowledge Base Dashboard'),
locale=settings.WIKI_DEFAULT_LANGUAGE, product=product)
@require_GET
def contributors_overview(request):
product = _get_product(request)
return render(request, 'dashboards/contributors_overview.html', {
'overview_rows': kb_overview_rows(
locale=request.LANGUAGE_CODE, product=product,
mode=smart_int(request.GET.get('mode'), None),
max=None),
'main_dash_view': 'dashboards.contributors',
'main_dash_title': _('Knowledge Base Dashboard'),
'locale': request.LANGUAGE_CODE,
'product': product,
'products': Product.objects.filter(visible=True)})
@require_GET
def localization_detail(request, readout_slug):
"""Show all the rows for the given localizer dashboard table."""
product = _get_product(request)
return _kb_detail(request, readout_slug, L10N_READOUTS,
'dashboards.localization', _('Localization Dashboard'),
product=product)
@require_GET
def localization(request):
"""Render aggregate data about articles in a non-default locale."""
if request.LANGUAGE_CODE == settings.WIKI_DEFAULT_LANGUAGE:
return HttpResponseRedirect(reverse('dashboards.contributors'))
locales = Locale.objects.filter(locale=request.LANGUAGE_CODE)
if locales:
permission = user_can_announce(request.user, locales[0])
else:
permission = False
product = _get_product(request)
data = {
'overview_rows': l10n_overview_rows(
request.LANGUAGE_CODE, product=product),
'user_can_announce': permission,
}
return render_readouts(request, L10N_READOUTS, 'localization.html',
extra_data=data, product=product)
@require_GET
def contributors(request):
"""Render aggregate data about the articles in the default locale."""
product = _get_product(request)
category = _get_category(request)
return render_readouts(
request,
CONTRIBUTOR_READOUTS,
'contributors.html',
locale=settings.WIKI_DEFAULT_LANGUAGE,
product=product,
extra_data={
'overview_rows': kb_overview_rows(
locale=request.LANGUAGE_CODE, product=product,
mode=smart_int(request.GET.get('mode'), None),
max=smart_int(request.GET.get('max'), 10),
category=category),
'overview_modes': PERIODS,
'category': category,
'categories': CATEGORIES,
})
@require_GET
def contributors_old(request):
"""Render aggregate data about the articles in the default locale."""
product = _get_product(request)
return render_readouts(
request,
CONTRIBUTOR_READOUTS,
'contributors_old.html',
locale=settings.WIKI_DEFAULT_LANGUAGE,
product=product)
@require_GET
def wiki_rows(request, readout_slug):
"""Return the table contents HTML for the given readout and mode."""
product = _get_product(request)
readout = _kb_readout(request, readout_slug, READOUTS,
locale=request.GET.get('locale'),
mode=smart_int(request.GET.get('mode'), None),
product=product)
max_rows = smart_int(request.GET.get('max'), fallback=None)
return HttpResponse(readout.render(max_rows=max_rows))
@require_GET
def contributors_overview_rows(request):
product = _get_product(request)
overview_rows = kb_overview_rows(
locale=request.LANGUAGE_CODE, product=product,
mode=smart_int(request.GET.get('mode'), None),
max=smart_int(request.GET.get('max'), 10))
return render(request, 'dashboards/includes/kb_overview.html', {
'overview_rows': overview_rows})
@require_GET
def locale_metrics(request, locale_code):
"""The kb metrics dashboard for a specific locale."""
if locale_code not in settings.SUMO_LANGUAGES:
raise Http404
product = _get_product(request)
return render(
request,
'dashboards/locale_metrics.html',
{
'current_locale': locale_code,
'product': product,
'products': Product.objects.filter(visible=True),
})
@require_GET
def aggregated_metrics(request):
"""The aggregated (all locales) kb metrics dashboard."""
today = date.today()
locales = get_locales_by_visit(today - timedelta(days=30), today)
product = _get_product(request)
return render(
request,
'dashboards/aggregated_metrics.html',
{
'locales_json': json.dumps(settings.SUMO_LANGUAGES),
'locales': locales,
'product': product,
'products': Product.objects.filter(visible=True),
})
def _get_product(request):
product_slug = request.GET.get('product')
if product_slug:
return get_object_or_404(Product, slug=product_slug)
return None
def _get_category(request):
category = request.GET.get('category')
if category:
for c in CATEGORIES:
if str(c[0]) == category:
return c[0]
raise Http404('Invalid category.')
return None
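# Request sketch (hypothetical URL; the actual routes live in urls.py): a dashboard
# view such as contributors() reads its filters from the query string, e.g.
#   GET /dashboards/contributors?product=firefox&category=10&mode=30&max=20
# _get_product() resolves the "product" slug via get_object_or_404 and
# _get_category() checks the numeric "category" value against the wiki CATEGORIES,
# raising Http404 for unknown values.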
|
|
"""
How to run locally:
Start your local registry:
`INDEX_ENDPOINT=https://indexstaging-docker.dotcloud.com \
SETTINGS_FLAVOR=test DOCKER_REGISTRY_CONFIG=config_sample.yml docker-registry`
Start the tests:
`DOCKER_REGISTRY_ENDPOINT=http://localhost:5000 SETTINGS_FLAVOR=test \
DOCKER_REGISTRY_CONFIG=config_sample.yml DOCKER_CREDS=USER:PASS \
nosetests --tests=tests/workflow.py`
"""
import hashlib
import os
import requests
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
sess.mount('https://', adapter)
requests = sess
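# Rebind the module name so every call below reuses a single Session; the test
# methods keep calling requests.get/put/delete unchanged while HTTPS requests go
# through the larger connection pool mounted above.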
from docker_registry.lib import checksums
from docker_registry.lib import config
import docker_registry.storage as storage
import base
from docker_registry.core import compat
json = compat.json
StringIO = compat.StringIO
cfg = config.load()
ua = 'docker/0.11 registry test pretending to be docker'
class TestWorkflow(base.TestCase):
# Dev server needs to run on port 5000 in order to run this test
registry_endpoint = os.environ.get(
'DOCKER_REGISTRY_ENDPOINT',
'https://registrystaging-docker.dotcloud.com')
index_endpoint = os.environ.get(
'DOCKER_INDEX_ENDPOINT',
'https://indexstaging-docker.dotcloud.com')
user_credentials = os.environ['DOCKER_CREDS'].split(':')
def generate_chunk(self, data):
bufsize = 1024
io = StringIO(data)
try:
while True:
buf = io.read(bufsize)
if not buf:
return
yield buf
finally:
# Ensure the in-memory buffer is closed even if the consumer stops early.
io.close()
def upload_image(self, image_id, parent_id, token):
layer = self.gen_random_string(7 * 1024 * 1024)
json_obj = {
'id': image_id
}
if parent_id:
json_obj['parent'] = parent_id
json_data = json.dumps(json_obj)
h = hashlib.sha256(json_data + '\n')
h.update(layer)
layer_checksum = 'sha256:{0}'.format(h.hexdigest())
resp = requests.put('{0}/v1/images/{1}/json'.format(
self.registry_endpoint, image_id),
data=json_data,
headers={'Authorization': 'Token ' + token,
'User-Agent': ua,
'X-Docker-Checksum': layer_checksum},
)
self.assertEqual(resp.status_code, 200, resp.text)
resp = requests.put('{0}/v1/images/{1}/layer'.format(
self.registry_endpoint, image_id),
data=self.generate_chunk(layer),
headers={'Authorization': 'Token ' + token,
'User-Agent': ua},
)
resp = requests.put('{0}/v1/images/{1}/checksum'.format(
self.registry_endpoint, image_id),
data={},
headers={'Authorization': 'Token ' + token,
'X-Docker-Checksum-Payload': layer_checksum,
'User-Agent': ua}
)
self.assertEqual(resp.status_code, 200, resp.text)
return {'id': image_id, 'checksum': layer_checksum}
def update_tag(self, namespace, repos, image_id, tag_name):
resp = requests.put('{0}/v1/repositories/{1}/{2}/tags/{3}'.format(
self.registry_endpoint, namespace, repos, tag_name),
data=json.dumps(image_id),
)
self.assertEqual(resp.status_code, 200, resp.text)
def docker_push(self):
# Test Push
self.image_id = self.gen_random_string()
self.parent_id = self.gen_random_string()
image_id = self.image_id
parent_id = self.parent_id
namespace = self.user_credentials[0]
repos = self.gen_random_string()
# Docker -> Index
images_json = json.dumps([{'id': image_id}, {'id': parent_id}])
resp = requests.put('{0}/v1/repositories/{1}/{2}/'.format(
self.index_endpoint, namespace, repos),
auth=tuple(self.user_credentials),
headers={'X-Docker-Token': 'true',
'User-Agent': ua},
data=images_json)
self.assertEqual(resp.status_code, 200, resp.text)
self.token = resp.headers.get('x-docker-token')
# Docker -> Registry
images_json = []
images_json.append(self.upload_image(parent_id, None, self.token))
images_json.append(self.upload_image(image_id, parent_id, self.token))
# Updating the tags does not need a token, it will use the Cookie
self.update_tag(namespace, repos, image_id, 'latest')
# Docker -> Index
resp = requests.put('{0}/v1/repositories/{1}/{2}/images'.format(
self.index_endpoint, namespace, repos),
auth=tuple(self.user_credentials),
headers={'X-Endpoints': self.registry_endpoint,
'User-Agent': ua},
data=json.dumps(images_json))
self.assertEqual(resp.status_code, 204)
return (namespace, repos)
def fetch_image(self, image_id):
"""Return image json metadata, checksum and its blob."""
resp = requests.get('{0}/v1/images/{1}/json'.format(
self.registry_endpoint, image_id),
)
self.assertEqual(resp.status_code, 200, resp.text)
resp = requests.get('{0}/v1/images/{1}/json'.format(
self.registry_endpoint, image_id),
headers={'Authorization': 'Token ' + self.token}
)
self.assertEqual(resp.status_code, 200, resp.text)
json_data = resp.text
checksum = resp.headers['x-docker-payload-checksum']
resp = requests.get('{0}/v1/images/{1}/layer'.format(
self.registry_endpoint, image_id),
)
self.assertEqual(resp.status_code, 200, resp.text)
resp = requests.get('{0}/v1/images/{1}/layer'.format(
self.registry_endpoint, image_id),
headers={'Authorization': 'Token ' + self.token}
)
self.assertEqual(resp.status_code, 200, resp.text)
return (json_data, checksum, resp.text)
def docker_pull(self, namespace, repos):
# Test pull
# Docker -> Index
resp = requests.get('{0}/v1/repositories/{1}/{2}/images'.format(
self.index_endpoint, namespace, repos),)
self.assertEqual(resp.status_code, 200)
resp = requests.get('{0}/v1/repositories/{1}/{2}/images'.format(
self.index_endpoint, namespace, repos),
auth=tuple(self.user_credentials),
headers={'X-Docker-Token': 'true'})
self.assertEqual(resp.status_code, 200)
self.token = resp.headers.get('x-docker-token')
# Here we should use the 'X-Endpoints' returned in a real environment
# Docker -> Registry
resp = requests.get('{0}/v1/repositories/{1}/{2}/tags/latest'.format(
self.registry_endpoint, namespace, repos),
headers={'Authorization': 'Token ' + self.token})
self.assertEqual(resp.status_code, 200, resp.text)
resp = requests.get('{0}/v1/repositories/{1}/{2}/tags/latest'.format(
self.registry_endpoint, namespace, repos),
)
self.assertEqual(resp.status_code, 200, resp.text)
# Docker -> Registry
# Note(dmp): unicode patch XXX not applied assume requests does the job
image_id = json.loads(resp.text)
resp = requests.get('{0}/v1/images/{1}/ancestry'.format(
self.registry_endpoint, image_id),
)
self.assertEqual(resp.status_code, 200, resp.text)
# Note(dmp): unicode patch XXX not applied assume requests does the job
ancestry = json.loads(resp.text)
# We got the ancestry, let's fetch all the images there
for image_id in ancestry:
json_data, checksum, blob = self.fetch_image(image_id)
# check queried checksum and local computed checksum from the image
# are the same
tmpfile = StringIO()
tmpfile.write(blob)
tmpfile.seek(0)
computed_checksum = checksums.compute_simple(tmpfile, json_data)
tmpfile.close()
self.assertEqual(checksum, computed_checksum)
# Remove the repository
resp = requests.delete('{0}/v1/repositories/{1}/{2}/images'.format(
self.registry_endpoint, namespace, repos), )
self.assertEqual(resp.status_code, 204, resp.text)
# Remove image_id, then parent_id
store = storage.load()
try:
store.remove(os.path.join(store.images, self.image_id))
except Exception:
pass
try:
store.remove(os.path.join(store.images, self.parent_id))
except Exception:
pass
def test_workflow(self):
(namespace, repos) = self.docker_push()
self.docker_pull(namespace, repos)
|
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RPC Controller
"""
import datetime
import traceback
from oslo.config import cfg
import six
from webob import exc
from glance.common import client
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.openstack.common.importutils as imp
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
LOG = logging.getLogger(__name__)
rpc_opts = [
# NOTE(flaper87): Shamelessly copied
# from oslo rpc.
cfg.ListOpt('allowed_rpc_exception_modules',
default=['openstack.common.exception',
'glance.common.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated '
'upon receiving exception data from an rpc call.'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
class RPCJSONSerializer(wsgi.JSONResponseSerializer):
def _sanitizer(self, obj):
def to_primitive(_type, _value):
return {"_type": _type, "_value": _value}
if isinstance(obj, datetime.datetime):
return to_primitive("datetime", timeutils.strtime(obj))
return super(RPCJSONSerializer, self)._sanitizer(obj)
class RPCJSONDeserializer(wsgi.JSONRequestDeserializer):
def _to_datetime(self, obj):
return timeutils.parse_strtime(obj)
def _sanitizer(self, obj):
try:
_type, _value = obj["_type"], obj["_value"]
return getattr(self, "_to_" + _type)(_value)
except (KeyError, AttributeError):
return obj
class Controller(object):
"""
Base RPCController.
This is the base controller for RPC based APIs. Commands
handled by this controller respect the following form:
[{
'command': 'method_name',
'kwargs': {...}
}]
The controller is capable of processing more than one command
per request and will always return a list of results.
:params raise_exc: Boolean that specifies whether to raise
exceptions instead of "serializing" them.
"""
def __init__(self, raise_exc=False):
self._registered = {}
self.raise_exc = raise_exc
def register(self, resource, filtered=None, excluded=None, refiner=None):
"""
Exports methods through the RPC Api.
:params resource: Resource's instance to register.
:params filtered: List of methods that *can* be registered. Read
as "Method must be in this list".
:params excluded: List of methods to exclude.
:params refiner: Callable to use as filter for methods.
:raises AssertionError: If refiner is not callable.
"""
funcs = filter(lambda x: not x.startswith("_"), dir(resource))
if filtered:
funcs = [f for f in funcs if f in filtered]
if excluded:
funcs = [f for f in funcs if f not in excluded]
if refiner:
assert callable(refiner), "Refiner must be callable"
funcs = filter(refiner, funcs)
for name in funcs:
meth = getattr(resource, name)
if not callable(meth):
continue
self._registered[name] = meth
def __call__(self, req, body):
"""
Executes the command
"""
if not isinstance(body, list):
msg = _("Request must be a list of commands")
raise exc.HTTPBadRequest(explanation=msg)
def validate(cmd):
if not isinstance(cmd, dict):
msg = _("Bad Command: %s") % str(cmd)
raise exc.HTTPBadRequest(explanation=msg)
command, kwargs = cmd.get("command"), cmd.get("kwargs")
if (not command or not isinstance(command, six.string_types) or
(kwargs and not isinstance(kwargs, dict))):
msg = _("Wrong command structure: %s") % (str(cmd))
raise exc.HTTPBadRequest(explanation=msg)
method = self._registered.get(command)
if not method:
# Just raise 404 if the user tries to
# access a private method. No need for
# 403 here since logically the command
# is not registered to the rpc dispatcher
raise exc.HTTPNotFound(explanation=_("Command not found"))
return True
# If more than one command was sent, they might be intended to be
# executed sequentially; therefore, let's first verify they're all
# valid before executing them.
commands = filter(validate, body)
results = []
for cmd in commands:
# kwargs is not required
command, kwargs = cmd["command"], cmd.get("kwargs", {})
method = self._registered[command]
try:
result = method(req.context, **kwargs)
except Exception as e:
if self.raise_exc:
raise
cls, val = e.__class__, utils.exception_to_str(e)
msg = (_("RPC Call Error: %(val)s\n%(tb)s") %
dict(val=val, tb=traceback.format_exc()))
LOG.error(msg)
# NOTE(flaper87): Don't propagate all exceptions
# but the ones allowed by the user.
module = cls.__module__
if module not in CONF.allowed_rpc_exception_modules:
cls = exception.RPCError
val = six.text_type(exception.RPCError(cls=cls, val=val))
cls_path = "%s.%s" % (cls.__module__, cls.__name__)
result = {"_error": {"cls": cls_path, "val": val}}
results.append(result)
return results
class RPCClient(client.BaseClient):
def __init__(self, *args, **kwargs):
self._serializer = RPCJSONSerializer()
self._deserializer = RPCJSONDeserializer()
self.raise_exc = kwargs.pop("raise_exc", True)
self.base_path = kwargs.pop("base_path", '/rpc')
super(RPCClient, self).__init__(*args, **kwargs)
@client.handle_unauthenticated
def bulk_request(self, commands):
"""
Execute multiple commands in a single request.
:params commands: List of commands to send. Commands
must respect the following form:
{
'command': 'method_name',
'kwargs': method_kwargs
}
"""
body = self._serializer.to_json(commands)
response = super(RPCClient, self).do_request('POST',
self.base_path,
body)
return self._deserializer.from_json(response.read())
def do_request(self, method, **kwargs):
"""
Simple do_request override. This method serializes
the outgoing body and builds the command that will
be sent.
:params method: The remote python method to call
:params kwargs: Dynamic parameters that will be
passed to the remote method.
"""
content = self.bulk_request([{'command': method,
'kwargs': kwargs}])
# NOTE(flaper87): Return the first result if
# a single command was executed.
content = content[0]
# NOTE(flaper87): Check if content is an error
# and re-raise it if raise_exc is True. Before
# checking if content contains the '_error' key,
# verify if it is an instance of dict - since the
# RPC call may have returned something different.
if self.raise_exc and (isinstance(content, dict)
and '_error' in content):
error = content['_error']
try:
exc_cls = imp.import_class(error['cls'])
raise exc_cls(error['val'])
except ImportError:
# NOTE(flaper87): The exception
# class couldn't be imported, using
# a generic exception.
raise exception.RPCError(**error)
return content
def __getattr__(self, item):
"""
This method returns a method_proxy that
will execute the rpc call in the registry
service.
"""
if item.startswith('_'):
raise AttributeError(item)
def method_proxy(**kw):
return self.do_request(item, **kw)
return method_proxy
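# Usage sketch (hypothetical host and method names): thanks to __getattr__, any
# public attribute access on RPCClient becomes a single-command RPC call, so the
# two calls below are equivalent.
#
#   client = RPCClient(host="registry.example.com", port=9191)
#   result = client.image_get(id="abc123")
#   result = client.do_request("image_get", id="abc123")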
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GYP and GN
MB is a wrapper script for GYP and GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import pprint
import shlex
import shutil
import sys
import subprocess
import tempfile
def main(args):
mbw = MetaBuildWrapper()
mbw.ParseArgs(args)
return mbw.args.func()
class MetaBuildWrapper(object):
def __init__(self):
p = os.path
d = os.path.dirname
self.chromium_src_dir = p.normpath(d(d(d(p.abspath(__file__)))))
self.default_config = p.join(self.chromium_src_dir, 'tools', 'mb',
'mb_config.pyl')
self.platform = sys.platform
self.args = argparse.Namespace()
self.configs = {}
self.masters = {}
self.mixins = {}
self.private_configs = []
self.common_dev_configs = []
self.unsupported_configs = []
def ParseArgs(self, argv):
def AddCommonOptions(subp):
subp.add_argument('-b', '--builder',
help='builder name to look up config from')
subp.add_argument('-m', '--master',
help='master name to look up config from')
subp.add_argument('-c', '--config',
help='configuration to analyze')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file '
'(default is //tools/mb/mb_config.pyl)')
subp.add_argument('-g', '--goma-dir', default=self.ExpandUser('~/goma'),
help='path to goma directory (default is %(default)s).')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
subp.add_argument('-q', '--quiet', action='store_true',
help='Do not print anything on success, '
'just return an exit code.')
subp.add_argument('-v', '--verbose', action='count',
help='verbose logging (may specify multiple times).')
parser = argparse.ArgumentParser(prog='mb')
subps = parser.add_subparsers()
subp = subps.add_parser('analyze',
help='analyze whether changes to a set of files '
'will cause a set of binaries to be rebuilt.')
AddCommonOptions(subp)
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
subp.add_argument('path', nargs=1,
help='path build was generated into.')
subp.add_argument('input_path', nargs=1,
help='path to a file containing the input arguments '
'as a JSON object.')
subp.add_argument('output_path', nargs=1,
help='path to a file containing the output arguments '
'as a JSON object.')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('gen',
help='generate a new set of build files')
AddCommonOptions(subp)
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
subp = subps.add_parser('lookup',
help='look up the command for a given config or '
'builder')
AddCommonOptions(subp)
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser('validate',
help='validate the config file')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file '
'(default is //tools/mb/mb_config.pyl)')
subp.add_argument('-q', '--quiet', action='store_true',
help='Do not print anything on success, '
'just return an exit code.')
subp.set_defaults(func=self.CmdValidate)
subp = subps.add_parser('help',
help='Get help on a subcommand.')
subp.add_argument(nargs='?', action='store', dest='subcommand',
help='The command to get help for.')
subp.set_defaults(func=self.CmdHelp)
self.args = parser.parse_args(argv)
def CmdAnalyze(self):
vals = self.GetConfig()
if vals['type'] == 'gn':
return self.RunGNAnalyze(vals)
elif vals['type'] == 'gyp':
return self.RunGYPAnalyze(vals)
else:
raise MBErr('Unknown meta-build type "%s"' % vals['type'])
def CmdGen(self):
vals = self.GetConfig()
if vals['type'] == 'gn':
return self.RunGNGen(vals)
if vals['type'] == 'gyp':
return self.RunGYPGen(vals)
raise MBErr('Unknown meta-build type "%s"' % vals['type'])
def CmdLookup(self):
vals = self.GetConfig()
if vals['type'] == 'gn':
cmd = self.GNCmd('gen', '<path>', vals['gn_args'])
elif vals['type'] == 'gyp':
cmd = self.GYPCmd('<path>', vals['gyp_defines'], vals['gyp_config'])
else:
raise MBErr('Unknown meta-build type "%s"' % vals['type'])
self.PrintCmd(cmd)
return 0
def CmdHelp(self):
if self.args.subcommand:
self.ParseArgs([self.args.subcommand, '--help'])
else:
self.ParseArgs(['--help'])
def CmdValidate(self):
errs = []
# Read the file to make sure it parses.
self.ReadConfigFile()
# Figure out the whole list of configs and ensure that no config is
# listed in more than one category.
all_configs = {}
for config in self.common_dev_configs:
all_configs[config] = 'common_dev_configs'
for config in self.private_configs:
if config in all_configs:
errs.append('config "%s" listed in "private_configs" also '
'listed in "%s"' % (config, all_configs[config]))
else:
all_configs[config] = 'private_configs'
for config in self.unsupported_configs:
if config in all_configs:
errs.append('config "%s" listed in "unsupported_configs" also '
'listed in "%s"' % (config, all_configs[config]))
else:
all_configs[config] = 'unsupported_configs'
for master in self.masters:
for builder in self.masters[master]:
config = self.masters[master][builder]
if config in all_configs and all_configs[config] not in self.masters:
errs.append('Config "%s" used by a bot is also listed in "%s".' %
(config, all_configs[config]))
else:
all_configs[config] = master
# Check that every referenced config actually exists.
for config, loc in all_configs.items():
if not config in self.configs:
errs.append('Unknown config "%s" referenced from "%s".' %
(config, loc))
# Check that every actual config is actually referenced.
for config in self.configs:
if not config in all_configs:
errs.append('Unused config "%s".' % config)
# Figure out the whole list of mixins, and check that every mixin
# listed by a config or another mixin actually exists.
referenced_mixins = set()
for config, mixins in self.configs.items():
for mixin in mixins:
if not mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by config "%s".' %
(mixin, config))
referenced_mixins.add(mixin)
for mixin in self.mixins:
for sub_mixin in self.mixins[mixin].get('mixins', []):
if not sub_mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
(sub_mixin, mixin))
referenced_mixins.add(sub_mixin)
# Check that every mixin defined is actually referenced somewhere.
for mixin in self.mixins:
if not mixin in referenced_mixins:
errs.append('Unreferenced mixin "%s".' % mixin)
if errs:
raise MBErr('mb config file %s has problems:\n %s' % (self.args.config_file, '\n '.join(errs)))
if not self.args.quiet:
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
def GetConfig(self):
self.ReadConfigFile()
config = self.ConfigFromArgs()
if not config in self.configs:
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
return self.FlattenConfig(config)
def ReadConfigFile(self):
if not self.Exists(self.args.config_file):
raise MBErr('config file not found at %s' % self.args.config_file)
try:
contents = ast.literal_eval(self.ReadFile(self.args.config_file))
except SyntaxError as e:
raise MBErr('Failed to parse config file "%s": %s' %
(self.args.config_file, e))
self.common_dev_configs = contents['common_dev_configs']
self.configs = contents['configs']
self.masters = contents['masters']
self.mixins = contents['mixins']
self.private_configs = contents['private_configs']
self.unsupported_configs = contents['unsupported_configs']
def ConfigFromArgs(self):
if self.args.config:
if self.args.master or self.args.builder:
raise MBErr('Can not specific both -c/--config and -m/--master or '
'-b/--builder')
return self.args.config
if not self.args.master or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(-m/--master and -b/--builder)')
if not self.args.master in self.masters:
raise MBErr('Master name "%s" not found in "%s"' %
(self.args.master, self.args.config_file))
if not self.args.builder in self.masters[self.args.master]:
raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
(self.args.builder, self.args.master, self.args.config_file))
return self.masters[self.args.master][self.args.builder]
def FlattenConfig(self, config):
mixins = self.configs[config]
vals = {
'type': None,
'gn_args': [],
'gyp_config': [],
'gyp_defines': [],
}
visited = []
self.FlattenMixins(mixins, vals, visited)
return vals
def FlattenMixins(self, mixins, vals, visited):
for m in mixins:
if m not in self.mixins:
raise MBErr('Unknown mixin "%s"' % m)
# TODO: check for cycles in mixins.
visited.append(m)
mixin_vals = self.mixins[m]
if 'type' in mixin_vals:
vals['type'] = mixin_vals['type']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
if 'gyp_config' in mixin_vals:
vals['gyp_config'] = mixin_vals['gyp_config']
if 'gyp_defines' in mixin_vals:
if vals['gyp_defines']:
vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
else:
vals['gyp_defines'] = mixin_vals['gyp_defines']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
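# Flattening sketch (hypothetical config file contents): given
#   configs = {'gn_debug_bot': ['gn', 'debug', 'goma']}
#   mixins  = {'gn': {'type': 'gn'},
#              'debug': {'gn_args': 'is_debug=true'},
#              'goma': {'gn_args': 'use_goma=true goma_dir=$(goma_dir)'}}
# FlattenConfig('gn_debug_bot') walks the mixins in order and returns
#   {'type': 'gn',
#    'gn_args': 'is_debug=true use_goma=true goma_dir=$(goma_dir)',
#    'gyp_config': [], 'gyp_defines': []}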
def RunGNGen(self, vals):
path = self.args.path[0]
cmd = self.GNCmd('gen', path, vals['gn_args'])
swarming_targets = []
if self.args.swarming_targets_file:
# We need GN to generate the list of runtime dependencies for
# the compile targets listed (one per line) in the file so
# we can run them via swarming. We use ninja_to_gn.pyl to convert
# the compile targets to the matching GN labels.
contents = self.ReadFile(self.args.swarming_targets_file)
swarming_targets = contents.splitlines()
gn_isolate_map = ast.literal_eval(self.ReadFile(os.path.join(
self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
gn_labels = []
for target in swarming_targets:
if not target in gn_isolate_map:
raise MBErr('test target "%s" not found in %s' %
(target, '//testing/buildbot/gn_isolate_map.pyl'))
gn_labels.append(gn_isolate_map[target]['label'])
gn_runtime_deps_path = self.ToAbsPath(path, 'runtime_deps')
# Since GN hasn't run yet, the build directory may not even exist.
self.MaybeMakeDirectory(self.ToAbsPath(path))
self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
ret, _, _ = self.Run(cmd)
for target in swarming_targets:
if sys.platform == 'win32':
deps_path = self.ToAbsPath(path, target + '.exe.runtime_deps')
else:
deps_path = self.ToAbsPath(path, target + '.runtime_deps')
if not self.Exists(deps_path):
raise MBErr('did not generate %s' % deps_path)
command, extra_files = self.GetIsolateCommand(target, vals,
gn_isolate_map)
runtime_deps = self.ReadFile(deps_path).splitlines()
isolate_path = self.ToAbsPath(path, target + '.isolate')
self.WriteFile(isolate_path,
pprint.pformat({
'variables': {
'command': command,
'files': sorted(runtime_deps + extra_files),
}
}) + '\n')
self.WriteJSON(
{
'args': [
'--isolated',
self.ToSrcRelPath('%s%s%s.isolated' % (path, os.sep, target)),
'--isolate',
self.ToSrcRelPath('%s%s%s.isolate' % (path, os.sep, target)),
],
'dir': self.chromium_src_dir,
'version': 1,
},
isolate_path + 'd.gen.json',
)
return ret
def GNCmd(self, subcommand, path, gn_args=''):
if self.platform == 'linux2':
gn_path = os.path.join(self.chromium_src_dir, 'buildtools', 'linux64',
'gn')
elif self.platform == 'darwin':
gn_path = os.path.join(self.chromium_src_dir, 'buildtools', 'mac',
'gn')
else:
gn_path = os.path.join(self.chromium_src_dir, 'buildtools', 'win',
'gn.exe')
cmd = [gn_path, subcommand, path]
gn_args = gn_args.replace("$(goma_dir)", self.args.goma_dir)
if gn_args:
cmd.append('--args=%s' % gn_args)
return cmd
def RunGYPGen(self, vals):
path = self.args.path[0]
output_dir, gyp_config = self.ParseGYPConfigPath(path)
if gyp_config != vals['gyp_config']:
raise MBErr('The last component of the path (%s) must match the '
'GYP configuration specified in the config (%s), and '
'it does not.' % (gyp_config, vals['gyp_config']))
cmd = self.GYPCmd(output_dir, vals['gyp_defines'], config=gyp_config)
ret, _, _ = self.Run(cmd)
return ret
def RunGYPAnalyze(self, vals):
output_dir, gyp_config = self.ParseGYPConfigPath(self.args.path[0])
if gyp_config != vals['gyp_config']:
raise MBErr('The last component of the path (%s) must match the '
'GYP configuration specified in the config (%s), and '
'it does not.' % (gyp_config, vals['gyp_config']))
if self.args.verbose:
inp = self.GetAnalyzeInput()
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
cmd = self.GYPCmd(output_dir, vals['gyp_defines'], config=gyp_config)
cmd.extend(['-G', 'config_path=%s' % self.args.input_path[0],
'-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
ret, _, _ = self.Run(cmd)
if not ret and self.args.verbose:
outp = json.loads(self.ReadFile(self.args.output_path[0]))
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
return ret
def RunGNIsolate(self, vals):
build_path = self.args.path[0]
inp = self.ReadInputJSON(['targets'])
if self.args.verbose:
self.Print()
self.Print('isolate input:')
self.PrintJSON(inp)
self.Print()
output_path = self.args.output_path[0]
for target in inp['targets']:
runtime_deps_path = self.ToAbsPath(build_path, target + '.runtime_deps')
if not self.Exists(runtime_deps_path):
self.WriteFailureAndRaise('"%s" does not exist' % runtime_deps_path,
output_path)
command, extra_files = self.GetIsolateCommand(target, vals, None)
runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
isolate_path = self.ToAbsPath(build_path, target + '.isolate')
self.WriteFile(isolate_path,
pprint.pformat({
'variables': {
'command': command,
'files': sorted(runtime_deps + extra_files),
}
}) + '\n')
self.WriteJSON(
{
'args': [
'--isolated',
self.ToSrcRelPath('%s/%s.isolated' % (build_path, target)),
'--isolate',
self.ToSrcRelPath('%s/%s.isolate' % (build_path, target)),
],
'dir': self.chromium_src_dir,
'version': 1,
},
isolate_path + 'd.gen.json',
)
return 0
def GetIsolateCommand(self, target, vals, gn_isolate_map):
# This needs to mirror the settings in //build/config/ui.gni:
# use_x11 = is_linux && !use_ozone.
# TODO(dpranke): Figure out how to keep this in sync better.
use_x11 = (sys.platform == 'linux2' and
not 'target_os="android"' in vals['gn_args'] and
not 'use_ozone=true' in vals['gn_args'])
asan = 'is_asan=true' in vals['gn_args']
msan = 'is_msan=true' in vals['gn_args']
tsan = 'is_tsan=true' in vals['gn_args']
executable_suffix = '.exe' if sys.platform == 'win32' else ''
test_type = gn_isolate_map[target]['type']
cmdline = []
extra_files = []
if use_x11 and test_type == 'windowed_test_launcher':
extra_files = [
'xdisplaycheck',
'../../testing/test_env.py',
'../../testing/xvfb.py',
]
cmdline = [
'../../testing/xvfb.py',
'.',
'./' + str(target),
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
'./' + str(target) + executable_suffix,
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type == 'raw':
extra_files = []
cmdline = [
'./' + str(target) + executable_suffix,
] + gn_isolate_map[target].get('args', [])
else:
self.WriteFailureAndRaise('No command line for %s found (test type %s).'
% (target, test_type), output_path=None)
return cmdline, extra_files
def ToAbsPath(self, build_path, *comps):
return os.path.join(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
*comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
# TODO: Support normal paths in addition to source-absolute paths.
assert(path.startswith('//'))
return path[2:].replace('/', os.sep)
def ParseGYPConfigPath(self, path):
rpath = self.ToSrcRelPath(path)
output_dir, _, config = rpath.rpartition('/')
self.CheckGYPConfigIsSupported(config, path)
return output_dir, config
def CheckGYPConfigIsSupported(self, config, path):
if config not in ('Debug', 'Release'):
if (sys.platform in ('win32', 'cygwin') and
config not in ('Debug_x64', 'Release_x64')):
raise MBErr('Unknown or unsupported config type "%s" in "%s"' %
(config, path))
def GYPCmd(self, output_dir, gyp_defines, config):
gyp_defines = gyp_defines.replace("$(goma_dir)", self.args.goma_dir)
cmd = [
sys.executable,
os.path.join('build', 'gyp_chromium'),
'-G',
'output_dir=' + output_dir,
'-G',
'config=' + config,
]
for d in shlex.split(gyp_defines):
cmd += ['-D', d]
return cmd
def RunGNAnalyze(self, vals):
# analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
ret = self.RunGNGen(vals)
if ret:
return ret
inp = self.ReadInputJSON(['files', 'targets'])
if self.args.verbose:
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
output_path = self.args.output_path[0]
# Bail out early if a GN file was modified, since 'gn refs' won't know
# what to do about it.
if any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']):
self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
return 0
# Bail out early if 'all' was asked for, since 'gn refs' won't recognize it.
if 'all' in inp['targets']:
self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
return 0
# This shouldn't normally happen, but could due to unusual race conditions,
# like a try job that gets scheduled before a patch lands but runs after
# the patch has landed.
if not inp['files']:
self.Print('Warning: No files modified in patch, bailing out early.')
self.WriteJSON({'targets': [],
'build_targets': [],
'status': 'No dependency'}, output_path)
return 0
ret = 0
response_file = self.TempFile()
response_file.write('\n'.join(inp['files']) + '\n')
response_file.close()
matching_targets = []
try:
cmd = self.GNCmd('refs', self.args.path[0]) + [
'@%s' % response_file.name, '--all', '--as=output']
ret, out, _ = self.Run(cmd)
if ret and not 'The input matches no targets' in out:
self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
output_path)
build_dir = self.ToSrcRelPath(self.args.path[0]) + os.sep
for output in out.splitlines():
build_output = output.replace(build_dir, '')
if build_output in inp['targets']:
matching_targets.append(build_output)
cmd = self.GNCmd('refs', self.args.path[0]) + [
'@%s' % response_file.name, '--all']
ret, out, _ = self.Run(cmd)
if ret and not 'The input matches no targets' in out:
self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
output_path)
for label in out.splitlines():
build_target = label[2:]
# We want to accept 'chrome/android:chrome_shell_apk' and
# just 'chrome_shell_apk'. This may result in too many targets
# getting built, but we can adjust that later if need be.
for input_target in inp['targets']:
if (input_target == build_target or
build_target.endswith(':' + input_target)):
matching_targets.append(input_target)
finally:
self.RemoveFile(response_file.name)
if matching_targets:
# TODO: it could be that a target X might depend on a target Y
# and both would be listed in the input, but we would only need
# to specify target X as a build_target (whereas both X and Y are
# targets). I'm not sure if that optimization is generally worth it.
self.WriteJSON({'targets': sorted(matching_targets),
'build_targets': sorted(matching_targets),
'status': 'Found dependency'}, output_path)
else:
self.WriteJSON({'targets': [],
'build_targets': [],
'status': 'No dependency'}, output_path)
if not ret and self.args.verbose:
outp = json.loads(self.ReadFile(output_path))
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path)
raise MBErr(msg)
def WriteJSON(self, obj, path):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n')
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
def PrintCmd(self, cmd):
if cmd[0] == sys.executable:
cmd = ['python'] + cmd[1:]
self.Print(*[pipes.quote(c) for c in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
def Print(self, *args, **kwargs):
# This function largely exists so it can be overridden for testing.
print(*args, **kwargs)
def Run(self, cmd):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose:
self.PrintCmd(cmd)
if self.args.dryrun:
return 0, '', ''
ret, out, err = self.Call(cmd)
if self.args.verbose:
if out:
self.Print(out, end='')
if err:
self.Print(err, end='', file=sys.stderr)
return ret, out, err
def Call(self, cmd):
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ReadFile(self, path):
# This function largely exists so it can be overridden for testing.
with open(path) as fp:
return fp.read()
def RemoveFile(self, path):
# This function largely exists so it can be overridden for testing.
os.remove(path)
def TempFile(self, mode='w'):
# This function largely exists so it can be overridden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
pass
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except MBErr as e:
print(e)
sys.exit(1)
except KeyboardInterrupt:
print("interrupted, exiting", stream=sys.stderr)
sys.exit(130)
|
|
import logging
from collections import namedtuple
import datetime, math
import tkp.db
from tkp.db.generic import get_db_rows_as_dicts
from tkp.db.database import Database
from tkp.db.orm import DataSet, Image
from tkp.db import general as dbgen
from tkp.db import nulldetections
import tkp.testutil.data as testdata
import tkp.utility.coordinates as coords
ExtractedSourceTuple = namedtuple("ExtractedSourceTuple",
['ra', 'dec' ,
'ra_fit_err' , 'dec_fit_err' ,
'peak' , 'peak_err',
'flux', 'flux_err',
'sigma',
'beam_maj', 'beam_min', 'beam_angle',
'ew_sys_err', 'ns_sys_err',
'error_radius', 'fit_type',
'chisq', 'reduced_chisq'
])
def delete_test_database(database):
"""
Use with caution!
NB. Not the same as a freshly initialised database.
All the sequence counters are offset.
"""
if database.database.lower().find("test") != 0:
raise ValueError("You tried to delete a database not prefixed with 'test'.\n"
"Not recommended!")
try:
#cursor = database.connection.cursor()
query = "DELETE from runningcatalog_flux"
tkp.db.execute(query, commit=True)
query = "DELETE from assocxtrsource"
tkp.db.execute(query, commit=True)
query = "DELETE from assocskyrgn"
tkp.db.execute(query, commit=True)
query = "DELETE from temprunningcatalog"
tkp.db.execute(query, commit=True)
query = "DELETE from newsource"
tkp.db.execute(query, commit=True)
query = "DELETE from runningcatalog"
tkp.db.execute(query, commit=True)
query = "DELETE from extractedsource"
tkp.db.execute(query, commit=True)
query = "DELETE from image"
tkp.db.execute(query, commit=True)
query = "DELETE from skyregion"
tkp.db.execute(query, commit=True)
query = "DELETE from dataset"
tkp.db.execute(query, commit=True)
except database.connection.Error:
logging.warn("Query failed when trying to blank database\n"
"Query: " + query)
raise
def example_dbimage_data_dict(**kwargs):
"""
Defines the canonical default image-data for unit-testing the database.
By defining this in one place we make it simple to make changes.
A subset of the default values may be overridden by passing the keys
as keyword-args.
Note that while RA, Dec and extraction radius are arbitrary,
they should (usually) be close enough and large enough to enclose
the RA and Dec of any fake source extractions inserted, since the
association routines reject sources outside of designated extraction
regions.
"""
starttime = datetime.datetime(2012, 1, 1) # Happy new year
time_spacing = datetime.timedelta(seconds=600)
init_im_params = {'tau_time':300,
'freq_eff':140e6,
'freq_bw':2e6,
'taustart_ts':starttime,
'beam_smaj_pix': float(2.7),
'beam_smin_pix': float(2.3),
'beam_pa_rad': float(1.7),
'deltax': float(-0.01111),
'deltay': float(0.01111),
'url':testdata.fits_file, # just an arbitrary existing fits file
'centre_ra': 123., # Arbitrarily picked.
'centre_decl': 10., # Arbitrarily picked.
'xtr_radius': 10., # (Degrees)
'rms_qc': 1.,
'rms_min': 1e-4, #0.1mJy RMS
'rms_max': 3e-4, #0.3mJy RMS
'detection_thresh': 6,
'analysis_thresh': 3
}
init_im_params.update(kwargs)
return init_im_params
def generate_timespaced_dbimages_data(n_images,
timedelta_between_images=datetime.timedelta(days=1),
**kwargs):
"""
Generate a list of image data dictionaries.
The image-data dicts are identical except for having the taustart_ts
advanced by a fixed timedelta for each entry.
These can be used to create known entries in the image table, for
unit-testing.
A subset of the image-data defaults may be overridden by passing the relevant
dictionary values as keyword args.
"""
init_im_params = example_dbimage_data_dict(**kwargs)
im_params = []
for i in range(n_images):
im_params.append(init_im_params.copy())
init_im_params['taustart_ts'] += timedelta_between_images
return im_params
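# Illustrative usage sketch (not part of the original module); the `_example_`
# name is invented here and only the helpers defined above are used.
def _example_generate_timespaced_dbimages_data():
    """Generate three day-spaced image dicts, overriding the integration time."""
    im_params = generate_timespaced_dbimages_data(n_images=3, tau_time=600)
    # Only 'taustart_ts' differs between entries, advancing by the default one day.
    assert (im_params[1]['taustart_ts'] - im_params[0]['taustart_ts']
            == datetime.timedelta(days=1))
    return im_params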
def example_extractedsource_tuple(ra=123.123, dec=10.5, # Arbitrarily picked defaults
ra_fit_err=5. / 3600, dec_fit_err=6. / 3600,
peak=15e-3, peak_err=5e-4,
flux=15e-3, flux_err=5e-4,
sigma=15.,
beam_maj=100., beam_min=100., beam_angle=45.,
ew_sys_err=20., ns_sys_err=20.,
error_radius=10.0, fit_type=0,
chisq=5., reduced_chisq=1.5):
"""Generates an example 'fake extraction' for unit testing.
Note that while RA and Dec are arbitrary, they should (usually) be close
to the RA and Dec of any fake images used, since the association routines
reject sources outside of designated extraction regions.
"""
# NOTE: ra_fit_err & dec_fit_err are in degrees,
# and ew_sys_err, ns_sys_err and error_radius are in arcsec.
    # The ew_uncertainty is then the sqrt of the quadratic sum of the
    # ew systematic error and the error_radius (and similarly for ns).
return ExtractedSourceTuple(ra=ra, dec=dec,
ra_fit_err=ra_fit_err, dec_fit_err=dec_fit_err,
peak=peak, peak_err=peak_err,
flux=flux, flux_err=flux_err,
sigma=sigma,
beam_maj=beam_maj, beam_min=beam_min,
beam_angle=beam_angle,
ew_sys_err=ew_sys_err, ns_sys_err=ns_sys_err,
error_radius=error_radius, fit_type=fit_type,
chisq=chisq, reduced_chisq=reduced_chisq
)
def deRuiter_radius(src1, src2):
"""Calculates the De Ruiter radius for two sources"""
# The errors are the square root of the quadratic sum of
# the systematic and fitted errors.
src1_ew_uncertainty = math.sqrt(src1.ew_sys_err**2 + src1.error_radius**2) / 3600.
src1_ns_uncertainty = math.sqrt(src1.ns_sys_err**2 + src1.error_radius**2) / 3600.
src2_ew_uncertainty = math.sqrt(src2.ew_sys_err**2 + src2.error_radius**2) / 3600.
src2_ns_uncertainty = math.sqrt(src2.ns_sys_err**2 + src2.error_radius**2) / 3600.
ra_nom = ((src1.ra - src2.ra) * math.cos(math.radians(0.5 * (src1.dec + src2.dec))))**2
ra_denom = src1_ew_uncertainty**2 + src2_ew_uncertainty**2
ra_fac = ra_nom / ra_denom
dec_nom = (src1.dec - src2.dec)**2
dec_denom = src1_ns_uncertainty**2 + src2_ns_uncertainty**2
dec_fac = dec_nom / dec_denom
dr = math.sqrt(ra_fac + dec_fac)
return dr
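# Illustrative sketch (not part of the original module): compute the De Ruiter
# radius for two fake extractions built with the helper defined above.
def _example_deruiter_radius():
    src1 = example_extractedsource_tuple(ra=123.123, dec=10.5)
    src2 = example_extractedsource_tuple(ra=123.125, dec=10.501)
    # A dimensionless separation, normalised by the combined position errors.
    return deRuiter_radius(src1, src2)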
def lightcurve_metrics(src_list):
"""
Calculates various metrics for a lightcurve made up of source extractions
These are normally calculated internally in the database - this function
serves as a sanity check, and is used for unit-testing purposes.
Returns a list of dictionaries, the nth dict representing the value
of the metrics after processing the first n extractions in the lightcurve.
The dict keys mirror the column names in the database, to make
cross-checking of results trivial.
Final note: this function is very inefficient, recalculating over the
first n extractions for each step. We could make it iterative, updating
the weighted averages as we do in the database. However, this way
provides a stronger cross-check that our iterative SQL approaches are
correct - less chance of making the same mistakes in two languages!
"""
metrics = []
for i, src in enumerate(src_list):
N = i + 1
avg_int_flux = sum(src.flux for src in src_list[0:N]) / N
avg_int_flux_sq = sum(src.flux**2 for src in src_list[0:N]) / N
avg_w_f_int = sum(src.flux/src.flux_err**2 for src in src_list[0:N]) / N
avg_w_f_int_sq = sum(src.flux**2/src.flux_err**2 for src in src_list[0:N]) / N
avg_w = sum(1./src.flux_err**2 for src in src_list[0:N]) / N
if N == 1:
v = 0.0
eta = 0.0
else:
v = math.sqrt(N * (avg_int_flux_sq - avg_int_flux**2) / (N - 1.)) / avg_int_flux
eta = N * (avg_w_f_int_sq - avg_w_f_int**2/avg_w) / (N - 1.)
metrics.append({
'v_int': v,
'eta_int': eta,
'avg_f_int': avg_int_flux,
'avg_f_int_sq': avg_int_flux_sq,
'avg_f_int_weight': avg_w,
'avg_weighted_f_int': avg_w_f_int,
'avg_weighted_f_int_sq': avg_w_f_int_sq,
'f_datapoints': N
})
return metrics
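# Illustrative cross-check (not part of the original module): metrics for a
# short fake lightcurve built from the helper defined above.
def _example_lightcurve_metrics():
    lightcurve = [example_extractedsource_tuple(flux=f)
                  for f in (10e-3, 20e-3, 15e-3)]
    metrics = lightcurve_metrics(lightcurve)
    # One dict per datapoint; the first has zero variability indices by definition.
    assert len(metrics) == 3
    assert metrics[0]['v_int'] == 0.0 and metrics[0]['eta_int'] == 0.0
    return metrics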
class MockSource(object):
def __init__(self,
template_extractedsource,
lightcurve,
):
"""
Defines a MockSource for generating mock source lists.
(These can be used to test the database routines.)
        The lightcurve-dict entries define the times of non-zero flux
        (we do not support time-ranges here; discretely defined datapoints are
        sufficiently complex for the current unit-test suite). Any requested
        datetime that is not defined in the dict produces a zero-flux measurement.
A defaultdict may be supplied to simulate a steady-flux source.
Args:
template_extractedsource (ExtractedSourceTuple): This defines
everything **except** the flux and significance of the
extraction (i.e. position, fit error, beam properties, etc.).
lightcurve (dict): A dict mapping datetime -> flux value [Jy].
Any undefined datetimes will produce a zero-flux measurement.
A defaultdict with constant-valued default may be supplied to
represent a steady source, e.g.
                >>> MockSource(base_source, defaultdict(lambda: steady_flux_val))
"""
self.base_source = template_extractedsource
self.lightcurve = lightcurve
def value_at_dtime(self, dtime, image_rms):
"""Returns an `extractedsource` for a given datetime.
If lightcurve is defined but does not contain the requested datetime,
then peak, flux, sigma are all set to zero.
"""
try:
fluxval = self.lightcurve[dtime]
except KeyError:
fluxval = 0
return self.base_source._replace(
peak=fluxval,flux=fluxval,sigma=fluxval/image_rms)
def simulate_extraction(self, db_image, extraction_type,
rms_attribute='rms_min'):
"""
Simulate extraction process, returns extracted source or none.
Uses the database image properties (extraction region, rms values)
to determine if this source would be extracted in the given image,
and return an extraction or None accordingly.
Args:
            db_image (tkp.db.Image): Database Image object.
extraction_type: Valid values are 'blind', 'ff_nd'. If 'blind'
then we only return an extracted source if the flux is above
rms_value * detection_threshold.
rms_attribute (str): Valid values are 'rms_min', 'rms_max'.
Determines which rms value we use when deciding if this source
will be seen in a blind extraction.
Returns:
ExtractedSourceTuple or None.
"""
rms = getattr(db_image, rms_attribute)
ex = self.value_at_dtime(db_image.taustart_ts, rms)
#First check if source is in this image's extraction region:
src_distance_degrees = coords.angsep(
ex.ra, ex.dec,db_image.centre_ra, db_image.centre_decl) / 3600.0
if src_distance_degrees > db_image.xtr_radius:
return None
if extraction_type == 'ff_nd':
return ex
elif extraction_type == 'blind':
if ex.sigma > db_image.detection_thresh:
return ex
else:
return None
else:
raise ValueError("Unrecognised extraction type: {}".format(
extraction_type))
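# Illustrative sketch (not part of the original module): a steady-flux
# MockSource built with a defaultdict, as suggested by the class docstring above.
def _example_steady_mock_source(steady_flux_val=30e-3):
    from collections import defaultdict
    base_source = example_extractedsource_tuple()
    return MockSource(base_source, defaultdict(lambda: steady_flux_val))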
def insert_image_and_simulated_sources(dataset, image_params, mock_sources,
new_source_sigma_margin,
deruiter_radius=3.7):
"""
Simulates the standard database image-and-source insertion logic using mock
sources.
Args:
dataset: The dataset object
image_params (dict): Contains the image properties.
mock_sources (list of MockSource): The mock sources to simulate.
new_source_sigma_margin (float): Parameter passed to source-association
routines.
deruiter_radius (float): Parameter passed to source-association
routines.
Returns:
3-tuple (image, list of blind extractions, list of forced fits).
"""
image = tkp.db.Image(data=image_params,dataset=dataset)
blind_extractions=[]
for src in mock_sources:
xtr = src.simulate_extraction(image,extraction_type='blind')
if xtr is not None:
blind_extractions.append(xtr)
image.insert_extracted_sources(blind_extractions,'blind')
image.associate_extracted_sources(deRuiter_r=deruiter_radius,
new_source_sigma_margin=new_source_sigma_margin)
nd_ids_posns = nulldetections.get_nulldetections(image.id)
nd_posns = [(ra,decl) for ids, ra, decl in nd_ids_posns]
forced_fits = []
for posn in nd_posns:
for src in mock_sources:
eps = 1e-13
if (math.fabs(posn[0] - src.base_source.ra)<eps and
math.fabs(posn[1] - src.base_source.dec)<eps ):
forced_fits.append(
src.simulate_extraction(image,extraction_type='ff_nd')
)
if len(nd_posns) != len(forced_fits):
raise LookupError("Something went wrong, nulldetection position did "
"not match a mock source.")
#image.insert_extracted_sources(forced_fits, 'ff_nd')
dbgen.insert_extracted_sources(image.id, forced_fits, 'ff_nd',
ff_runcat_ids=[ids for ids, ra, decl in nd_ids_posns])
nulldetections.associate_nd(image.id)
return image, blind_extractions, forced_fits
def get_newsources_for_dataset(dsid):
"""
Returns dicts representing all newsources for this dataset.
Args:
dsid: Dataset id
Returns:
list: (list of dicts) Each dict represents one newsource.
The dict keys are all the columns in the newsources table, plus
the 'taustart_ts' from the image table, which represents the
trigger time.
"""
qry = """\
SELECT tr.id
,tr.previous_limits_image
,rc.id as runcat_id
,img.taustart_ts
,img.band
,ax.v_int
,ax.eta_int
, ((ex.f_peak - limits_image.detection_thresh*limits_image.rms_min)
/ limits_image.rms_min) AS low_thresh_sigma
, ((ex.f_peak - limits_image.detection_thresh*limits_image.rms_max)
/ limits_image.rms_max) AS high_thresh_sigma
FROM newsource tr
,runningcatalog rc
,extractedsource ex
,image img
,assocxtrsource ax
,image limits_image
WHERE rc.dataset = %(dsid)s
AND tr.runcat = rc.id
AND tr.trigger_xtrsrc = ex.id
AND ex.image = img.id
AND ax.runcat = rc.id
AND ax.xtrsrc = ex.id
AND tr.previous_limits_image = limits_image.id
"""
cursor = Database().cursor
cursor.execute(qry, {'dsid':dsid})
newsource_rows_for_dataset = get_db_rows_as_dicts(cursor)
return newsource_rows_for_dataset
def get_sources_filtered_by_final_variability(dataset_id,
eta_min,
v_min,
# minpoints
):
"""
Search the database to find high-variability lightcurves.
Uses the variability associated with the last datapoint in a lightcurve
as the key criteria.
Args:
dataset_id (int): Dataset to search
eta_min (float): Minimum value of eta-index to return.
v_min (float): Minimum value of V-index to return.
Returns:
list: (list of dicts) Each dict represents a runningcatalog_flux entry
matching the filter criteria.
"""
query = """\
SELECT rc.id as runcat_id
,image.band
,ax.v_int
,ax.eta_int
FROM runningcatalog as rc
JOIN assocxtrsource as ax ON ax.runcat = rc.id
JOIN extractedsource as ex ON ax.xtrsrc = ex.id
JOIN image ON ex.image = image.id
JOIN (
-- Determine which are the most recent variability values
-- for each lightcurve.
SELECT
a.runcat as runcat_id,
i.band as band,
max(i.taustart_ts) as MaxTimestamp
FROM
assocxtrsource a
JOIN extractedsource e ON a.xtrsrc = e.id
JOIN image i ON e.image = i.id
GROUP BY
runcat_id, band
) last_timestamps
ON rc.id = last_timestamps.runcat_id
AND image.band = last_timestamps.band
AND image.taustart_ts = last_timestamps.MaxTimestamp
WHERE rc.dataset = %(dataset_id)s
AND eta_int >= %(eta_min)s
AND v_int >= %(v_min)s
"""
cursor = tkp.db.Database().cursor
cursor.execute(query, {'dataset_id': dataset_id,
'eta_min':eta_min,
'v_min':v_min,
})
transients = get_db_rows_as_dicts(cursor)
return transients
|
|
from __future__ import print_function
import re
import sys
from . import common
if sys.version_info[0] > 2:
class string:
expandtabs = str.expandtabs
else:
import string
# RegEx: this is where the magic happens.
##### Assembly parser
ASM_FUNCTION_X86_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section|#+ -- End function)',
flags=(re.M | re.S))
ASM_FUNCTION_ARM_RE = re.compile(
r'^(?P<func>[0-9a-zA-Z_]+):\n' # f: (name of function)
r'\s+\.fnstart\n' # .fnstart
r'(?P<body>.*?)\n' # (body of the function)
r'.Lfunc_end[0-9]+:', # .Lfunc_end0: or # -- End function
flags=(re.M | re.S))
ASM_FUNCTION_AARCH64_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*\/\/[ \t]*@(?P=func)\n'
r'(?:[ \t]+.cfi_startproc\n)?' # drop optional cfi noise
r'(?P<body>.*?)\n'
# This list is incomplete
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_AMDGPU_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@(?P=func)\n[^:]*?'
r'(?P<body>.*?)\n' # (body of the function)
# This list is incomplete
r'^\s*(\.Lfunc_end[0-9]+:\n|\.section)',
flags=(re.M | re.S))
ASM_FUNCTION_HEXAGON_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@(?P=func)\n[^:]*?'
r'(?P<body>.*?)\n' # (body of the function)
# This list is incomplete
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_MIPS_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?' # f: (name of func)
r'(?:^[ \t]+\.(frame|f?mask|set).*?\n)+' # Mips+LLVM standard asm prologue
r'(?P<body>.*?)\n' # (body of the function)
# Mips+LLVM standard asm epilogue
r'(?:(^[ \t]+\.set[^\n]*?\n)*^[ \t]+\.end.*?\n)'
r'(\$|\.L)func_end[0-9]+:\n', # $func_end0: (mips32 - O32) or
# .Lfunc_end0: (mips64 - NewABI)
flags=(re.M | re.S))
ASM_FUNCTION_MSP430_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@(?P=func)\n[^:]*?'
r'(?P<body>.*?)\n'
r'(\$|\.L)func_end[0-9]+:\n', # $func_end0:
flags=(re.M | re.S))
ASM_FUNCTION_PPC_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n'
r'.*?'
r'\.Lfunc_begin[0-9]+:\n'
r'(?:[ \t]+.cfi_startproc\n)?'
r'(?:\.Lfunc_[gl]ep[0-9]+:\n(?:[ \t]+.*?\n)*)*'
r'(?P<body>.*?)\n'
# This list is incomplete
r'(?:^[ \t]*(?:\.long[ \t]+[^\n]+|\.quad[ \t]+[^\n]+)\n)*'
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_RISCV_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_LANAI_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@(?P=func)\n'
r'(?:[ \t]+.cfi_startproc\n)?' # drop optional cfi noise
r'(?P<body>.*?)\s*'
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_SPARC_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@(?P=func)\n'
r'(?P<body>.*?)\s*'
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_SYSTEMZ_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n'
r'[ \t]+.cfi_startproc\n'
r'(?P<body>.*?)\n'
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
ASM_FUNCTION_AARCH64_DARWIN_RE = re.compile(
r'^_(?P<func>[^:]+):[ \t]*;[ \t]@(?P=func)\n'
r'([ \t]*.cfi_startproc\n[\s]*)?'
r'(?P<body>.*?)'
r'([ \t]*.cfi_endproc\n[\s]*)?'
r'^[ \t]*;[ \t]--[ \t]End[ \t]function',
flags=(re.M | re.S))
ASM_FUNCTION_ARM_DARWIN_RE = re.compile(
r'^[ \t]*\.globl[ \t]*_(?P<func>[^ \t])[ \t]*@[ \t]--[ \t]Begin[ \t]function[ \t](?P=func)'
r'(?P<directives>.*?)'
r'^_(?P=func):\n[ \t]*'
r'(?P<body>.*?)'
r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
flags=(re.M | re.S ))
ASM_FUNCTION_ARM_MACHO_RE = re.compile(
r'^_(?P<func>[^:]+):[ \t]*\n'
r'([ \t]*.cfi_startproc\n[ \t]*)?'
r'(?P<body>.*?)\n'
r'[ \t]*\.cfi_endproc\n',
flags=(re.M | re.S))
ASM_FUNCTION_ARM_IOS_RE = re.compile(
r'^_(?P<func>[^:]+):[ \t]*\n'
r'^Lfunc_begin(?P<id>[0-9][1-9]*):\n'
r'(?P<body>.*?)'
r'^Lfunc_end(?P=id):\n'
r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
flags=(re.M | re.S))
ASM_FUNCTION_WASM32_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n'
r'(?P<body>.*?)\n'
r'^\s*(\.Lfunc_end[0-9]+:\n|end_function)',
flags=(re.M | re.S))
SCRUB_LOOP_COMMENT_RE = re.compile(
r'# =>This Inner Loop Header:.*|# in Loop:.*', flags=re.M)
SCRUB_X86_SHUFFLES_RE = (
re.compile(
r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
flags=re.M))
SCRUB_X86_SPILL_RELOAD_RE = (
re.compile(
r'-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$',
flags=re.M))
SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
SCRUB_X86_LCP_RE = re.compile(r'\.LCPI[0-9]+_[0-9]+')
SCRUB_X86_RET_RE = re.compile(r'ret[l|q]')
def scrub_asm_x86(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Detect shuffle asm comments and hide the operands in favor of the comments.
asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
# Detect stack spills and reloads and hide their exact offset and whether
# they used the stack pointer or frame pointer.
asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r'{{[-0-9]+}}(%\1{{[sb]}}p)\2', asm)
# Generically match the stack offset of a memory operand.
asm = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', asm)
if getattr(args, 'x86_scrub_rip', False):
# Generically match a RIP-relative memory operand.
asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
# Generically match a LCP symbol.
asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', asm)
if getattr(args, 'extra_scrub', False):
# Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
asm = SCRUB_X86_RET_RE.sub(r'ret{{[l|q]}}', asm)
# Strip kill operands inserted into the asm.
asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
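# Illustrative usage sketch (not part of the original script): scrub a tiny x86
# snippet. It relies on the regexes provided by the sibling `common` module
# imported above; the argparse.Namespace stands in for the real argument object.
def _example_scrub_asm_x86():
  import argparse
  raw = '\tmovl\t4(%esp), %eax\n\tretl\n'
  args = argparse.Namespace(x86_scrub_rip=False, extra_scrub=True)
  # With extra_scrub enabled, 'retl' is generalised to 'ret{{[l|q]}}'.
  return scrub_asm_x86(raw, args)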
def scrub_asm_amdgpu(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_arm_eabi(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip kill operands inserted into the asm.
asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_hexagon(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_powerpc(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
  # Strip unimportant comments, but leave the token '#' in place.
asm = SCRUB_LOOP_COMMENT_RE.sub(r'#', asm)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_mips(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_msp430(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_riscv(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_lanai(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_sparc(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_systemz(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_wasm32(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def get_triple_from_march(march):
triples = {
'amdgcn': 'amdgcn',
'r600': 'r600',
'mips': 'mips',
'sparc': 'sparc',
'hexagon': 'hexagon',
}
for prefix, triple in triples.items():
if march.startswith(prefix):
return triple
print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
return 'x86'
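# Illustrative sketch (not part of the original script): march values with a
# known prefix map to a triple, anything else falls back to 'x86' with a warning.
def _example_get_triple_from_march():
  assert get_triple_from_march('mips64el') == 'mips'
  assert get_triple_from_march('amdgcn--amdhsa') == 'amdgcn'
  assert get_triple_from_march('aarch64') == 'x86'  # no matching prefix in the table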
def build_function_body_dictionary_for_triple(args, raw_tool_output, triple, prefixes, func_dict):
target_handlers = {
'i686': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'x86': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
'aarch64-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
'amdgcn': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
'arm': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
'arm64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
'arm64-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
'armv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
'armv7-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),
'thumb': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
'thumb-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
'thumbv5-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
'thumbv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
'mips': (scrub_asm_mips, ASM_FUNCTION_MIPS_RE),
'msp430': (scrub_asm_msp430, ASM_FUNCTION_MSP430_RE),
'ppc32': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
'powerpc': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
'riscv32': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
'riscv64': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
'lanai': (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
'sparc': (scrub_asm_sparc, ASM_FUNCTION_SPARC_RE),
's390x': (scrub_asm_systemz, ASM_FUNCTION_SYSTEMZ_RE),
'wasm32': (scrub_asm_wasm32, ASM_FUNCTION_WASM32_RE),
}
handler = None
best_prefix = ''
for prefix, s in target_handlers.items():
if triple.startswith(prefix) and len(prefix) > len(best_prefix):
handler = s
best_prefix = prefix
if handler is None:
raise KeyError('Triple %r is not supported' % (triple))
scrubber, function_re = handler
common.build_function_body_dictionary(
function_re, scrubber, [args], raw_tool_output, prefixes,
func_dict, args.verbose, False)
##### Generator of assembly CHECK lines
def add_asm_checks(output_lines, comment_marker, prefix_list, func_dict, func_name):
# Label format is based on ASM string.
check_label_format = '{} %s-LABEL: %s%s:'.format(comment_marker)
common.add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name, check_label_format, True, False)
|
|
"""AuthZ Adapter implementations of commenting managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import sessions
from ..osid.osid_errors import Unimplemented, OperationFailed
from ..osid.osid_errors import Unsupported
from ..primitives import Id
from dlkit.authz_adapter.osid import managers as osid_managers
from dlkit.manager_impls.commenting import managers as commenting_managers
class CommentingProfile(osid_managers.OsidProfile, commenting_managers.CommentingProfile):
"""Adapts underlying CommentingProfile methodswith authorization checks."""
    def __init__(self, interface_name=None):
osid_managers.OsidProfile.__init__(self)
def _get_hierarchy_session(self):
try:
return self._provider_manager.get_book_hierarchy_session(
Id(authority='COMMENTING',
namespace='CATALOG',
identifier='BOOK'))
except Unsupported:
return None
def supports_comment_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_lookup()
def supports_comment_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_query()
def supports_comment_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_admin()
def supports_book_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_lookup()
def supports_book_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_admin()
def supports_book_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_hierarchy()
def supports_book_hierarchy_design(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_hierarchy_design()
def get_comment_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_comment_record_types()
comment_record_types = property(fget=get_comment_record_types)
def get_comment_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_comment_search_record_types()
comment_search_record_types = property(fget=get_comment_search_record_types)
def get_book_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_book_record_types()
book_record_types = property(fget=get_book_record_types)
def get_book_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_book_search_record_types()
book_search_record_types = property(fget=get_book_search_record_types)
class CommentingManager(osid_managers.OsidManager, CommentingProfile, commenting_managers.CommentingManager):
"""Adapts underlying CommentingManager methodswith authorization checks."""
def __init__(self):
CommentingProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:commentingProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_manager('COMMENTING', provider_impl) # need to add version argument
def get_comment_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_comment_query_session()
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentLookupSession')(
self._provider_manager.get_comment_lookup_session(),
self._get_authz_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
comment_lookup_session = property(fget=get_comment_lookup_session)
def get_comment_lookup_session_for_book(self, book_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_comment_query_session_for_book(book_id)
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentLookupSession')(
self._provider_manager.get_comment_lookup_session_for_book(book_id),
self._get_authz_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
def get_comment_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_comment_query_session()
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentQuerySession')(
self._provider_manager.get_comment_query_session(),
self._get_authz_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
comment_query_session = property(fget=get_comment_query_session)
def get_comment_query_session_for_book(self, book_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_comment_query_session_for_book(book_id)
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentQuerySession')(
self._provider_manager.get_comment_query_session_for_book(book_id),
self._get_authz_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
def get_comment_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'CommentAdminSession')(
self._provider_manager.get_comment_admin_session(),
self._get_authz_session())
except AttributeError:
raise OperationFailed()
comment_admin_session = property(fget=get_comment_admin_session)
def get_comment_admin_session_for_book(self, book_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
return getattr(sessions, 'CommentAdminSession')(
self._provider_manager.get_comment_admin_session_for_book(book_id),
self._get_authz_session())
except AttributeError:
raise OperationFailed()
def get_book_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookLookupSession')(
self._provider_manager.get_book_lookup_session(),
self._get_authz_session())
except AttributeError:
raise OperationFailed()
book_lookup_session = property(fget=get_book_lookup_session)
def get_book_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookAdminSession')(
self._provider_manager.get_book_admin_session(),
self._get_authz_session())
except AttributeError:
raise OperationFailed()
book_admin_session = property(fget=get_book_admin_session)
def get_book_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookHierarchySession')(
self._provider_manager.get_book_hierarchy_session(),
self._get_authz_session())
except AttributeError:
raise OperationFailed()
book_hierarchy_session = property(fget=get_book_hierarchy_session)
def get_book_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookHierarchyDesignSession')(
self._provider_manager.get_book_hierarchy_design_session(),
self._get_authz_session())
except AttributeError:
raise OperationFailed()
book_hierarchy_design_session = property(fget=get_book_hierarchy_design_session)
def get_commenting_batch_manager(self):
raise Unimplemented()
commenting_batch_manager = property(fget=get_commenting_batch_manager)
class CommentingProxyManager(osid_managers.OsidProxyManager, CommentingProfile, commenting_managers.CommentingProxyManager):
"""Adapts underlying CommentingProxyManager methodswith authorization checks."""
def __init__(self):
CommentingProfile.__init__(self, 'CommentingProxyManager')
def initialize(self, runtime):
osid_managers.OsidProxyManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:commentingProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_proxy_manager('COMMENTING', provider_impl) # need to add version argument
def get_comment_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_comment_query_session(proxy)
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentLookupSession')(
self._provider_manager.get_comment_lookup_session(proxy),
self._get_authz_session(),
proxy,
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
def get_comment_lookup_session_for_book(self, book_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_comment_query_session_for_book(book_id, proxy)
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentLookupSession')(
self._provider_manager.get_comment_lookup_session_for_book(book_id, proxy),
self._get_authz_session(),
proxy,
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
def get_comment_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_comment_query_session(proxy)
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentQuerySession')(
self._provider_manager.get_comment_query_session(proxy),
self._get_authz_session(),
proxy,
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
def get_comment_query_session_for_book(self, book_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_comment_query_session_for_book(book_id, proxy)
query_session.use_federated_book_view()
except Unsupported:
query_session = None
try:
return getattr(sessions, 'CommentQuerySession')(
self._provider_manager.get_comment_query_session_for_book(book_id, proxy),
self._get_authz_session(),
proxy,
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
except AttributeError:
raise OperationFailed()
def get_comment_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'CommentAdminSession')(
self._provider_manager.get_comment_admin_session(proxy),
self._get_authz_session(),
proxy)
except AttributeError:
raise OperationFailed()
def get_comment_admin_session_for_book(self, book_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
return getattr(sessions, 'CommentAdminSession')(
self._provider_manager.get_comment_admin_session_for_book(book_id, proxy),
self._get_authz_session(),
proxy)
except AttributeError:
raise OperationFailed()
def get_book_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookLookupSession')(
self._provider_manager.get_book_lookup_session(proxy),
self._get_authz_session(),
proxy)
except AttributeError:
raise OperationFailed()
def get_book_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookAdminSession')(
self._provider_manager.get_book_admin_session(proxy),
self._get_authz_session(),
proxy)
except AttributeError:
raise OperationFailed()
def get_book_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookHierarchySession')(
self._provider_manager.get_book_hierarchy_session(proxy),
self._get_authz_session(),
proxy)
except AttributeError:
raise OperationFailed()
def get_book_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
return getattr(sessions, 'BookHierarchyDesignSession')(
self._provider_manager.get_book_hierarchy_design_session(proxy),
self._get_authz_session(),
proxy)
except AttributeError:
raise OperationFailed()
def get_commenting_batch_proxy_manager(self):
raise Unimplemented()
commenting_batch_proxy_manager = property(fget=get_commenting_batch_proxy_manager)
|
|
# -*- coding: utf-8 -*-
"""Influence and Outlier Measures
Created on Sun Jan 29 11:16:09 2012
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import lzip
from collections import defaultdict
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
from statsmodels.stats.multitest import multipletests
from statsmodels.tools.tools import maybe_unwrap_results
# outliers test convenience wrapper
def outlier_test(model_results, method='bonf', alpha=.05, labels=None,
order=False):
"""
Outlier Tests for RegressionResults instances.
Parameters
----------
model_results : RegressionResults instance
Linear model results
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be sorted.
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
from scipy import stats # lazy import
infl = getattr(model_results, 'get_influence', None)
if infl is None:
results = maybe_unwrap_results(model_results)
raise AttributeError("model_results object %s does not have a "
"get_influence method." % results.__class__.__name__)
resid = infl().resid_studentized_external
if order:
idx = np.abs(resid).argsort()[::-1]
resid = resid[idx]
if labels is not None:
labels = np.array(labels)[idx].tolist()
df = model_results.df_resid - 1
unadj_p = stats.t.sf(np.abs(resid), df) * 2
adj_p = multipletests(unadj_p, alpha=alpha, method=method)
data = np.c_[resid, unadj_p, adj_p[1]]
if labels is None:
labels = getattr(model_results.model.data, 'row_labels', None)
if labels is not None:
from pandas import DataFrame
return DataFrame(data,
columns=['student_resid', 'unadj_p', method+"(p)"],
index=labels)
return data
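# Illustrative usage sketch (not part of the original module): run the Bonferroni
# outlier test on a small synthetic OLS fit; the data below are made up.
def _example_outlier_test():
    np.random.seed(0)
    exog = np.column_stack([np.ones(50), np.random.randn(50)])
    endog = np.dot(exog, [1.0, 2.0]) + np.random.randn(50)
    res = OLS(endog, exog).fit()
    # Columns: studentized residual, unadjusted p-value, Bonferroni-corrected p-value.
    return outlier_test(res, method='bonf', alpha=0.05)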
#influence measures
def reset_ramsey(res, degree=5):
'''Ramsey's RESET specification test for linear models
This is a general specification test, for additional non-linear effects
in a model.
Notes
-----
The test fits an auxiliary OLS regression where the design matrix, exog,
is augmented by powers 2 to degree of the fitted values. Then it performs
an F-test whether these additional terms are significant.
If the p-value of the f-test is below a threshold, e.g. 0.1, then this
indicates that there might be additional non-linear effects in the model
and that the linear model is mis-specified.
References
----------
http://en.wikipedia.org/wiki/Ramsey_RESET_test
'''
order = degree + 1
k_vars = res.model.exog.shape[1]
#vander without constant and x:
    y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop linear term and constant
exog = np.column_stack((res.model.exog, y_fitted_vander))
res_aux = OLS(res.model.endog, exog).fit()
#r_matrix = np.eye(degree, exog.shape[1], k_vars)
r_matrix = np.eye(degree-1, exog.shape[1], k_vars)
#df1 = degree - 1
#df2 = exog.shape[0] - degree - res.df_model (without constant)
return res_aux.f_test(r_matrix) #, r_matrix, res_aux
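# Illustrative usage sketch (not part of the original module): RESET test on a
# correctly specified linear model, so a large p-value is expected; data made up.
def _example_reset_ramsey():
    np.random.seed(12345)
    exog = np.column_stack([np.ones(100), np.random.randn(100)])
    endog = np.dot(exog, [1.0, 0.5]) + 0.1 * np.random.randn(100)
    res = OLS(endog, exog).fit()
    # F-test on the powers 2..3 of the fitted values added to the regression.
    return reset_ramsey(res, degree=3)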
def variance_inflation_factor(exog, exog_idx):
'''variance inflation factor, VIF, for one exogenous variable
The variance inflation factor is a measure for the increase of the
variance of the parameter estimates if an additional variable, given by
exog_idx is added to the linear regression. It is a measure for
multicollinearity of the design matrix, exog.
One recommendation is that if VIF is greater than 5, then the explanatory
variable given by exog_idx is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this.
Parameters
----------
exog : ndarray, (nobs, k_vars)
design matrix with all explanatory variables, as for example used in
regression
exog_idx : int
index of the exogenous variable in the columns of exog
Returns
-------
vif : float
variance inflation factor
Notes
-----
This function does not save the auxiliary regression.
See Also
--------
xxx : class for regression diagnostics TODO: doesn't exist yet
References
----------
http://en.wikipedia.org/wiki/Variance_inflation_factor
'''
k_vars = exog.shape[1]
x_i = exog[:, exog_idx]
mask = np.arange(k_vars) != exog_idx
x_noti = exog[:, mask]
r_squared_i = OLS(x_i, x_noti).fit().rsquared
vif = 1. / (1. - r_squared_i)
return vif
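# Illustrative usage sketch (not part of the original module): VIF for one of two
# deliberately correlated regressors (plus a constant); data made up.
def _example_variance_inflation_factor():
    np.random.seed(0)
    x1 = np.random.randn(200)
    x2 = x1 + 0.5 * np.random.randn(200)   # strongly correlated with x1
    exog = np.column_stack([np.ones(200), x1, x2])
    # A value well above 1 reflects the collinearity between x1 and x2.
    return variance_inflation_factor(exog, 2)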
class OLSInfluence(object):
'''class to calculate outlier and influence measures for OLS result
Parameters
----------
results : Regression Results instance
currently assumes the results are from an OLS regression
Notes
-----
    One part of the results can be calculated without any auxiliary regression
    (some of which have the `_internal` postfix in the name). Other statistics
    require a leave-one-observation-out (LOOO) auxiliary regression, and will be
    slower (mainly results with the `_external` postfix in the name).
    Of the auxiliary LOOO regressions, only the required results are stored.
Using the LOO measures is currently only recommended if the data set
is not too large. One possible approach for LOOO measures would be to
identify possible problem observations with the _internal measures, and
then run the leave-one-observation-out only with observations that are
    possible outliers. (However, this is not yet available in an automated way.)
This should be extended to general least squares.
    The leave-one-variable-out (LOVO) auxiliary regressions are currently not
    used.
'''
def __init__(self, results):
#check which model is allowed
self.results = maybe_unwrap_results(results)
self.nobs, self.k_vars = results.model.exog.shape
self.endog = results.model.endog
self.exog = results.model.exog
self.model_class = results.model.__class__
self.sigma_est = np.sqrt(results.mse_resid)
self.aux_regression_exog = {}
self.aux_regression_endog = {}
@cache_readonly
def hat_matrix_diag(self):
'''(cached attribute) diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here, this should go to model class
'''
return (self.exog * self.results.model.pinv_wexog.T).sum(1)
@cache_readonly
def resid_press(self):
'''(cached attribute) PRESS residuals
'''
hii = self.hat_matrix_diag
return self.results.resid / (1 - hii)
@cache_readonly
def influence(self):
'''(cached attribute) influence measure
matches the influence measure that gretl reports
u * h / (1 - h)
where u are the residuals and h is the diagonal of the hat_matrix
'''
hii = self.hat_matrix_diag
return self.results.resid * hii / (1 - hii)
@cache_readonly
def hat_diag_factor(self):
'''(cached attribute) factor of diagonal of hat_matrix used in influence
this might be useful for internal reuse
h / (1 - h)
'''
hii = self.hat_matrix_diag
return hii / (1 - hii)
@cache_readonly
def ess_press(self):
'''(cached attribute) error sum of squares of PRESS residuals
'''
return np.dot(self.resid_press, self.resid_press)
@cache_readonly
def resid_studentized_internal(self):
'''(cached attribute) studentized residuals using variance from OLS
this uses sigma from original estimate
does not require leave one out loop
'''
return self.get_resid_studentized_external(sigma=None)
#return self.results.resid / self.sigma_est
@cache_readonly
def resid_studentized_external(self):
'''(cached attribute) studentized residuals using LOOO variance
this uses sigma from leave-one-out estimates
requires leave one out loop for observations
'''
sigma_looo = np.sqrt(self.sigma2_not_obsi)
return self.get_resid_studentized_external(sigma=sigma_looo)
def get_resid_studentized_external(self, sigma=None):
'''calculate studentized residuals
Parameters
----------
sigma : None or float
estimate of the standard deviation of the residuals. If None, then
the estimate from the regression results is used.
Returns
-------
stzd_resid : ndarray
studentized residuals
Notes
-----
studentized residuals are defined as ::
resid / sigma / np.sqrt(1 - hii)
where resid are the residuals from the regression, sigma is an
estimate of the standard deviation of the residuals, and hii is the
diagonal of the hat_matrix.
'''
hii = self.hat_matrix_diag
if sigma is None:
sigma2_est = self.results.mse_resid
            #can be replaced by a different estimator of sigma
sigma = np.sqrt(sigma2_est)
return self.results.resid / sigma / np.sqrt(1 - hii)
@cache_readonly
def dffits_internal(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_internal
uses original results, no nobs loop
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_internal * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dffits(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_external,
uses results from leave-one-observation-out loop
        It is recommended that observations with dffits larger than a
        threshold of 2 * sqrt(k / n), where k is the number of parameters and
        n the number of observations, should be investigated.
Returns
-------
        dffits : ndarray
dffits_threshold : float
References
----------
`Wikipedia <http://en.wikipedia.org/wiki/DFFITS>`_
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_external * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dfbetas(self):
'''(cached attribute) dfbetas
uses results from leave-one-observation-out loop
'''
dfbetas = self.results.params - self.params_not_obsi#[None,:]
dfbetas /= np.sqrt(self.sigma2_not_obsi[:,None])
dfbetas /= np.sqrt(np.diag(self.results.normalized_cov_params))
return dfbetas
@cache_readonly
def sigma2_not_obsi(self):
'''(cached attribute) error variance for all LOOO regressions
This is 'mse_resid' from each auxiliary regression.
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['mse_resid'])
@cache_readonly
def params_not_obsi(self):
'''(cached attribute) parameter estimates for all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['params'])
@cache_readonly
def det_cov_params_not_obsi(self):
'''(cached attribute) determinant of cov_params of all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['det_cov_params'])
@cache_readonly
def cooks_distance(self):
        '''(cached attribute) Cook's distance
uses original results, no nobs loop
'''
hii = self.hat_matrix_diag
#Eubank p.93, 94
cooks_d2 = self.resid_studentized_internal**2 / self.k_vars
cooks_d2 *= hii / (1 - hii)
from scipy import stats
#alpha = 0.1
#print stats.f.isf(1-alpha, n_params, res.df_modelwc)
pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)
return cooks_d2, pvals
@cache_readonly
def cov_ratio(self):
'''(cached attribute) covariance ratio between LOOO and original
This uses determinant of the estimate of the parameter covariance
from leave-one-out estimates.
requires leave one out loop for observations
'''
#don't use inplace division / because then we change original
cov_ratio = (self.det_cov_params_not_obsi
/ np.linalg.det(self.results.cov_params()))
return cov_ratio
@cache_readonly
def resid_var(self):
'''(cached attribute) estimate of variance of the residuals
::
sigma2 = sigma2_OLS * (1 - hii)
where hii is the diagonal of the hat matrix
'''
#TODO:check if correct outside of ols
return self.results.mse_resid * (1 - self.hat_matrix_diag)
@cache_readonly
def resid_std(self):
'''(cached attribute) estimate of standard deviation of the residuals
See Also
--------
resid_var
'''
return np.sqrt(self.resid_var)
def _ols_xnoti(self, drop_idx, endog_idx='endog', store=True):
'''regression results from LOVO auxiliary regression with cache
The result instances are stored, which could use a large amount of
memory if the datasets are large. There are too many combinations to
store them all, except for small problems.
Parameters
----------
drop_idx : int
index of exog that is dropped from the regression
endog_idx : 'endog' or int
If 'endog', then the endogenous variable of the result instance
is regressed on the exogenous variables, excluding the one at
drop_idx. If endog_idx is an integer, then the exog with that
index is regressed with OLS on all other exogenous variables.
(The latter is the auxiliary regression for the variance inflation
factor.)
this needs more thought, memory versus speed
not yet used in any other parts, not sufficiently tested
'''
        #reverse the structure: look in the cache first, calculate on a cache miss
        if endog_idx == 'endog':
            stored = self.aux_regression_endog
            x_i = self.results.model.endog
        else:
            #nested dictionary keyed by endog_idx, then drop_idx
            if store:
                stored = self.aux_regression_exog.setdefault(endog_idx, {})
            else:
                stored = self.aux_regression_exog.get(endog_idx, {})
            x_i = self.exog[:, endog_idx]
        if drop_idx in stored:
            return stored[drop_idx]
k_vars = self.exog.shape[1]
mask = np.arange(k_vars) != drop_idx
x_noti = self.exog[:, mask]
res = OLS(x_i, x_noti).fit()
if store:
stored[drop_idx] = res
return res
def _get_drop_vari(self, attributes):
'''regress endog on exog without one of the variables
This uses a k_vars loop, only attributes of the OLS instance are stored.
Parameters
----------
attributes : list of strings
These are the names of the attributes of the auxiliary OLS results
instance that are stored and returned.
not yet used
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
endog = self.results.model.endog
exog = self.exog
cv_iter = LeaveOneOut(self.k_vars)
res_loo = defaultdict(list)
for inidx, outidx in cv_iter:
for att in attributes:
res_i = self.model_class(endog, exog[:,inidx]).fit()
res_loo[att].append(getattr(res_i, att))
return res_loo
@cache_readonly
def _res_looo(self):
'''collect required results from the LOOO loop
all results will be attached.
currently only 'params', 'mse_resid', 'det_cov_params' are stored
regresses endog on exog dropping one observation at a time
this uses a nobs loop, only attributes of the OLS instance are stored.
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
get_det_cov_params = lambda res: np.linalg.det(res.cov_params())
endog = self.endog
exog = self.exog
        params = np.zeros(exog.shape, dtype=float)
        mse_resid = np.zeros(endog.shape, dtype=float)
        det_cov_params = np.zeros(endog.shape, dtype=float)
cv_iter = LeaveOneOut(self.nobs)
for inidx, outidx in cv_iter:
res_i = self.model_class(endog[inidx], exog[inidx]).fit()
params[outidx] = res_i.params
mse_resid[outidx] = res_i.mse_resid
det_cov_params[outidx] = get_det_cov_params(res_i)
return dict(params=params, mse_resid=mse_resid,
det_cov_params=det_cov_params)
def summary_frame(self):
"""
Creates a DataFrame with all available influence results.
Returns
-------
frame : DataFrame
A DataFrame with all results.
Notes
-----
The resultant DataFrame contains six variables in addition to the
DFBETAS. These are:
* cooks_d : Cook's Distance defined in `Influence.cooks_distance`
* standard_resid : Standardized residuals defined in
`Influence.resid_studentized_internal`
* hat_diag : The diagonal of the projection, or hat, matrix defined in
`Influence.hat_matrix_diag`
* dffits_internal : DFFITS statistics using internally Studentized
residuals defined in `Influence.dffits_internal`
* dffits : DFFITS statistics using externally Studentized residuals
defined in `Influence.dffits`
* student_resid : Externally Studentized residuals defined in
`Influence.resid_studentized_external`
"""
from pandas import DataFrame
# row and column labels
data = self.results.model.data
row_labels = data.row_labels
beta_labels = ['dfb_' + i for i in data.xnames]
# grab the results
summary_data = DataFrame(dict(
cooks_d = self.cooks_distance[0],
standard_resid = self.resid_studentized_internal,
hat_diag = self.hat_matrix_diag,
dffits_internal = self.dffits_internal[0],
student_resid = self.resid_studentized_external,
dffits = self.dffits[0],
),
index = row_labels)
#NOTE: if we don't give columns, order of above will be arbitrary
dfbeta = DataFrame(self.dfbetas, columns=beta_labels,
index=row_labels)
return dfbeta.join(summary_data)
def summary_table(self, float_fmt="%6.3f"):
'''create a summary table with all influence and outlier measures
        This currently does not distinguish between statistics that can be
        calculated from the original regression results and those for which a
        leave-one-observation-out loop is needed.
Returns
-------
res : SimpleTable instance
SimpleTable instance with the results, can be printed
Notes
-----
This also attaches table_data to the instance.
'''
#print self.dfbetas
# table_raw = [ np.arange(self.nobs),
# self.endog,
# self.fittedvalues,
# self.cooks_distance(),
# self.resid_studentized_internal,
# self.hat_matrix_diag,
# self.dffits_internal,
# self.resid_studentized_external,
# self.dffits,
# self.dfbetas
# ]
table_raw = [ ('obs', np.arange(self.nobs)),
('endog', self.endog),
('fitted\nvalue', self.results.fittedvalues),
("Cook's\nd", self.cooks_distance[0]),
("student.\nresidual", self.resid_studentized_internal),
('hat diag', self.hat_matrix_diag),
('dffits \ninternal', self.dffits_internal[0]),
("ext.stud.\nresidual", self.resid_studentized_external),
('dffits', self.dffits[0])
]
colnames, data = lzip(*table_raw) #unzip
data = np.column_stack(data)
self.table_data = data
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
return SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
def summary_table(res, alpha=0.05):
'''generate summary table of outlier and influence similar to SAS
Parameters
----------
alpha : float
significance level for confidence interval
Returns
-------
st : SimpleTable instance
table with results that can be printed
data : ndarray
calculated measures and statistics for the table
ss2 : list of strings
column_names for table (Note: rows of table are observations)
'''
from scipy import stats
from statsmodels.sandbox.regression.predstd import wls_prediction_std
infl = OLSInfluence(res)
#standard error for predicted mean
#Note: using hat_matrix only works for fitted values
predict_mean_se = np.sqrt(infl.hat_matrix_diag*res.mse_resid)
tppf = stats.t.isf(alpha/2., res.df_resid)
predict_mean_ci = np.column_stack([
res.fittedvalues - tppf * predict_mean_se,
res.fittedvalues + tppf * predict_mean_se])
#standard error for predicted observation
predict_se, predict_ci_low, predict_ci_upp = wls_prediction_std(res)
predict_ci = np.column_stack((predict_ci_low, predict_ci_upp))
#standard deviation of residual
resid_se = np.sqrt(res.mse_resid * (1 - infl.hat_matrix_diag))
table_sm = np.column_stack([
np.arange(res.nobs) + 1,
res.model.endog,
res.fittedvalues,
predict_mean_se,
predict_mean_ci[:,0],
predict_mean_ci[:,1],
predict_ci[:,0],
predict_ci[:,1],
res.resid,
resid_se,
infl.resid_studentized_internal,
infl.cooks_distance[0]
])
#colnames, data = lzip(*table_raw) #unzip
data = table_sm
    ss2 = ['Obs', 'Dep Var\nPopulation', 'Predicted\nValue',
           'Std Error\nMean Predict', 'Mean ci\n95% low', 'Mean ci\n95% upp',
           'Predict ci\n95% low', 'Predict ci\n95% upp', 'Residual',
           'Std Error\nResidual', 'Student\nResidual', "Cook's\nD"]
colnames = ss2
#self.table_data = data
#data = np.column_stack(data)
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
st = SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
return st, data, ss2
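# A short usage sketch for the module-level helper above, assuming a fitted
# OLS results instance `res` (illustrative only):
#
#   st, data, ss2 = summary_table(res, alpha=0.05)
#   print(st)              # SAS-style outlier/influence table
#   cooks_d = data[:, -1]  # last column is Cook's D, per the `ss2` labels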
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import operator
import os
import sys
import weakref
import ryu.contrib
ryu.contrib.update_module_path()
import ovs.db.data
import ovs.db.parser
import ovs.db.schema
import ovs.db.types
import ovs.json
import ovs.poller
from ovs import (jsonrpc,
ovsuuid,
stream)
from ovs.db import idl
from ryu.lib import hub
from ryu.lib.ovs import vswitch_idl
LOG = logging.getLogger(__name__) # use ovs.vlog?
# for debug
def ovsrec_row_changes_to_string(ovsrec_row):
if not ovsrec_row._changes:
return ovsrec_row._changes
return dict((key, value.to_string())
for key, value in ovsrec_row._changes.items())
# for debug
def ovsrec_row_to_string(ovsrec_row):
output = ''
output += 'uuid: %s ' % ovsrec_row.uuid
if ovsrec_row._data:
output += '_data: %s ' % dict((key, value.to_string()) for key, value
in ovsrec_row._data.items())
else:
output += '_data: %s ' % ovsrec_row._data
output += '_changes: %s' % ovsrec_row_changes_to_string(ovsrec_row)
return output
def atom_from_string(base, value_string, symtab=None):
type_ = base.type
atom = None
if type_ == ovs.db.types.IntegerType:
atom = ovs.db.data.Atom(type_, int(value_string))
elif type_ == ovs.db.types.RealType:
# TODO:XXX negation
atom = ovs.db.data.Atom(
type_, ovs.db.parser.float_to_int(float(value_string)))
elif type_ == ovs.db.types.BooleanType:
if value_string in ("true", "yes", "on", "1"):
atom = ovs.db.data.Atom(type_, True)
        elif value_string in ("false", "no", "off", "0"):
atom = ovs.db.data.Atom(type_, False)
elif type_ == ovs.db.types.StringType:
# TODO:XXXX escape: if value_string[0] == '"':
atom = ovs.db.data.Atom(type_, value_string)
elif type_ == ovs.db.types.UuidType:
if value_string[0] == "@":
assert symtab is not None
uuid_ = symtab[value_string]
atom = ovs.db.data.Atom(type_, uuid_)
else:
atom = ovs.db.data.Atom(type_,
ovs.ovsuuid.from_string(value_string))
if atom is None:
raise ValueError("expected %s" % type_.to_string(), value_string)
atom.check_constraints(base)
return atom
def datum_from_string(type_, value_string, symtab=None):
value_string = value_string.strip()
if type_.is_map():
if value_string.startswith('{'):
# TODO:dict case
LOG.debug('value_string %s', value_string)
raise NotImplementedError()
d = dict(v.split('=', 1) for v in value_string.split(','))
d = dict((atom_from_string(type_.key, key, symtab),
atom_from_string(type_.value, value, symtab))
for key, value in d.items())
elif type_.is_set():
if value_string.startswith('['):
# TODO:set case
LOG.debug('value_string %s', value_string)
raise NotImplementedError()
values = value_string.split(',')
d = dict((atom_from_string(type_.key, value, symtab), None)
for value in values)
else:
atom = atom_from_string(type_.key, value_string, symtab)
d = {atom: None}
datum = ovs.db.data.Datum(type_, d)
return datum.to_json()
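# Illustrative sketch of the helpers above (values are hypothetical; the type
# objects would normally come from the OVSDB schema):
#
#   for a map column:  datum_from_string(type_, 'max-rate=1000000')
#   for a set column:  datum_from_string(type_, 'a,b,c')
#   for a scalar:      datum_from_string(type_, '10')
#
# Each call returns the JSON form produced by ovs.db.data.Datum.to_json().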
def ifind(pred, seq):
try:
return itertools.ifilter(pred, seq).next()
except StopIteration:
return None
def not_reached():
os.abort()
def vsctl_fatal(msg):
LOG.error(msg)
    raise Exception(msg)  # do not call ovs.util.ovs_fatal(), so callers can handle it
class VSCtlBridge(object):
def __init__(self, ovsrec_bridge, name, parent, vlan):
super(VSCtlBridge, self).__init__()
self.br_cfg = ovsrec_bridge
self.name = name
self.ports = set()
self.parent = parent
self.vlan = vlan
self.children = set() # WeakSet is needed?
def find_vlan_bridge(self, vlan):
return ifind(lambda child: child.vlan == vlan, self.children)
class VSCtlPort(object):
def __init__(self, vsctl_bridge_parent, ovsrec_port):
super(VSCtlPort, self).__init__()
self.bridge = weakref.ref(vsctl_bridge_parent) # backpointer
self.port_cfg = ovsrec_port
self.ifaces = set()
self.qos = None
class VSCtlIface(object):
def __init__(self, vsctl_port_parent, ovsrec_iface):
super(VSCtlIface, self).__init__()
self.port = weakref.ref(vsctl_port_parent) # backpointer
self.iface_cfg = ovsrec_iface
class VSCtlQoS(object):
def __init__(self, vsctl_port_parent, ovsrec_qos):
super(VSCtlQoS, self).__init__()
self.port = weakref.ref(vsctl_port_parent)
self.qos_cfg = ovsrec_qos
self.queues = set()
class VSCtlQueue(object):
def __init__(self, vsctl_qos_parent, ovsrec_queue):
super(VSCtlQueue, self).__init__()
self.qos = weakref.ref(vsctl_qos_parent)
self.queue_cfg = ovsrec_queue
class VSCtlContext(object):
def _invalidate_cache(self):
self.cache_valid = False
self.bridges.clear()
self.ports.clear()
self.ifaces.clear()
def __init__(self, idl_, txn, ovsrec_open_vswitch):
super(VSCtlContext, self).__init__()
# Modifiable state
# self.table = None
self.idl = idl_
self.txn = txn
self.ovs = ovsrec_open_vswitch
self.symtab = None # TODO:XXX
self.verified_ports = False
# A cache of the contents of the database.
self.cache_valid = False
self.bridges = {} # bridge name -> VSCtlBridge
self.ports = {} # port name -> VSCtlPort
self.ifaces = {} # iface name -> VSCtlIface
self.try_again = False # used by wait-until command
def done(self):
self._invalidate_cache()
def verify_bridges(self):
self.ovs.verify(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES)
def verify_ports(self):
if self.verified_ports:
return
self.verify_bridges()
for ovsrec_bridge in self.idl.tables[
vswitch_idl.OVSREC_TABLE_BRIDGE].rows.values():
ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
for ovsrec_port in self.idl.tables[
vswitch_idl.OVSREC_TABLE_PORT].rows.values():
ovsrec_port.verify(vswitch_idl.OVSREC_PORT_COL_INTERFACES)
self.verified_ports = True
def add_bridge_to_cache(self, ovsrec_bridge, name, parent, vlan):
vsctl_bridge = VSCtlBridge(ovsrec_bridge, name, parent, vlan)
if parent:
parent.children.add(vsctl_bridge)
self.bridges[name] = vsctl_bridge
return vsctl_bridge
def del_cached_bridge(self, vsctl_bridge):
assert not vsctl_bridge.ports
assert not vsctl_bridge.children
parent = vsctl_bridge.parent
if parent:
parent.children.remove(vsctl_bridge)
vsctl_bridge.parent = None # break circular reference
ovsrec_bridge = vsctl_bridge.br_cfg
if ovsrec_bridge:
ovsrec_bridge.delete()
self.ovs_delete_bridge(ovsrec_bridge)
del self.bridges[vsctl_bridge.name]
def del_cached_qos(self, vsctl_qos):
vsctl_qos.port().qos = None
vsctl_qos.port = None
vsctl_qos.queues = None
def add_port_to_cache(self, vsctl_bridge_parent, ovsrec_port):
tag = getattr(ovsrec_port, vswitch_idl.OVSREC_PORT_COL_TAG, None)
if (tag is not None and tag >= 0 and tag < 4096):
            vlan_bridge = vsctl_bridge_parent.find_vlan_bridge(tag)
if vlan_bridge:
vsctl_bridge_parent = vlan_bridge
vsctl_port = VSCtlPort(vsctl_bridge_parent, ovsrec_port)
vsctl_bridge_parent.ports.add(vsctl_port)
self.ports[ovsrec_port.name] = vsctl_port
return vsctl_port
def del_cached_port(self, vsctl_port):
assert not vsctl_port.ifaces
vsctl_port.bridge().ports.remove(vsctl_port)
vsctl_port.bridge = None
port = self.ports.pop(vsctl_port.port_cfg.name)
assert port == vsctl_port
vsctl_port.port_cfg.delete()
def add_iface_to_cache(self, vsctl_port_parent, ovsrec_iface):
vsctl_iface = VSCtlIface(vsctl_port_parent, ovsrec_iface)
vsctl_port_parent.ifaces.add(vsctl_iface)
self.ifaces[ovsrec_iface.name] = vsctl_iface
def add_qos_to_cache(self, vsctl_port_parent, ovsrec_qos):
vsctl_qos = VSCtlQoS(vsctl_port_parent, ovsrec_qos)
vsctl_port_parent.qos = vsctl_qos
return vsctl_qos
def add_queue_to_cache(self, vsctl_qos_parent, ovsrec_queue):
vsctl_queue = VSCtlQueue(vsctl_qos_parent, ovsrec_queue)
vsctl_qos_parent.queues.add(vsctl_queue)
def del_cached_iface(self, vsctl_iface):
vsctl_iface.port().ifaces.remove(vsctl_iface)
vsctl_iface.port = None
del self.ifaces[vsctl_iface.iface_cfg.name]
vsctl_iface.iface_cfg.delete()
def invalidate_cache(self):
if not self.cache_valid:
return
self._invalidate_cache()
def populate_cache(self):
self._populate_cache(self.idl.tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
@staticmethod
def port_is_fake_bridge(ovsrec_port):
return (ovsrec_port.fake_bridge and
ovsrec_port.tag >= 0 and ovsrec_port.tag <= 4095)
def _populate_cache(self, ovsrec_bridges):
if self.cache_valid:
return
self.cache_valid = True
bridges = set()
ports = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
LOG.warn('%s: database contains duplicate bridge name', name)
bridges.add(name)
vsctl_bridge = self.add_bridge_to_cache(ovsrec_bridge, name,
None, 0)
if not vsctl_bridge:
continue
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
if port_name in ports:
# Duplicate ovsrec_port name.
# (We will warn about that later.)
continue
ports.add(port_name)
if (self.port_is_fake_bridge(ovsrec_port) and
port_name not in bridges):
bridges.add(port_name)
self.add_bridge_to_cache(None, port_name, vsctl_bridge,
ovsrec_port.tag)
bridges = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
continue
bridges.add(name)
vsctl_bridge = self.bridges[name]
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
vsctl_port = self.ports.get(port_name)
if vsctl_port:
if ovsrec_port == vsctl_port.port_cfg:
LOG.warn('%s: vsctl_port is in multiple bridges '
'(%s and %s)',
port_name, vsctl_bridge.name,
                                 vsctl_port.bridge().name)
else:
LOG.error('%s: database contains duplicate '
'vsctl_port name',
ovsrec_port.name)
continue
if (self.port_is_fake_bridge(ovsrec_port) and
port_name in bridges):
continue
# LOG.debug('ovsrec_port %s %s %s',
# ovsrec_port, ovsrec_port._data, ovsrec_port.tag)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
# LOG.debug('vsctl_port %s', vsctl_port)
for ovsrec_iface in ovsrec_port.interfaces:
iface = self.ifaces.get(ovsrec_iface.name)
if iface:
if ovsrec_iface == iface.iface_cfg:
LOG.warn(
'%s: interface is in multiple ports '
'(%s and %s)',
ovsrec_iface.name,
iface.port().port_cfg.name,
vsctl_port.port_cfg.name)
else:
LOG.error(
'%s: database contains duplicate interface '
'name',
ovsrec_iface.name)
continue
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
ovsrec_qos = ovsrec_port.qos
vsctl_qos = self.add_qos_to_cache(vsctl_port, ovsrec_qos)
if len(ovsrec_qos):
for ovsrec_queue in ovsrec_qos[0].queues:
self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
def check_conflicts(self, name, msg):
self.verify_ports()
if name in self.bridges:
vsctl_fatal('%s because a bridge named %s already exists' %
(msg, name))
if name in self.ports:
vsctl_fatal('%s because a port named %s already exists on '
'bridge %s' %
(msg, name, self.ports[name].bridge().name))
if name in self.ifaces:
vsctl_fatal('%s because an interface named %s already '
'exists on bridge %s' %
(msg, name, self.ifaces[name].port().bridge().name))
def find_bridge(self, name, must_exist):
assert self.cache_valid
vsctl_bridge = self.bridges.get(name)
if must_exist and not vsctl_bridge:
vsctl_fatal('no bridge named %s' % name)
self.verify_bridges()
return vsctl_bridge
def find_real_bridge(self, name, must_exist):
vsctl_bridge = self.find_bridge(name, must_exist)
if vsctl_bridge and vsctl_bridge.parent:
vsctl_fatal('%s is a fake bridge' % name)
return vsctl_bridge
def find_bridge_by_id(self, datapath_id, must_exist):
assert self.cache_valid
for vsctl_bridge in self.bridges.values():
if vsctl_bridge.br_cfg.datapath_id[0].strip('"') == datapath_id:
self.verify_bridges()
return vsctl_bridge
if must_exist:
vsctl_fatal('no bridge id %s' % datapath_id)
return None
def find_port(self, name, must_exist):
assert self.cache_valid
vsctl_port = self.ports.get(name)
if vsctl_port and name == vsctl_port.bridge().name:
vsctl_port = None
if must_exist and not vsctl_port:
vsctl_fatal('no vsctl_port named %s' % name)
return vsctl_port
def find_iface(self, name, must_exist):
assert self.cache_valid
vsctl_iface = self.ifaces.get(name)
if vsctl_iface and name == vsctl_iface.port().bridge().name:
vsctl_iface = None
if must_exist and not vsctl_iface:
vsctl_fatal('no interface named %s' % name)
self.verify_ports()
return vsctl_iface
def set_qos(self, vsctl_port, type, max_rate):
qos = vsctl_port.qos.qos_cfg
if not len(qos):
ovsrec_qos = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
vsctl_port.port_cfg.qos = [ovsrec_qos]
else:
ovsrec_qos = qos[0]
ovsrec_qos.type = type
if max_rate is not None:
self.set_column(ovsrec_qos, 'other_config', 'max-rate', max_rate)
self.add_qos_to_cache(vsctl_port, [ovsrec_qos])
return ovsrec_qos
def set_queue(self, vsctl_qos, max_rate, min_rate,
queue_id):
ovsrec_qos = vsctl_qos.qos_cfg[0]
try:
ovsrec_queue = ovsrec_qos.queues[queue_id]
except (AttributeError, KeyError):
ovsrec_queue = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QUEUE])
if max_rate is not None:
self.set_column(ovsrec_queue, 'other_config',
'max-rate', max_rate)
if min_rate is not None:
self.set_column(ovsrec_queue, 'other_config',
'min-rate', min_rate)
self.set_column(ovsrec_qos, 'queues', queue_id,
['uuid', str(ovsrec_queue.uuid)])
self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
return ovsrec_queue
@staticmethod
def _column_set(ovsrec_row, column, ovsrec_value):
# need to trigger Row.__setattr__()
setattr(ovsrec_row, column, ovsrec_value)
@staticmethod
def _column_insert(ovsrec_row, column, ovsrec_add):
value = getattr(ovsrec_row, column)
value.append(ovsrec_add)
VSCtlContext._column_set(ovsrec_row, column, value)
@staticmethod
def _column_delete(ovsrec_row, column, ovsrec_del):
value = getattr(ovsrec_row, column)
try:
value.remove(ovsrec_del)
except ValueError:
# Datum.to_python() with _uuid_to_row trims down deleted
# references. If ovsrec_del.delete() is called before
# _column_delete(), value doesn't include ovsrec_del.
pass
VSCtlContext._column_set(ovsrec_row, column, value)
@staticmethod
def bridge_insert_port(ovsrec_bridge, ovsrec_port):
VSCtlContext._column_insert(ovsrec_bridge,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
ovsrec_port)
@staticmethod
def bridge_delete_port(ovsrec_bridge, ovsrec_port):
VSCtlContext._column_delete(ovsrec_bridge,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
ovsrec_port)
@staticmethod
def port_delete_qos(ovsrec_port, ovsrec_qos):
VSCtlContext._column_delete(ovsrec_port,
vswitch_idl.OVSREC_PORT_COL_QOS,
ovsrec_qos)
def ovs_insert_bridge(self, ovsrec_bridge):
self._column_insert(self.ovs,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
ovsrec_bridge)
def ovs_delete_bridge(self, ovsrec_bridge):
self._column_delete(self.ovs,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
ovsrec_bridge)
def del_port(self, vsctl_port):
if vsctl_port.bridge().parent:
ovsrec_bridge = vsctl_port.bridge().parent.br_cfg
else:
ovsrec_bridge = vsctl_port.bridge().br_cfg
self.bridge_delete_port(ovsrec_bridge, vsctl_port.port_cfg)
for vsctl_iface in vsctl_port.ifaces.copy():
self.del_cached_iface(vsctl_iface)
self.del_cached_port(vsctl_port)
def del_bridge(self, vsctl_bridge):
for child in vsctl_bridge.children.copy():
self.del_bridge(child)
for vsctl_port in vsctl_bridge.ports.copy():
self.del_port(vsctl_port)
self.del_cached_bridge(vsctl_bridge)
def del_qos(self, vsctl_qos):
ovsrec_port = vsctl_qos.port().port_cfg
ovsrec_qos = vsctl_qos.qos_cfg
if len(ovsrec_qos):
self.port_delete_qos(ovsrec_port, ovsrec_qos[0])
self.del_cached_qos(vsctl_qos)
def add_port(self, br_name, port_name, may_exist, fake_iface,
iface_names, settings=None):
"""
:type settings: list of (column, key, value_json)
where column and key are str,
value_json is json that is represented
by Datum.to_json()
"""
settings = settings or []
self.populate_cache()
if may_exist:
vsctl_port = self.find_port(port_name, False)
if vsctl_port:
want_names = set(iface_names)
have_names = set(ovsrec_iface.name for ovsrec_iface in
vsctl_port.port_cfg.interfaces)
if vsctl_port.bridge().name != br_name:
vsctl_fatal('"%s" but %s is actually attached to '
'vsctl_bridge %s',
br_name, port_name, vsctl_port.bridge().name)
if want_names != have_names:
want_names_string = ','.join(want_names)
have_names_string = ','.join(have_names)
vsctl_fatal('"%s" but %s actually has interface(s) %s' %
(want_names_string,
port_name, have_names_string))
return
self.check_conflicts(port_name,
'cannot create a port named %s' % port_name)
for iface_name in iface_names:
self.check_conflicts(
iface_name, 'cannot create an interface named %s' % iface_name)
vsctl_bridge = self.find_bridge(br_name, True)
ifaces = []
for iface_name in iface_names:
ovsrec_iface = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = iface_name
ifaces.append(ovsrec_iface)
ovsrec_port = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = port_name
ovsrec_port.interfaces = ifaces
ovsrec_port.bond_fake_iface = fake_iface
if vsctl_bridge.parent:
tag = vsctl_bridge.vlan
ovsrec_port.tag = tag
for setting in settings:
# TODO:XXX self.symtab:
column, key, value = setting
self.set_column(ovsrec_port, column, key, value)
if vsctl_bridge.parent:
ovsrec_bridge = vsctl_bridge.parent.br_cfg
else:
ovsrec_bridge = vsctl_bridge.br_cfg
self.bridge_insert_port(ovsrec_bridge, ovsrec_port)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
for ovsrec_iface in ifaces:
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
def add_bridge(self, br_name, parent_name=None, vlan=0, may_exist=False):
self.populate_cache()
if may_exist:
vsctl_bridge = self.find_bridge(br_name, False)
if vsctl_bridge:
if not parent_name:
if vsctl_bridge.parent:
vsctl_fatal('"--may-exist add-vsctl_bridge %s" '
'but %s is a VLAN bridge for VLAN %d' %
(br_name, br_name, vsctl_bridge.vlan))
else:
if not vsctl_bridge.parent:
vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
'but %s is not a VLAN bridge' %
(br_name, parent_name, vlan, br_name))
elif vsctl_bridge.parent.name != parent_name:
vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
'but %s has the wrong parent %s' %
(br_name, parent_name, vlan,
br_name, vsctl_bridge.parent.name))
elif vsctl_bridge.vlan != vlan:
vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
'but %s is a VLAN bridge for the wrong '
'VLAN %d' %
(br_name, parent_name, vlan, br_name,
vsctl_bridge.vlan))
return
self.check_conflicts(br_name,
'cannot create a bridge named %s' % br_name)
txn = self.txn
tables = self.idl.tables
if not parent_name:
ovsrec_iface = txn.insert(
tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = br_name
ovsrec_iface.type = 'internal'
ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = br_name
ovsrec_port.interfaces = [ovsrec_iface]
ovsrec_port.fake_bridge = False
ovsrec_bridge = txn.insert(tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
ovsrec_bridge.name = br_name
ovsrec_bridge.ports = [ovsrec_port]
self.ovs_insert_bridge(ovsrec_bridge)
else:
parent = self.find_bridge(parent_name, False)
if parent and parent.parent:
vsctl_fatal('cannot create bridge with fake bridge as parent')
if not parent:
vsctl_fatal('parent bridge %s does not exist' % parent_name)
ovsrec_iface = txn.insert(
tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = br_name
ovsrec_iface.type = 'internal'
ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = br_name
ovsrec_port.interfaces = [ovsrec_iface]
ovsrec_port.fake_bridge = True
ovsrec_port.tag = vlan
self.bridge_insert_port(parent.br_cfg, ovsrec_port)
self.invalidate_cache()
@staticmethod
def parse_column_key_value(table_schema, setting_string):
"""
parse <column>[:<key>]=<value>
"""
column_value = setting_string.split('=', 1)
if len(column_value) == 1:
column = column_value[0]
value = None
else:
column, value = column_value
if ':' in column:
column, key = column.split(':', 1)
else:
key = None
if value is not None:
LOG.debug("columns %s", table_schema.columns.keys())
type_ = table_schema.columns[column].type
value = datum_from_string(type_, value)
LOG.debug("column %s value %s", column, value)
return (column, key, value)
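    # Sketch of the accepted setting forms (names/values are hypothetical):
    #
    #   'tag=10'                     -> ('tag', None, <parsed value json>)
    #   'other_config:max-rate=1e6'  -> ('other_config', 'max-rate', ...)
    #   'qos'                        -> ('qos', None, None)
    #
    # The value, when present, is parsed with datum_from_string() against the
    # column's schema type.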
def set_column(self, ovsrec_row, column, key, value_json):
if column not in ovsrec_row._table.columns:
vsctl_fatal('%s does not contain a column whose name matches "%s"'
% (ovsrec_row._table.name, column))
column_schema = ovsrec_row._table.columns[column]
if key is not None:
value_json = ['map', [[key, value_json]]]
if column_schema.type.value.type == ovs.db.types.VoidType:
vsctl_fatal('cannot specify key to set for non-map column %s' %
column)
datum = ovs.db.data.Datum.from_json(column_schema.type, value_json,
self.symtab)
values = getattr(ovsrec_row, column, {})
values.update(datum.to_python(ovs.db.idl._uuid_to_row))
setattr(ovsrec_row, column, values)
else:
datum = ovs.db.data.Datum.from_json(column_schema.type, value_json,
self.symtab)
setattr(ovsrec_row, column,
datum.to_python(ovs.db.idl._uuid_to_row))
def _get_row_by_id(self, table_name, vsctl_row_id, record_id):
if not vsctl_row_id.table:
return None
if not vsctl_row_id.name_column:
if record_id != '.':
return None
values = self.idl.tables[vsctl_row_id.table].rows.values()
            if not values or len(values) > 1:
return None
referrer = values[0]
else:
referrer = None
for ovsrec_row in self.idl.tables[
vsctl_row_id.table].rows.values():
name = getattr(ovsrec_row, vsctl_row_id.name_column)
assert type(name) in (list, str, unicode)
if type(name) != list and name == record_id:
if (referrer):
vsctl_fatal('multiple rows in %s match "%s"' %
(table_name, record_id))
referrer = ovsrec_row
if not referrer:
return None
final = None
if vsctl_row_id.uuid_column:
referrer.verify(vsctl_row_id.uuid_column)
uuid = getattr(referrer, vsctl_row_id.uuid_column)
uuid_ = referrer._data[vsctl_row_id.uuid_column]
assert uuid_.type.key.type == ovs.db.types.UuidType
assert uuid_.type.value is None
assert type(uuid) == list
if len(uuid) == 1:
final = uuid[0]
else:
final = referrer
return final
def get_row(self, vsctl_table, record_id):
table_name = vsctl_table.table_name
if ovsuuid.is_valid_string(record_id):
uuid = ovsuuid.from_string(record_id)
return self.idl.tables[table_name].rows.get(uuid)
else:
for vsctl_row_id in vsctl_table.row_ids:
ovsrec_row = self._get_row_by_id(table_name, vsctl_row_id,
record_id)
if ovsrec_row:
return ovsrec_row
return None
def must_get_row(self, vsctl_table, record_id):
ovsrec_row = self.get_row(vsctl_table, record_id)
if not ovsrec_row:
vsctl_fatal('no row "%s" in table %s' % (record_id,
vsctl_table.table_name))
return ovsrec_row
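    # Row lookup sketch: `record_id` may be a row UUID string or a value of
    # the table's name column, resolved through the _VSCtlRowID entries in
    # VSCtl._TABLES below (names are illustrative):
    #
    #   ovsrec_row = ctx.get_row(vsctl_table, 'br-int')       # by name column
    #   ovsrec_row = ctx.get_row(vsctl_table, str(row.uuid))  # by UUID string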
class _CmdShowTable(object):
def __init__(self, table, name_column, columns, recurse):
super(_CmdShowTable, self).__init__()
self.table = table
self.name_column = name_column
self.columns = columns
self.recurse = recurse
class _VSCtlRowID(object):
def __init__(self, table, name_column, uuid_column):
super(_VSCtlRowID, self).__init__()
self.table = table
self.name_column = name_column
self.uuid_column = uuid_column
class _VSCtlTable(object):
def __init__(self, table_name, vsctl_row_id_list):
super(_VSCtlTable, self).__init__()
self.table_name = table_name
self.row_ids = vsctl_row_id_list
class VSCtlCommand(object):
def __init__(self, command, args=None, options=None):
super(VSCtlCommand, self).__init__()
self.command = command
self.args = args or []
self.options = options or []
# Data modified by commands
self.result = None
# internally used by VSCtl
self._prerequisite = None
self._run = None
def has_option(self, option):
return option in self.options
class VSCtl(object):
def _reset(self):
self.schema_helper = None
self.ovs = None
self.txn = None
self.wait_for_reload = True
self.dry_run = False
def __init__(self, remote):
super(VSCtl, self).__init__()
self.remote = remote
self.schema_json = None
self.schema = None
self.schema_helper = None
self.ovs = None
self.txn = None
self.wait_for_reload = True
self.dry_run = False
def _rpc_get_schema_json(self, database):
LOG.debug('remote %s', self.remote)
error, stream_ = stream.Stream.open_block(
stream.Stream.open(self.remote))
if error:
vsctl_fatal('error %s' % os.strerror(error))
rpc = jsonrpc.Connection(stream_)
request = jsonrpc.Message.create_request('get_schema', [database])
error, reply = rpc.transact_block(request)
rpc.close()
if error:
vsctl_fatal(os.strerror(error))
elif reply.error:
vsctl_fatal('error %s' % reply.error)
return reply.result
def _init_schema_helper(self):
if self.schema_json is None:
self.schema_json = self._rpc_get_schema_json(
vswitch_idl.OVSREC_DB_NAME)
schema_helper = idl.SchemaHelper(None, self.schema_json)
schema_helper.register_all()
self.schema = schema_helper.get_idl_schema()
# LOG.debug('schema_json %s', schema_json)
self.schema_helper = idl.SchemaHelper(None, self.schema_json)
@staticmethod
def _idl_block(idl_):
poller = ovs.poller.Poller()
idl_.wait(poller)
poller.block()
@staticmethod
def _idl_wait(idl_, seqno):
while idl_.change_seqno == seqno and not idl_.run():
VSCtl._idl_block(idl_)
def _run_prerequisites(self, commands):
schema_helper = self.schema_helper
schema_helper.register_table(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH)
if self.wait_for_reload:
# LOG.debug('schema_helper._tables %s', schema_helper._tables)
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_CUR_CFG])
for command in commands:
if not command._prerequisite:
continue
ctx = VSCtlContext(None, None, None)
command._prerequisite(ctx, command)
ctx.done()
def _do_vsctl(self, idl_, commands):
txn = idl.Transaction(idl_)
self.txn = txn
if self.dry_run:
txn.dry_run = True
txn.add_comment('ovs-vsctl') # TODO:XXX add operation name. args
ovs_rows = idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH].rows
if ovs_rows:
ovs_ = ovs_rows.values()[0]
else:
# XXX add verification that table is empty
ovs_ = txn.insert(
idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH])
if self.wait_for_reload:
ovs_.increment(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_NEXT_CFG)
# TODO:XXX
# symtab = ovsdb_symbol_table_create()
ctx = VSCtlContext(idl_, txn, ovs_)
for command in commands:
if not command._run:
continue
command._run(ctx, command)
if ctx.try_again:
return False
LOG.debug('result:\n%s', [command.result for command in commands])
ctx.done()
# TODO:XXX check if created symbols are really created, referenced.
status = txn.commit_block()
next_cfg = 0
if self.wait_for_reload and status == idl.Transaction.SUCCESS:
next_cfg = txn.get_increment_new_value()
# TODO:XXX
# if status in (idl.Transaction.UNCHANGED, idl.Transaction.SUCCESS):
# for command in commands:
# if not command.post_func:
# continue
# ctx = VSCtlContext(idl_, txn, self.ovs)
# command.post_func(ctx)
# ctx.done()
txn_ = self.txn
self.txn = None
txn = None
if status in (idl.Transaction.UNCOMMITTED, idl.Transaction.INCOMPLETE):
not_reached()
elif status == idl.Transaction.ABORTED:
vsctl_fatal('transaction aborted')
elif status == idl.Transaction.UNCHANGED:
LOG.info('unchanged')
elif status == idl.Transaction.SUCCESS:
LOG.info('success')
elif status == idl.Transaction.TRY_AGAIN:
return False
elif status == idl.Transaction.ERROR:
vsctl_fatal('transaction error: %s' % txn_.get_error())
elif status == idl.Transaction.NOT_LOCKED:
vsctl_fatal('database not locked')
else:
not_reached()
if self.wait_for_reload and status != idl.Transaction.UNCHANGED:
while True:
idl_.run()
if (ovs_.cur_cfg >= next_cfg):
break
self._idl_block(idl_)
return True
def _do_main(self, commands):
"""
:type commands: list of VSCtlCommand
"""
self._reset()
self._init_schema_helper()
self._run_prerequisites(commands)
idl_ = idl.Idl(self.remote, self.schema_helper)
seqno = idl_.change_seqno
while True:
self._idl_wait(idl_, seqno)
seqno = idl_.change_seqno
if self._do_vsctl(idl_, commands):
break
if self.txn:
self.txn.abort()
self.txn = None
# TODO:XXX
# ovsdb_symbol_table_destroy(symtab)
idl_.close()
def _run_command(self, commands):
"""
:type commands: list of VSCtlCommand
"""
all_commands = {
# Open vSwitch commands.
'init': (None, self._cmd_init),
'show': (self._pre_cmd_show, self._cmd_show),
# Bridge commands.
'add-br': (self._pre_add_br, self._cmd_add_br),
'del-br': (self._pre_get_info, self._cmd_del_br),
'list-br': (self._pre_get_info, self._cmd_list_br),
# Port. commands
'list-ports': (self._pre_get_info, self._cmd_list_ports),
'add-port': (self._pre_cmd_add_port, self._cmd_add_port),
'del-port': (self._pre_get_info, self._cmd_del_port),
# 'add-bond':
# 'port-to-br':
# Interface commands.
'list-ifaces': (self._pre_get_info, self._cmd_list_ifaces),
# 'iface-to-br':
# Controller commands.
'get-controller': (self._pre_controller, self._cmd_get_controller),
'del-controller': (self._pre_controller, self._cmd_del_controller),
'set-controller': (self._pre_controller, self._cmd_set_controller),
# 'get-fail-mode':
# 'del-fail-mode':
# 'set-fail-mode':
# Manager commands.
# 'get-manager':
# 'del-manager':
# 'set-manager':
# Switch commands.
# 'emer-reset':
# Database commands.
# 'comment':
'get': (self._pre_cmd_get, self._cmd_get),
# 'list':
'find': (self._pre_cmd_find, self._cmd_find),
'set': (self._pre_cmd_set, self._cmd_set),
# 'add':
'clear': (self._pre_cmd_clear, self._cmd_clear),
# 'create':
# 'destroy':
# 'wait-until':
'set-qos': (self._pre_cmd_set_qos, self._cmd_set_qos),
'set-queue': (self._pre_cmd_set_queue, self._cmd_set_queue),
'del-qos': (self._pre_get_info, self._cmd_del_qos),
# for quantum_adapter
'list-ifaces-verbose': (self._pre_cmd_list_ifaces_verbose,
self._cmd_list_ifaces_verbose),
}
for command in commands:
funcs = all_commands[command.command]
command._prerequisite, command._run = funcs
self._do_main(commands)
def run_command(self, commands, timeout_sec=None, exception=None):
if timeout_sec is None:
self._run_command(commands)
else:
with hub.Timeout(timeout_sec, exception):
self._run_command(commands)
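    # A minimal usage sketch. The OVSDB endpoint is an assumption; any remote
    # accepted by ovs.stream.Stream.open() works the same way:
    #
    #   vsctl = VSCtl('tcp:127.0.0.1:6640')
    #   cmd = VSCtlCommand('add-br', ('br-test',))
    #   vsctl.run_command([cmd], timeout_sec=10)
    #   cmd = VSCtlCommand('list-br')
    #   vsctl.run_command([cmd])
    #   print(cmd.result)   # sorted list of bridge names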
# commands
def _cmd_init(self, _ctx, _command):
# nothing. Just check connection to ovsdb
pass
_CMD_SHOW_TABLES = [
_CmdShowTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH, None,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_MANAGER_OPTIONS,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_OVS_VERSION],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
[vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
[vswitch_idl.OVSREC_PORT_COL_TAG,
vswitch_idl.OVSREC_PORT_COL_TRUNKS,
vswitch_idl.OVSREC_PORT_COL_INTERFACES],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
vswitch_idl.OVSREC_CONTROLLER_COL_TARGET,
[vswitch_idl.OVSREC_CONTROLLER_COL_IS_CONNECTED],
False),
_CmdShowTable(vswitch_idl.OVSREC_TABLE_MANAGER,
vswitch_idl.OVSREC_MANAGER_COL_TARGET,
[vswitch_idl.OVSREC_MANAGER_COL_IS_CONNECTED],
False),
]
def _pre_cmd_show(self, _ctx, _command):
schema_helper = self.schema_helper
for show in self._CMD_SHOW_TABLES:
schema_helper.register_table(show.table)
if show.name_column:
schema_helper.register_columns(show.table, [show.name_column])
schema_helper.register_columns(show.table, show.columns)
@staticmethod
def _cmd_show_find_table_by_row(row):
for show in VSCtl._CMD_SHOW_TABLES:
if show.table == row._table.name:
return show
return None
@staticmethod
def _cmd_show_find_table_by_name(name):
for show in VSCtl._CMD_SHOW_TABLES:
if show.table == name:
return show
return None
@staticmethod
def _cmd_show_row(ctx, row, level):
_INDENT_SIZE = 4 # # of spaces per indent
show = VSCtl._cmd_show_find_table_by_row(row)
output = ''
output += ' ' * level * _INDENT_SIZE
if show and show.name_column:
output += '%s ' % show.table
datum = getattr(row, show.name_column)
output += datum
else:
output += str(row.uuid)
output += '\n'
if not show or show.recurse:
return
show.recurse = True
for column in show.columns:
datum = row._data[column]
key = datum.type.key
if (key.type == ovs.db.types.UuidType and key.ref_table_name):
ref_show = VSCtl._cmd_show_find_table_by_name(
key.ref_table_name)
if ref_show:
for atom in datum.values:
ref_row = ctx.idl.tables[ref_show.table].rows.get(
atom.value)
if ref_row:
                            output += VSCtl._cmd_show_row(
                                ctx, ref_row, level + 1) or ''
continue
if not datum.is_default():
output += ' ' * (level + 1) * _INDENT_SIZE
output += '%s: %s\n' % (column, datum)
show.recurse = False
return output
def _cmd_show(self, ctx, command):
for row in ctx.idl.tables[
self._CMD_SHOW_TABLES[0].table].rows.values():
output = self._cmd_show_row(ctx, row, 0)
command.result = output
def _pre_get_info(self, _ctx, _command):
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT,
[vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_FAKE_BRIDGE,
vswitch_idl.OVSREC_PORT_COL_TAG,
vswitch_idl.OVSREC_PORT_COL_INTERFACES,
vswitch_idl.OVSREC_PORT_COL_QOS])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_NAME])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QOS,
[vswitch_idl.OVSREC_QOS_COL_QUEUES])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QUEUE,
[])
def _cmd_list_br(self, ctx, command):
ctx.populate_cache()
command.result = sorted(ctx.bridges.keys())
def _pre_add_br(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE])
def _cmd_add_br(self, ctx, command):
br_name = command.args[0]
if len(command.args) == 1:
parent_name = None
vlan = 0
elif len(command.args) == 3:
parent_name = command.args[1]
vlan = int(command.args[2])
if vlan < 0 or vlan > 4095:
vsctl_fatal("vlan must be between 0 and 4095 %d" % vlan)
else:
            vsctl_fatal('this command takes exactly 1 or 3 arguments')
ctx.add_bridge(br_name, parent_name, vlan)
def _del_br(self, ctx, br_name, must_exist=False):
ctx.populate_cache()
br = ctx.find_bridge(br_name, must_exist)
if br:
ctx.del_bridge(br)
def _cmd_del_br(self, ctx, command):
br_name = command.args[0]
self._del_br(ctx, br_name)
def _list_ports(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
if br.br_cfg:
br.br_cfg.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
else:
br.parent.br_cfg.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
return [port.port_cfg.name for port in br.ports
if port.port_cfg.name != br.name]
def _cmd_list_ports(self, ctx, command):
br_name = command.args[0]
port_names = self._list_ports(ctx, br_name)
command.result = sorted(port_names)
def _pre_add_port(self, _ctx, columns):
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT,
[vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_BOND_FAKE_IFACE])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT, columns)
def _pre_cmd_add_port(self, ctx, command):
self._pre_get_info(ctx, command)
columns = [ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)[0]
for setting in command.args[2:]]
self._pre_add_port(ctx, columns)
def _cmd_add_port(self, ctx, command):
may_exist = command.has_option('--may_exist')
br_name = command.args[0]
port_name = command.args[1]
iface_names = [command.args[1]]
settings = [ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)
for setting in command.args[2:]]
ctx.add_port(br_name, port_name, may_exist,
False, iface_names, settings)
def _del_port(self, ctx, br_name=None, target=None,
must_exist=False, with_iface=False):
assert target is not None
ctx.populate_cache()
if not with_iface:
vsctl_port = ctx.find_port(target, must_exist)
else:
vsctl_port = ctx.find_port(target, False)
if not vsctl_port:
vsctl_iface = ctx.find_iface(target, False)
if vsctl_iface:
vsctl_port = vsctl_iface.port()
if must_exist and not vsctl_port:
vsctl_fatal('no port or interface named %s' % target)
if not vsctl_port:
return
        if br_name:
vsctl_bridge = ctx.find_bridge(br_name, True)
if vsctl_port.bridge() != vsctl_bridge:
if vsctl_port.bridge().parent == vsctl_bridge:
vsctl_fatal('bridge %s does not have a port %s (although '
'its parent bridge %s does)' %
(br_name, target, vsctl_bridge.parent.name))
else:
vsctl_fatal('bridge %s does not have a port %s' %
(br_name, target))
ctx.del_port(vsctl_port)
def _cmd_del_port(self, ctx, command):
must_exist = command.has_option('--must-exist')
with_iface = command.has_option('--with-iface')
target = command.args[-1]
br_name = command.args[0] if len(command.args) == 2 else None
self._del_port(ctx, br_name, target, must_exist, with_iface)
def _list_ifaces(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
ctx.verify_ports()
iface_names = set()
for vsctl_port in br.ports:
for vsctl_iface in vsctl_port.ifaces:
iface_name = vsctl_iface.iface_cfg.name
if iface_name != br_name:
iface_names.add(iface_name)
return iface_names
def _cmd_list_ifaces(self, ctx, command):
br_name = command.args[0]
iface_names = self._list_ifaces(ctx, br_name)
command.result = sorted(iface_names)
def _pre_cmd_list_ifaces_verbose(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_DATAPATH_ID])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
vswitch_idl.OVSREC_INTERFACE_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS,
vswitch_idl.OVSREC_INTERFACE_COL_OFPORT])
@staticmethod
def _iface_to_dict(iface_cfg):
_ATTRIBUTE = ['name', 'ofport', 'type', 'external_ids', 'options']
attr = dict((key, getattr(iface_cfg, key)) for key in _ATTRIBUTE)
if attr['ofport']:
attr['ofport'] = attr['ofport'][0]
return attr
def _list_ifaces_verbose(self, ctx, datapath_id, port_name):
ctx.populate_cache()
br = ctx.find_bridge_by_id(datapath_id, True)
ctx.verify_ports()
iface_cfgs = []
if port_name is None:
for vsctl_port in br.ports:
iface_cfgs.extend(self._iface_to_dict(vsctl_iface.iface_cfg)
for vsctl_iface in vsctl_port.ifaces)
else:
            # When the port is created, the ofport column might still be None,
            # so fall back to matching on the port name in that case.
for vsctl_port in br.ports:
iface_cfgs.extend(
self._iface_to_dict(vsctl_iface.iface_cfg)
for vsctl_iface in vsctl_port.ifaces
if (vsctl_iface.iface_cfg.name == port_name))
return iface_cfgs
def _cmd_list_ifaces_verbose(self, ctx, command):
datapath_id = command.args[0]
port_name = None
if len(command.args) >= 2:
port_name = command.args[1]
LOG.debug('command.args %s', command.args)
iface_cfgs = self._list_ifaces_verbose(ctx, datapath_id, port_name)
command.result = sorted(iface_cfgs)
def _verify_controllers(self, ovsrec_bridge):
ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)
for controller in ovsrec_bridge.controller:
controller.verify(vswitch_idl.OVSREC_CONTROLLER_COL_TARGET)
def _pre_controller(self, ctx, command):
self._pre_get_info(ctx, command)
self.schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_CONTROLLER,
[vswitch_idl.OVSREC_CONTROLLER_COL_TARGET])
def _get_controller(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
self._verify_controllers(br.br_cfg)
return set(controller.target for controller in br.br_cfg.controller)
def _cmd_get_controller(self, ctx, command):
br_name = command.args[0]
controller_names = self._get_controller(ctx, br_name)
command.result = sorted(controller_names)
def _delete_controllers(self, ovsrec_controllers):
for controller in ovsrec_controllers:
controller.delete()
def _del_controller(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_real_bridge(br_name, True)
ovsrec_bridge = br.br_cfg
self._verify_controllers(ovsrec_bridge)
if ovsrec_bridge.controller:
self._delete_controllers(ovsrec_bridge.controller)
ovsrec_bridge.controller = []
def _cmd_del_controller(self, ctx, command):
br_name = command.args[0]
self._del_controller(ctx, br_name)
def _insert_controllers(self, controller_names):
ovsrec_controllers = []
for name in controller_names:
# TODO: check if the name startswith() supported protocols
ovsrec_controller = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_CONTROLLER])
ovsrec_controller.target = name
ovsrec_controllers.append(ovsrec_controller)
return ovsrec_controllers
def _insert_qos(self):
ovsrec_qos = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
return ovsrec_qos
def _set_controller(self, ctx, br_name, controller_names):
ctx.populate_cache()
ovsrec_bridge = ctx.find_real_bridge(br_name, True).br_cfg
self._verify_controllers(ovsrec_bridge)
self._delete_controllers(ovsrec_bridge.controller)
controllers = self._insert_controllers(controller_names)
ovsrec_bridge.controller = controllers
def _cmd_set_controller(self, ctx, command):
br_name = command.args[0]
controller_names = command.args[1:]
self._set_controller(ctx, br_name, controller_names)
def _del_qos(self, ctx, port_name):
assert port_name is not None
ctx.populate_cache()
vsctl_port = ctx.find_port(port_name, True)
vsctl_qos = vsctl_port.qos
ctx.del_qos(vsctl_qos)
def _cmd_del_qos(self, ctx, command):
port_name = command.args[0]
self._del_qos(ctx, port_name)
def _set_qos(self, ctx, port_name, type, max_rate):
ctx.populate_cache()
vsctl_port = ctx.find_port(port_name, True)
ovsrec_qos = ctx.set_qos(vsctl_port, type, max_rate)
return ovsrec_qos
def _cmd_set_qos(self, ctx, command):
port_name = command.args[0]
type = command.args[1]
max_rate = command.args[2]
result = self._set_qos(ctx, port_name, type, max_rate)
command.result = [result]
def _pre_cmd_set_qos(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QOS,
[vswitch_idl.OVSREC_QOS_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_QOS_COL_OTHER_CONFIG,
vswitch_idl.OVSREC_QOS_COL_QUEUES,
vswitch_idl.OVSREC_QOS_COL_TYPE])
def _cmd_set_queue(self, ctx, command):
ctx.populate_cache()
port_name = command.args[0]
queues = command.args[1]
vsctl_port = ctx.find_port(port_name, True)
vsctl_qos = vsctl_port.qos
queue_id = 0
results = []
for queue in queues:
max_rate = queue.get('max-rate', None)
min_rate = queue.get('min-rate', None)
ovsrec_queue = ctx.set_queue(
vsctl_qos, max_rate, min_rate, queue_id)
results.append(ovsrec_queue)
queue_id += 1
command.result = results
def _pre_cmd_set_queue(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QUEUE,
[vswitch_idl.OVSREC_QUEUE_COL_DSCP,
vswitch_idl.OVSREC_QUEUE_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_QUEUE_COL_OTHER_CONFIG])
_TABLES = [
_VSCtlTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_INTERFACE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_MIRROR,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MIRROR,
vswitch_idl.OVSREC_MIRROR_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_MANAGER,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MANAGER,
vswitch_idl.OVSREC_MANAGER_COL_TARGET,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_NETFLOW,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_NETFLOW)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
None,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_PORT,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_QOS,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_QOS)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_QUEUE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_QOS,
None,
vswitch_idl.OVSREC_QOS_COL_QUEUES)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_SSL,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
None,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_SSL)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_SFLOW,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_SFLOW)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
vswitch_idl.OVSREC_FLOW_TABLE_COL_NAME,
None)]),
]
@staticmethod
def _score_partial_match(name, s):
_MAX_SCORE = 0xffffffff
assert len(name) < _MAX_SCORE
s = s[:_MAX_SCORE - 1] # in practice, this doesn't matter
if name == s:
return _MAX_SCORE
name = name.lower().replace('-', '_')
s = s.lower().replace('-', '_')
if s.startswith(name):
return _MAX_SCORE - 1
if name.startswith(s):
return len(s)
return 0
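    # Scoring sketch (comparison is case-insensitive and treats '-' as '_'):
    #
    #   _score_partial_match('Bridge', 'Bridge')   -> _MAX_SCORE      (exact)
    #   _score_partial_match('Bridge', 'bridges')  -> _MAX_SCORE - 1
    #   _score_partial_match('Bridge', 'br')       -> 2  (len of the prefix)
    #   _score_partial_match('Bridge', 'Port')     -> 0  (no match)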
@staticmethod
def _get_table(table_name):
best_match = None
best_score = 0
for table in VSCtl._TABLES:
score = VSCtl._score_partial_match(table.table_name, table_name)
if score > best_score:
best_match = table
best_score = score
elif score == best_score:
best_match = None
if best_match:
return best_match
elif best_score:
vsctl_fatal('multiple table names match "%s"' % table_name)
else:
vsctl_fatal('unknown table "%s"' % table_name)
def _pre_get_table(self, _ctx, table_name):
vsctl_table = self._get_table(table_name)
schema_helper = self.schema_helper
schema_helper.register_table(vsctl_table.table_name)
for row_id in vsctl_table.row_ids:
if row_id.table:
schema_helper.register_table(row_id.table)
if row_id.name_column:
schema_helper.register_columns(row_id.table,
[row_id.name_column])
if row_id.uuid_column:
schema_helper.register_columns(row_id.table,
[row_id.uuid_column])
return vsctl_table
def _get_column(self, table_name, column_name):
best_match = None
best_score = 0
columns = self.schema.tables[table_name].columns.keys()
for column in columns:
score = VSCtl._score_partial_match(column, column_name)
if score > best_score:
best_match = column
best_score = score
elif score == best_score:
best_match = None
if best_match:
# ovs.db.schema_helper._keep_table_columns() requires that
# column_name is type of str. Not unicode string
return str(best_match)
elif best_score:
vsctl_fatal('%s contains more than one column whose name '
'matches "%s"' % (table_name, column_name))
else:
vsctl_fatal('%s does not contain a column whose name matches '
'"%s"' % (table_name, column_name))
def _pre_get_column(self, _ctx, table_name, column):
column_name = self._get_column(table_name, column)
self.schema_helper.register_columns(table_name, [column_name])
def _pre_get(self, ctx, table_name, columns):
vsctl_table = self._pre_get_table(ctx, table_name)
for column in columns:
self._pre_get_column(ctx, vsctl_table.table_name, column)
def _pre_cmd_get(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [ctx.parse_column_key_value(table_schema, column_key)[0]
for column_key in command.args[2:]]
self._pre_get(ctx, table_name, columns)
def _get(self, ctx, table_name, record_id, column_keys,
id_=None, if_exists=False):
"""
:type column_keys: list of (column, key_string)
where column and key are str
"""
vsctl_table = self._get_table(table_name)
row = ctx.must_get_row(vsctl_table, record_id)
if id_:
raise NotImplementedError() # TODO:XXX
symbol, new = ctx.create_symbol(id_)
if not new:
vsctl_fatal('row id "%s" specified on "get" command was used '
'before it was defined' % id_)
symbol.uuid = row.uuid
symbol.strong_ref = True
values = []
for column, key_string in column_keys:
row.verify(column)
datum = getattr(row, column)
if key_string:
if type(datum) != dict:
vsctl_fatal('cannot specify key to get for non-map column '
'%s' % column)
values.append(datum[key_string])
else:
values.append(datum)
return values
def _cmd_get(self, ctx, command):
id_ = None # TODO:XXX --id
if_exists = command.has_option('--if-exists')
table_name = command.args[0]
record_id = command.args[1]
table_schema = self.schema.tables[table_name]
column_keys = [ctx.parse_column_key_value(table_schema, column_key)[:2]
for column_key in command.args[2:]]
values = self._get(ctx, table_name, record_id, column_keys,
id_, if_exists)
command.result = values
def _pre_cmd_find(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [ctx.parse_column_key_value(table_schema,
column_key_value)[0]
for column_key_value in command.args[1:]]
LOG.debug('columns %s', columns)
self._pre_get(ctx, table_name, columns)
def _check_value(self, ovsrec_row, column_key_value):
column, key, value_json = column_key_value
column_schema = ovsrec_row._table.columns[column]
value = ovs.db.data.Datum.from_json(
column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
datum = getattr(ovsrec_row, column)
if key is None:
if datum == value:
return True
        else:
            # Map columns match when the key is present with the given value.
            if key in datum and datum[key] == value:
                return True
return False
def _find(self, ctx, table_name, column_key_values):
result = []
for ovsrec_row in ctx.idl.tables[table_name].rows.values():
LOG.debug('ovsrec_row %s', ovsrec_row_to_string(ovsrec_row))
if all(self._check_value(ovsrec_row, column_key_value)
for column_key_value in column_key_values):
result.append(ovsrec_row)
return result
def _cmd_find(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
column_key_values = [ctx.parse_column_key_value(table_schema,
column_key_value)
for column_key_value in command.args[1:]]
command.result = self._find(ctx, table_name, column_key_values)
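    # Example: 'find' takes a table name followed by <column>[:<key>]=<value>
    # filters and returns the matching rows (the filter below is illustrative):
    #
    #   cmd = VSCtlCommand('find', ('Interface', 'type=internal'))
    #   vsctl.run_command([cmd])
    #   # cmd.result is the list of matching Interface rows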
def _check_mutable(self, table_name, column):
column_schema = self.schema.tables[table_name].columns[column]
if not column_schema.mutable:
vsctl_fatal('cannot modify read-only column %s in table %s' %
(column, table_name))
def _pre_set(self, ctx, table_name, columns):
self._pre_get_table(ctx, table_name)
for column in columns:
self._pre_get_column(ctx, table_name, column)
self._check_mutable(table_name, column)
def _pre_cmd_set(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [ctx.parse_column_key_value(table_schema,
column_key_value)[0]
for column_key_value in command.args[2:]]
self._pre_set(ctx, table_name, columns)
def _set(self, ctx, table_name, record_id, column_key_values):
"""
:type column_key_values: list of (column, key_string, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, key, value in column_key_values:
ctx.set_column(ovsrec_row, column, key, value)
ctx.invalidate_cache()
def _cmd_set(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
# column_key_value: <column>[:<key>]=<value>
table_schema = self.schema.tables[table_name]
column_key_values = [ctx.parse_column_key_value(table_schema,
column_key_value)
for column_key_value in command.args[2:]]
self._set(ctx, table_name, record_id, column_key_values)
def _pre_clear(self, ctx, table_name, column):
self._pre_get_table(ctx, table_name)
self._pre_get_column(ctx, table_name, column)
self._check_mutable(table_name, column)
def _pre_cmd_clear(self, ctx, command):
table_name = command.args[0]
column = command.args[2]
self._pre_clear(ctx, table_name, column)
def _clear(self, ctx, table_name, record_id, column):
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
column_schema = ctx.idl.tables[table_name].columns[column]
if column_schema.type.n_min > 0:
vsctl_fatal('"clear" operation cannot be applied to column %s '
'of table %s, which is not allowed to be empty' %
(column, table_name))
# assuming that default datum is empty.
default_datum = ovs.db.data.Datum.default(column_schema.type)
setattr(ovsrec_row, column,
default_datum.to_python(ovs.db.idl._uuid_to_row))
ctx.invalidate_cache()
def _cmd_clear(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
column = command.args[2]
self._clear(ctx, table_name, record_id, column)
#
# Create constants from ovs db schema
#
def schema_print(schema_location, prefix):
prefix = prefix.upper()
json = ovs.json.from_file(schema_location)
schema = ovs.db.schema.DbSchema.from_json(json)
print('# Do NOT edit.')
print('# This is automatically generated.')
print('# created based on version %s' % (schema.version or 'unknown'))
print('')
print('')
print('%s_DB_NAME = \'%s\'' % (prefix, schema.name))
for table in sorted(schema.tables.values(),
key=operator.attrgetter('name')):
print('')
print('%s_TABLE_%s = \'%s\'' % (prefix,
table.name.upper(), table.name))
for column in sorted(table.columns.values(),
key=operator.attrgetter('name')):
print('%s_%s_COL_%s = \'%s\'' % (prefix, table.name.upper(),
column.name.upper(),
column.name))
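# Example of the generated output for prefix 'OVSREC' (matching the constants
# imported from ryu.lib.ovs.vswitch_idl above; shown values are indicative):
#
#   OVSREC_DB_NAME = 'Open_vSwitch'
#   OVSREC_TABLE_BRIDGE = 'Bridge'
#   OVSREC_BRIDGE_COL_NAME = 'name'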
def main():
    if len(sys.argv) <= 2:
        print('Usage: %s <schema file> <prefix>' % sys.argv[0])
        sys.exit(1)
location = sys.argv[1]
prefix = sys.argv[2]
schema_print(location, prefix)
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
#
# Copyright 2014 Rackspace, Inc.
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import collections
import time
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import retrying
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import rpcapi
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic import objects
agent_opts = [
cfg.IntOpt('heartbeat_timeout',
default=300,
help=_('Maximum interval (in seconds) for agent heartbeats.')),
cfg.IntOpt('post_deploy_get_power_state_retries',
default=6,
help=_('Number of times to retry getting power state to check '
'if bare metal node has been powered off after a soft '
'power off.')),
cfg.IntOpt('post_deploy_get_power_state_retry_interval',
default=5,
help=_('Amount of time (in seconds) to wait between polling '
'power state after trigger soft poweroff.')),
]
CONF = cfg.CONF
CONF.register_opts(agent_opts, group='agent')
LOG = log.getLogger(__name__)
# This contains a nested dictionary containing the post clean step
# hooks registered for each clean step of every interface.
# Every key of POST_CLEAN_STEP_HOOKS is an interface and its value
# is a dictionary. For this inner dictionary, the key is the name of
# the clean-step method in the interface, and the value is the post
# clean-step hook -- the function that is to be called after successful
# completion of the clean step.
#
# For example:
# POST_CLEAN_STEP_HOOKS =
# {
# 'raid': {'create_configuration': <post-create function>,
# 'delete_configuration': <post-delete function>}
# }
#
# It means that method '<post-create function>' is to be called after
# successfully completing the clean step 'create_configuration' of
# raid interface. '<post-delete function>' is to be called after
# completing 'delete_configuration' of raid interface.
POST_CLEAN_STEP_HOOKS = {}
VENDOR_PROPERTIES = {
'deploy_forces_oob_reboot': _(
'Whether Ironic should force a reboot of the Node via the out-of-band '
'channel after deployment is complete. Provides compatibility with '
'older deploy ramdisks. Defaults to False. Optional.')
}
def _get_client():
client = agent_client.AgentClient()
return client
def post_clean_step_hook(interface, step):
"""Decorator method for adding a post clean step hook.
This is a mechanism for adding a post clean step hook for a particular
clean step. The hook will get executed after the clean step gets executed
successfully. The hook is not invoked on failure of the clean step.
Any method to be used as a hook should be decorated with @post_clean_step_hook,
specifying the interface and step after which the hook should be executed.
A TaskManager instance and the object for the last completed command
(provided by agent) will be passed to the hook method. The return value of
this method will be ignored. Any exception raised by this method will be
treated as a failure of the clean step and the node will be moved to
CLEANFAIL state.
:param interface: name of the interface
:param step: The name of the step after which it should be executed.
:returns: A method which registers the given method as a post clean
step hook.
"""
def decorator(func):
POST_CLEAN_STEP_HOOKS.setdefault(interface, {})[step] = func
return func
return decorator
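# Illustrative sketch only (no hook is registered by this module); the step
# and function names below are hypothetical:
#
#     @post_clean_step_hook('raid', 'create_configuration')
#     def _log_raid_created(task, command):
#         # Runs only after the clean step succeeds; its return value is ignored.
#         LOG.info('RAID configuration created for node %s', task.node.uuid)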
def _get_post_clean_step_hook(node):
"""Get post clean step hook for the currently executing clean step.
This method reads node.clean_step and returns the post clean
step hook for the currently executing clean step.
:param node: a node object
:returns: a method if there is a post clean step hook for this clean
step; None otherwise
"""
interface = node.clean_step.get('interface')
step = node.clean_step.get('step')
try:
return POST_CLEAN_STEP_HOOKS[interface][step]
except KeyError:
pass
class BaseAgentVendor(base.VendorInterface):
def __init__(self):
self.supported_payload_versions = ['2']
self._client = _get_client()
def continue_deploy(self, task, **kwargs):
"""Continues the deployment of baremetal node.
This method continues the deployment of the baremetal node after
the ramdisk has been booted.
:param task: a TaskManager instance
"""
pass
def deploy_has_started(self, task):
"""Check if the deployment has started already.
:returns: True if the deploy has started, False otherwise.
"""
pass
def deploy_is_done(self, task):
"""Check if the deployment is already completed.
:returns: True if the deployment is completed. False otherwise
"""
pass
def reboot_to_instance(self, task, **kwargs):
"""Method invoked after the deployment is completed.
:param task: a TaskManager instance
"""
pass
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return VENDOR_PROPERTIES
def validate(self, task, method, **kwargs):
"""Validate the driver-specific Node deployment info.
No validation necessary.
:param task: a TaskManager instance
:param method: method to be validated
"""
pass
def driver_validate(self, method, **kwargs):
"""Validate the driver deployment info.
:param method: method to be validated.
"""
version = kwargs.get('version')
if not version:
raise exception.MissingParameterValue(_('Missing parameter '
'version'))
if version not in self.supported_payload_versions:
raise exception.InvalidParameterValue(_('Unknown lookup '
'payload version: %s')
% version)
def notify_conductor_resume_clean(self, task):
LOG.debug('Sending RPC to conductor to resume cleaning for node %s',
task.node.uuid)
uuid = task.node.uuid
rpc = rpcapi.ConductorAPI()
topic = rpc.get_topic_for(task.node)
# Need to release the lock to let the conductor take it
task.release_resources()
rpc.continue_node_clean(task.context, uuid, topic=topic)
def _refresh_clean_steps(self, task):
"""Refresh the node's cached clean steps from the booted agent.
Gets the node's clean steps from the booted agent and caches them.
The steps are cached to make get_clean_steps() calls synchronous, and
should be refreshed as soon as the agent boots to start cleaning or
if cleaning is restarted because of a cleaning version mismatch.
:param task: a TaskManager instance
:raises: NodeCleaningFailure if the agent returns invalid results
"""
node = task.node
previous_steps = node.driver_internal_info.get(
'agent_cached_clean_steps')
LOG.debug('Refreshing agent clean step cache for node %(node)s. '
'Previously cached steps: %(steps)s',
{'node': node.uuid, 'steps': previous_steps})
agent_result = self._client.get_clean_steps(node, task.ports).get(
'command_result', {})
missing = set(['clean_steps', 'hardware_manager_version']).difference(
agent_result)
if missing:
raise exception.NodeCleaningFailure(_(
'agent get_clean_steps for node %(node)s returned an invalid '
'result. Keys: %(keys)s are missing from result: %(result)s.')
% ({'node': node.uuid, 'keys': missing,
'result': agent_result}))
# agent_result['clean_steps'] looks like
# {'HardwareManager': [{step1},{steps2}...], ...}
steps = collections.defaultdict(list)
for step_list in agent_result['clean_steps'].values():
for step in step_list:
missing = set(['interface', 'step', 'priority']).difference(
step)
if missing:
raise exception.NodeCleaningFailure(_(
'agent get_clean_steps for node %(node)s returned an '
'invalid clean step. Keys: %(keys)s are missing from '
'step: %(step)s.') % ({'node': node.uuid,
'keys': missing, 'step': step}))
steps[step['interface']].append(step)
# Save hardware manager version, steps, and date
info = node.driver_internal_info
info['hardware_manager_version'] = agent_result[
'hardware_manager_version']
info['agent_cached_clean_steps'] = dict(steps)
info['agent_cached_clean_steps_refreshed'] = str(timeutils.utcnow())
node.driver_internal_info = info
node.save()
LOG.debug('Refreshed agent clean step cache for node %(node)s: '
'%(steps)s', {'node': node.uuid, 'steps': steps})
def continue_cleaning(self, task, **kwargs):
"""Start the next cleaning step if the previous one is complete.
In order to avoid errors and make agent upgrades painless, the agent
compares the version of all hardware managers at the start of the
cleaning (the agent's get_clean_steps() call) and before executing
each clean step. If the version has changed between steps, the agent is
unable to tell if an ordering change will cause a cleaning issue so
it returns CLEAN_VERSION_MISMATCH. For automated cleaning, we restart
the entire cleaning cycle. For manual cleaning, we don't.
Additionally, if a clean_step includes the reboot_requested property
set to True, this method will coordinate the reboot once the step is
completed.
"""
node = task.node
# For manual clean, the target provision state is MANAGEABLE, whereas
# for automated cleaning, it is (the default) AVAILABLE.
manual_clean = node.target_provision_state == states.MANAGEABLE
agent_commands = self._client.get_commands_status(task.node)
if not agent_commands:
if task.node.driver_internal_info.get('cleaning_reboot'):
# Node finished a cleaning step that requested a reboot, and
# this is the first heartbeat after booting. Continue cleaning.
info = task.node.driver_internal_info
info.pop('cleaning_reboot', None)
task.node.driver_internal_info = info
self.notify_conductor_resume_clean(task)
return
else:
# Agent has no commands whatsoever
return
command = self._get_completed_cleaning_command(task, agent_commands)
LOG.debug('Cleaning command status for node %(node)s on step %(step)s:'
' %(command)s', {'node': node.uuid,
'step': node.clean_step,
'command': command})
if not command:
# Agent command in progress
return
if command.get('command_status') == 'FAILED':
msg = (_('Agent returned error for clean step %(step)s on node '
'%(node)s : %(err)s.') %
{'node': node.uuid,
'err': command.get('command_error'),
'step': node.clean_step})
LOG.error(msg)
return manager_utils.cleaning_error_handler(task, msg)
elif command.get('command_status') == 'CLEAN_VERSION_MISMATCH':
# Cache the new clean steps (and 'hardware_manager_version')
try:
self._refresh_clean_steps(task)
except exception.NodeCleaningFailure as e:
msg = (_('Could not continue cleaning on node '
'%(node)s: %(err)s.') %
{'node': node.uuid, 'err': e})
LOG.exception(msg)
return manager_utils.cleaning_error_handler(task, msg)
if manual_clean:
# Don't restart manual cleaning if agent reboots to a new
# version. Both are operator actions, unlike automated
# cleaning. Manual clean steps are not necessarily idempotent
# like automated clean steps and can be even longer running.
LOG.info(_LI('During manual cleaning, node %(node)s detected '
'a clean version mismatch. Re-executing and '
'continuing from current step %(step)s.'),
{'node': node.uuid, 'step': node.clean_step})
driver_internal_info = node.driver_internal_info
driver_internal_info['skip_current_clean_step'] = False
node.driver_internal_info = driver_internal_info
node.save()
else:
# Restart cleaning, agent must have rebooted to new version
LOG.info(_LI('During automated cleaning, node %s detected a '
'clean version mismatch. Resetting clean steps '
'and rebooting the node.'),
node.uuid)
try:
manager_utils.set_node_cleaning_steps(task)
except exception.NodeCleaningFailure:
msg = (_('Could not restart automated cleaning on node '
'%(node)s: %(err)s.') %
{'node': node.uuid,
'err': command.get('command_error'),
'step': node.clean_step})
LOG.exception(msg)
return manager_utils.cleaning_error_handler(task, msg)
self.notify_conductor_resume_clean(task)
elif command.get('command_status') == 'SUCCEEDED':
clean_step_hook = _get_post_clean_step_hook(node)
if clean_step_hook is not None:
LOG.debug('For node %(node)s, executing post clean step '
'hook %(method)s for clean step %(step)s' %
{'method': clean_step_hook.__name__,
'node': node.uuid,
'step': node.clean_step})
try:
clean_step_hook(task, command)
except Exception as e:
msg = (_('For node %(node)s, post clean step hook '
'%(method)s failed for clean step %(step)s. '
'Error: %(error)s') %
{'method': clean_step_hook.__name__,
'node': node.uuid,
'error': e,
'step': node.clean_step})
LOG.exception(msg)
return manager_utils.cleaning_error_handler(task, msg)
if task.node.clean_step.get('reboot_requested'):
self._cleaning_reboot(task)
return
LOG.info(_LI('Agent on node %s returned cleaning command success, '
'moving to next clean step'), node.uuid)
self.notify_conductor_resume_clean(task)
else:
msg = (_('Agent returned unknown status for clean step %(step)s '
'on node %(node)s : %(err)s.') %
{'node': node.uuid,
'err': command.get('command_status'),
'step': node.clean_step})
LOG.error(msg)
return manager_utils.cleaning_error_handler(task, msg)
def _cleaning_reboot(self, task):
"""Reboots a node out of band after a clean step that requires it.
If an agent clean step has 'reboot_requested': True, reboots the
node when the step is completed. Will put the node in CLEANFAIL
if the node cannot be rebooted.
:param task: a TaskManager instance
"""
try:
manager_utils.node_power_action(task, states.REBOOT)
except Exception as e:
msg = (_('Reboot requested by clean step %(step)s failed for '
'node %(node)s: %(err)s') %
{'step': task.node.clean_step,
'node': task.node.uuid,
'err': e})
LOG.error(msg)
# do not set cleaning_reboot if we didn't reboot
manager_utils.cleaning_error_handler(task, msg)
return
# Signify that we've rebooted
driver_internal_info = task.node.driver_internal_info
driver_internal_info['cleaning_reboot'] = True
task.node.driver_internal_info = driver_internal_info
task.node.save()
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def heartbeat(self, task, **kwargs):
"""Method for agent to periodically check in.
The agent should be sending its agent_url (so Ironic can talk back)
as a kwarg. kwargs should have the following format::
{
'agent_url': 'http://AGENT_HOST:AGENT_PORT'
}
AGENT_PORT defaults to 9999.
"""
node = task.node
driver_internal_info = node.driver_internal_info
LOG.debug(
'Heartbeat from %(node)s, last heartbeat at %(heartbeat)s.',
{'node': node.uuid,
'heartbeat': driver_internal_info.get('agent_last_heartbeat')})
driver_internal_info['agent_last_heartbeat'] = int(time.time())
try:
driver_internal_info['agent_url'] = kwargs['agent_url']
except KeyError:
raise exception.MissingParameterValue(_('For heartbeat operation, '
'"agent_url" must be '
'specified.'))
node.driver_internal_info = driver_internal_info
node.save()
# Async call backs don't set error state on their own
# TODO(jimrollenhagen) improve error messages here
msg = _('Failed checking if deploy is done.')
try:
if node.maintenance:
# this shouldn't happen often, but skip the rest if it does.
LOG.debug('Heartbeat from node %(node)s in maintenance mode; '
'not taking any action.', {'node': node.uuid})
return
elif (node.provision_state == states.DEPLOYWAIT and
not self.deploy_has_started(task)):
msg = _('Node failed to get image for deploy.')
self.continue_deploy(task, **kwargs)
elif (node.provision_state == states.DEPLOYWAIT and
self.deploy_is_done(task)):
msg = _('Node failed to move to active state.')
self.reboot_to_instance(task, **kwargs)
elif (node.provision_state == states.DEPLOYWAIT and
self.deploy_has_started(task)):
node.touch_provisioning()
elif node.provision_state == states.CLEANWAIT:
node.touch_provisioning()
try:
if not node.clean_step:
LOG.debug('Node %s just booted to start cleaning.',
node.uuid)
msg = _('Node failed to start the first cleaning '
'step.')
# First, cache the clean steps
self._refresh_clean_steps(task)
# Then set/verify node clean steps and start cleaning
manager_utils.set_node_cleaning_steps(task)
self.notify_conductor_resume_clean(task)
else:
msg = _('Node failed to check cleaning progress.')
self.continue_cleaning(task, **kwargs)
except exception.NoFreeConductorWorker:
# wait for the next heartbeat; node.last_error and the log
# message are already set via the conductor's hook
pass
except Exception as e:
err_info = {'node': node.uuid, 'msg': msg, 'e': e}
last_error = _('Asynchronous exception for node %(node)s: '
'%(msg)s Exception: %(e)s') % err_info
LOG.exception(last_error)
if node.provision_state in (states.CLEANING, states.CLEANWAIT):
manager_utils.cleaning_error_handler(task, last_error)
elif node.provision_state in (states.DEPLOYING, states.DEPLOYWAIT):
deploy_utils.set_failed_state(task, last_error)
@base.driver_passthru(['POST'], async=False)
def lookup(self, context, **kwargs):
"""Find a matching node for the agent.
Method to be called the first time a ramdisk agent checks in. This
can be because this is a node just entering decom or a node that
rebooted for some reason. We will use the mac addresses listed in the
kwargs to find the matching node, then return the node object to the
agent. The agent can then use that UUID to call the node vendor
passthru method.
Currently, we don't handle the case where the agent doesn't have
a matching node (i.e. a brand new node that has never been enrolled in Ironic).
kwargs should have the following format::
{
"version": "2"
"inventory": {
"interfaces": [
{
"name": "eth0",
"mac_address": "00:11:22:33:44:55",
"switch_port_descr": "port24",
"switch_chassis_descr": "tor1"
}, ...
], ...
},
"node_uuid": "ab229209-0139-4588-bbe5-64ccec81dd6e"
}
The interfaces list should include the non-IPMI MAC addresses
in the form aa:bb:cc:dd:ee:ff.
node_uuid argument is optional. If it's provided (e.g. as a result of
inspection run before lookup), this method will just return a node and
options.
This method will also return the timeout for heartbeats. The driver
will expect the agent to heartbeat before that timeout, or it will be
considered down. This will be in a root level key called
'heartbeat_timeout'
:raises: NotFound if no matching node is found.
:raises: InvalidParameterValue with unknown payload version
"""
LOG.debug('Agent lookup using data %s', kwargs)
uuid = kwargs.get('node_uuid')
if uuid:
node = objects.Node.get_by_uuid(context, uuid)
else:
inventory = kwargs.get('inventory')
interfaces = self._get_interfaces(inventory)
mac_addresses = self._get_mac_addresses(interfaces)
node = self._find_node_by_macs(context, mac_addresses)
LOG.info(_LI('Initial lookup for node %s succeeded, agent is running '
'and waiting for commands'), node.uuid)
ndict = node.as_dict()
if not context.show_password:
ndict['driver_info'] = ast.literal_eval(
strutils.mask_password(ndict['driver_info'], "******"))
return {
'heartbeat_timeout': CONF.agent.heartbeat_timeout,
'node': ndict,
}
def _get_completed_cleaning_command(self, task, commands):
"""Returns None or a completed cleaning command from the agent.
:param commands: a set of command results from the agent, typically
fetched with agent_client.get_commands_status()
"""
if not commands:
return
last_command = commands[-1]
if last_command['command_name'] != 'execute_clean_step':
# catches race condition where execute_clean_step is still
# processing so the command hasn't started yet
LOG.debug('Expected agent last command to be "execute_clean_step" '
'for node %(node)s, instead got "%(command)s". Waiting '
'for next heartbeat.',
{'node': task.node.uuid,
'command': last_command['command_name']})
return
last_result = last_command.get('command_result') or {}
last_step = last_result.get('clean_step')
if last_command['command_status'] == 'RUNNING':
LOG.debug('Clean step still running for node %(node)s: %(step)s',
{'step': last_step, 'node': task.node.uuid})
return
elif (last_command['command_status'] == 'SUCCEEDED' and
last_step != task.node.clean_step):
# A previous clean_step was running, the new command has not yet
# started.
LOG.debug('Clean step not yet started for node %(node)s: %(step)s',
{'step': last_step, 'node': task.node.uuid})
return
else:
return last_command
def _get_interfaces(self, inventory):
interfaces = []
try:
interfaces = inventory['interfaces']
except (KeyError, TypeError):
raise exception.InvalidParameterValue(_(
'Malformed network interfaces lookup: %s') % inventory)
return interfaces
def _get_mac_addresses(self, interfaces):
"""Returns MACs for the network devices."""
mac_addresses = []
for interface in interfaces:
try:
mac_addresses.append(utils.validate_and_normalize_mac(
interface.get('mac_address')))
except exception.InvalidMAC:
LOG.warning(_LW('Malformed MAC: %s'), interface.get(
'mac_address'))
return mac_addresses
def _find_node_by_macs(self, context, mac_addresses):
"""Get nodes for a given list of MAC addresses.
Given a list of MAC addresses, find the ports that match the MACs
and return the node they are all connected to.
:raises: NodeNotFound if the ports point to multiple nodes or no
nodes.
"""
ports = self._find_ports_by_macs(context, mac_addresses)
if not ports:
raise exception.NodeNotFound(_(
'No ports matching the given MAC addresses %s exist in the '
'database.') % mac_addresses)
node_id = self._get_node_id(ports)
try:
node = objects.Node.get_by_id(context, node_id)
except exception.NodeNotFound:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Could not find matching node for the '
'provided MACs %s.'), mac_addresses)
return node
def _find_ports_by_macs(self, context, mac_addresses):
"""Get ports for a given list of MAC addresses.
Given a list of MAC addresses, find the ports that match the MACs
and return them as a list of Port objects, or an empty list if there
are no matches
"""
ports = []
for mac in mac_addresses:
# Will do a search by mac if the mac isn't malformed
try:
port_ob = objects.Port.get_by_address(context, mac)
ports.append(port_ob)
except exception.PortNotFound:
LOG.warning(_LW('MAC address %s not found in database'), mac)
return ports
def _get_node_id(self, ports):
"""Get a node ID for a list of ports.
Given a list of ports, either return the node_id they all share or
raise a NotFound if there are multiple node_ids, which indicates some
ports are connected to one node and the remaining port(s) are connected
to one or more other nodes.
:raises: NodeNotFound if the MACs match multiple nodes. This
could happen if you swapped a NIC from one server to another and
don't notify Ironic about it or there is a MAC collision (since
they're not guaranteed to be unique).
"""
# See if all the ports point to the same node
node_ids = set(port_ob.node_id for port_ob in ports)
if len(node_ids) > 1:
raise exception.NodeNotFound(_(
'Ports matching mac addresses match multiple nodes. MACs: '
'%(macs)s. Port ids: %(port_ids)s') %
{'macs': [port_ob.address for port_ob in ports], 'port_ids':
[port_ob.uuid for port_ob in ports]}
)
# Only have one node_id left, return it.
return node_ids.pop()
def _log_and_raise_deployment_error(self, task, msg):
"""Helper method to log the error and raise exception."""
LOG.error(msg)
deploy_utils.set_failed_state(task, msg)
raise exception.InstanceDeployFailure(msg)
def reboot_and_finish_deploy(self, task):
"""Helper method to trigger reboot on the node and finish deploy.
This method initiates a reboot on the node. On success, it
marks the deploy as complete. On failure, it logs the error
and marks deploy as failure.
:param task: a TaskManager object containing the node
:raises: InstanceDeployFailure, if node reboot failed.
"""
wait = CONF.agent.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.agent.post_deploy_get_power_state_retries + 1
@retrying.retry(
stop_max_attempt_number=attempts,
retry_on_result=lambda state: state != states.POWER_OFF,
wait_fixed=wait
)
def _wait_until_powered_off(task):
return task.driver.power.get_power_state(task)
node = task.node
# Whether ironic should power off the node via out-of-band or
# in-band methods
oob_power_off = strutils.bool_from_string(
node.driver_info.get('deploy_forces_oob_reboot', False))
try:
if not oob_power_off:
try:
self._client.power_off(node)
_wait_until_powered_off(task)
except Exception as e:
LOG.warning(
_LW('Failed to soft power off node %(node_uuid)s '
'in at least %(timeout)d seconds. '
'Error: %(error)s'),
{'node_uuid': node.uuid,
'timeout': (wait * (attempts - 1)) / 1000,
'error': e})
manager_utils.node_power_action(task, states.POWER_OFF)
else:
# Flush the file system prior to hard rebooting the node
result = self._client.sync(node)
error = result.get('faultstring')
if error:
if 'Unknown command' in error:
error = _('The version of the IPA ramdisk used in '
'the deployment does not support the '
'command "sync"')
LOG.warning(_LW(
'Failed to flush the file system prior to hard '
'rebooting the node %(node)s. Error: %(error)s'),
{'node': node.uuid, 'error': error})
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.remove_provisioning_network(task)
task.driver.network.configure_tenant_networks(task)
manager_utils.node_power_action(task, states.POWER_ON)
except Exception as e:
msg = (_('Error rebooting node %(node)s after deploy. '
'Error: %(error)s') %
{'node': node.uuid, 'error': e})
self._log_and_raise_deployment_error(task, msg)
task.process_event('done')
LOG.info(_LI('Deployment to node %s done'), task.node.uuid)
def prepare_instance_to_boot(self, task, root_uuid, efi_sys_uuid):
"""Prepares instance to boot.
:param task: a TaskManager object containing the node
:param root_uuid: the UUID for root partition
:param efi_sys_uuid: the UUID for the efi partition
:raises: InvalidState if fails to prepare instance
"""
node = task.node
if deploy_utils.get_boot_option(node) == "local":
# Install the boot loader
self.configure_local_boot(
task, root_uuid=root_uuid,
efi_system_part_uuid=efi_sys_uuid)
try:
task.driver.boot.prepare_instance(task)
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
msg = _('Failed to continue agent deployment.')
self._log_and_raise_deployment_error(task, msg)
def configure_local_boot(self, task, root_uuid=None,
efi_system_part_uuid=None):
"""Helper method to configure local boot on the node.
This method triggers bootloader installation on the node.
On successful installation of bootloader, this method sets the
node to boot from disk.
:param task: a TaskManager object containing the node
:param root_uuid: The UUID of the root partition. This is used
for identifying the partition which contains the image deployed
or None in case of whole disk images which we expect to already
have a bootloader installed.
:param efi_system_part_uuid: The UUID of the efi system partition.
This is used only in uefi boot mode.
:raises: InstanceDeployFailure if bootloader installation failed or
on encountering error while setting the boot device on the node.
"""
node = task.node
LOG.debug('Configuring local boot for node %s', node.uuid)
if not node.driver_internal_info.get(
'is_whole_disk_image') and root_uuid:
LOG.debug('Installing the bootloader for node %(node)s on '
'partition %(part)s, EFI system partition %(efi)s',
{'node': node.uuid, 'part': root_uuid,
'efi': efi_system_part_uuid})
result = self._client.install_bootloader(
node, root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid)
if result['command_status'] == 'FAILED':
msg = (_("Failed to install a bootloader when "
"deploying node %(node)s. Error: %(error)s") %
{'node': node.uuid,
'error': result['command_error']})
self._log_and_raise_deployment_error(task, msg)
try:
deploy_utils.try_set_boot_device(task, boot_devices.DISK)
except Exception as e:
msg = (_("Failed to change the boot device to %(boot_dev)s "
"when deploying node %(node)s. Error: %(error)s") %
{'boot_dev': boot_devices.DISK, 'node': node.uuid,
'error': e})
self._log_and_raise_deployment_error(task, msg)
LOG.info(_LI('Local boot successfully configured for node %s'),
node.uuid)
|
|
#!/usr/bin/env python
"""
Perform data manipulation tasks in project workflow
"""
import os
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import boto
import numpy as np
import pandas as pd
import boto.s3
from boto.s3.key import Key
from scipy.stats import randint as sp_randint
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
__author__ = "Pearl Philip"
__credits__ = "David Beck"
__license__ = "BSD 3-Clause License"
__maintainer__ = "Pearl Philip"
__email__ = "pphilip@uw.edu"
__status__ = "Development"
def create_notation_dataframe(filename):
"""
Returning Pandas dataframe of sample ID and molecular notation.
:param filename: File object containing molecular notation indexed by sample ID
:return: Dataframe of molecular notation indexed by sample ID.
"""
df = []
for line in filename:
# Splits the line into its key and molecular string
words = line.split()
z = [int(words[0]), words[1]]
df.append(z)
df = pd.DataFrame(df)
df.columns = ['CID', 'SMILES']
df.sort_values(by='CID', inplace=True)
return df
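# Illustrative input line (hypothetical CID/SMILES pair): a line such as
# '12345 CC(=O)O' yields CID 12345 and SMILES 'CC(=O)O'.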
def create_activity_dataframe(dataframe):
"""
Performing useful transformations on the acquired data for use in the subsequent algorithm.
:param dataframe: Dataframe downloaded from NCBI database.
:return: df: Cleaned and sorted dataframe.
"""
# Eliminates the first five text rows of the csv
df = dataframe
for j in range(5):
df = df.drop(j, axis=0)
df = df.drop(['PUBCHEM_ACTIVITY_URL', 'PUBCHEM_RESULT_TAG',
'PUBCHEM_ACTIVITY_SCORE', 'PUBCHEM_SID',
'PUBCHEM_ASSAYDATA_COMMENT', 'Potency',
'Efficacy', 'Analysis Comment',
'Curve_Description', 'Fit_LogAC50',
'Fit_HillSlope', 'Fit_R2', 'Fit_ZeroActivity',
'Fit_CurveClass', 'Excluded_Points', 'Compound QC',
'Max_Response', 'Phenotype', 'Activity at 0.457 uM',
'Activity at 2.290 uM', 'Activity at 11.40 uM',
'Activity at 57.10 uM', 'PUBCHEM_ACTIVITY_OUTCOME',
'Fit_InfiniteActivity'], axis=1)
df.rename(columns={'PUBCHEM_CID': 'CID'}, inplace=True)
# Eliminates duplicate compound rows
df['dupes'] = df.duplicated('CID')
df = df[df['dupes'] == 0].drop(['dupes'], axis=1)
df = df.sort_values(by='CID')
return df
def upload_to_s3(aws_access_key_id, aws_secret_access_key, file_to_s3, bucket, key, callback=None, md5=None,
reduced_redundancy=False, content_type=None):
"""
Uploads the given file to the AWS S3 bucket and key specified.
:param aws_access_key_id: First part of AWS access key.
:param aws_secret_access_key: Second part of AWS access key.
:param file_to_s3: File object to be uploaded.
:param bucket: S3 bucket name as string.
:param key: Name attribute of the file object to be uploaded.
:param callback: Function that accepts two integer parameters, the first representing the number of bytes that
have been successfully transmitted to S3 and the second representing the total size of the object to be
transmitted.
:param md5: MD5 checksum value to verify the integrity of the object.
:param reduced_redundancy: S3 option that enables customers to reduce their costs
by storing noncritical, reproducible data at lower levels of redundancy than S3's standard storage.
:param content_type: Set the type of content in file object.
:return: Boolean indicating success of upload.
"""
try:
size = os.fstat(file_to_s3.fileno()).st_size
except (AttributeError, OSError):
# Not all file objects implement fileno(), so we fall back on this
file_to_s3.seek(0, os.SEEK_END)
size = file_to_s3.tell()
conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
bucket = conn.get_bucket(bucket, validate=True)
k = Key(bucket)
k.key = key
if content_type:
k.set_metadata('Content-Type', content_type)
sent = k.set_contents_from_file(file_to_s3, cb=callback, md5=md5,
reduced_redundancy=reduced_redundancy, rewind=True)
# Rewind for later use
file_to_s3.seek(0)
if sent == size:
return True
return False
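# Illustrative usage sketch (placeholder credentials, bucket and key names):
#
#     with open('../data/df_smiles.csv', 'rb') as f:
#         uploaded = upload_to_s3('AKIA...', 'SECRET...', f,
#                                 bucket='my-bucket', key='data/df_smiles.csv',
#                                 content_type='text/csv')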
def join_dataframes():
"""
Joining the dataframes of existing descriptor files from their urls into a single dataframe.
:return: Dataframe after join over key column.
"""
url_list = ['https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_constitution.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_con.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_kappa.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_estate.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_basak.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_property.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_charge.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_moe.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_burden.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_geary.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_moran.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_topology.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_geometric.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_cpsa.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_rdf.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_morse.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_whim.csv'
]
url_exist_list = []
for url in url_list:
try:
r = urllib2.urlopen(url)
except urllib2.URLError as e:
r = e
if getattr(r, 'code', None) and r.code < 400:
url_exist_list.append(url)
i = 0
df = [0] * len(url_exist_list)
for url in url_exist_list:
df[i] = pd.read_csv(url)
df[i].drop(df[i].columns[0], axis=1, inplace=True)
df[i].reset_index(drop=True, inplace=True)
i += 1
joined_df = df[0]
for i in df[1:]:
joined_df = joined_df.join(i)
return joined_df
def choose_features(x_train, y_train, x_test, column_names):
"""
Selecting the features of high importance to reduce feature space.
:param x_train: Training set of features.
:param x_test: Test set of features.
:param y_train: Training target values
:param column_names: Names of columns in x
"""
# Random forest feature importance
clf = RandomForestRegressor(n_jobs=-1, random_state=1, n_estimators=10)
clf.fit(x_train, y_train.ravel())
feature_importance = clf.feature_importances_
scores_table = pd.DataFrame({'feature': column_names, 'scores':
feature_importance}).sort_values(by=['scores'], ascending=False)
scores = scores_table['scores'].tolist()
n_features = [25, 50, 75, 100, 150, 200, 250, 300]
for n in n_features:
feature_scores = scores_table['feature'].tolist()
selected_features = feature_scores[:n]
x_train = pd.DataFrame(x_train, columns=column_names)
desired_x_train = x_train[selected_features]
x_test = pd.DataFrame(x_test, columns=column_names)
desired_x_test = x_test[selected_features]
desired_x_train.to_csv('../data/x_train_postprocessing_rfr_%d.csv' % n)
desired_x_test.to_csv('../data/x_test_postprocessing_rfr_%d.csv' % n)
pd.DataFrame(scores).to_csv('../data/feature_scores_rfr.csv')
return
def change_nan_infinite(dataframe):
"""
Replacing NaN and infinite values from the dataframe with zeros.
:param dataframe: Dataframe containing NaN and infinite values.
:return data: Data with no NaN or infinite values.
"""
dataframe.replace([np.inf, -np.inf], np.nan, inplace=True)
data = dataframe.fillna(0)
return data
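# Illustrative usage sketch (hypothetical column name 'a'):
#
#     cleaned = change_nan_infinite(pd.DataFrame({'a': [1.0, np.nan, np.inf]}))
#     # column 'a' becomes [1.0, 0.0, 0.0]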
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
str_to_int,
unescapeHTML,
unified_strdate,
url_or_none,
)
from ..aes import aes_decrypt_text
class YouPornIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?youporn\.com/(?:watch|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?'
_TESTS = [{
'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
'md5': '3744d24c50438cf5b6f6d59feb5055c2',
'info_dict': {
'id': '505835',
'display_id': 'sex-ed-is-it-safe-to-masturbate-daily',
'ext': 'mp4',
'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 210,
'uploader': 'Ask Dan And Jennifer',
'upload_date': '20101217',
'average_rating': int,
'view_count': int,
'categories': list,
'tags': list,
'age_limit': 18,
},
}, {
# Unknown uploader
'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4',
'info_dict': {
'id': '561726',
'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show',
'ext': 'mp4',
'title': 'Big Tits Awesome Brunette On amazing webcam show',
'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Unknown',
'upload_date': '20110418',
'average_rating': int,
'view_count': int,
'categories': list,
'tags': list,
'age_limit': 18,
},
'params': {
'skip_download': True,
},
'skip': '404',
}, {
'url': 'https://www.youporn.com/embed/505835/sex-ed-is-it-safe-to-masturbate-daily/',
'only_matching': True,
}, {
'url': 'http://www.youporn.com/watch/505835',
'only_matching': True,
}, {
'url': 'https://www.youporn.com/watch/13922959/femdom-principal/',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)',
webpage)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
webpage = self._download_webpage(
'http://www.youporn.com/watch/%s' % video_id, display_id,
headers={'Cookie': 'age_verified=1'})
title = self._html_search_regex(
r'(?s)<div[^>]+class=["\']watchVideoTitle[^>]+>(.+?)</div>',
webpage, 'title', default=None) or self._og_search_title(
webpage, default=None) or self._html_search_meta(
'title', webpage, fatal=True)
links = []
# Main source
definitions = self._parse_json(
self._search_regex(
r'mediaDefinition\s*[=:]\s*(\[.+?\])\s*[;,]', webpage,
'media definitions', default='[]'),
video_id, fatal=False)
if definitions:
for definition in definitions:
if not isinstance(definition, dict):
continue
video_url = url_or_none(definition.get('videoUrl'))
if video_url:
links.append(video_url)
# Fallback #1, this also contains extra low quality 180p format
for _, link in re.findall(r'<a[^>]+href=(["\'])(http(?:(?!\1).)+\.mp4(?:(?!\1).)*)\1[^>]+title=["\']Download [Vv]ideo', webpage):
links.append(link)
# Fallback #2 (unavailable as at 22.06.2017)
sources = self._search_regex(
r'(?s)sources\s*:\s*({.+?})', webpage, 'sources', default=None)
if sources:
for _, link in re.findall(r'[^:]+\s*:\s*(["\'])(http.+?)\1', sources):
links.append(link)
# Fallback #3 (unavailable as at 22.06.2017)
for _, link in re.findall(
r'(?:videoSrc|videoIpadUrl|html5PlayerSrc)\s*[:=]\s*(["\'])(http.+?)\1', webpage):
links.append(link)
# Fallback #4, encrypted links (unavailable as at 22.06.2017)
for _, encrypted_link in re.findall(
r'encryptedQuality\d{3,4}URL\s*=\s*(["\'])([\da-zA-Z+/=]+)\1', webpage):
links.append(aes_decrypt_text(encrypted_link, title, 32).decode('utf-8'))
formats = []
for video_url in set(unescapeHTML(link) for link in links):
f = {
'url': video_url,
}
# Video URL's path looks like this:
# /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
# /201012/17/505835/vl_240p_240k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
# /videos/201703/11/109285532/1080P_4000K_109285532.mp4
# We will benefit from it by extracting some metadata
mobj = re.search(r'(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url)
if mobj:
height = int(mobj.group('height'))
bitrate = int(mobj.group('bitrate'))
f.update({
'format_id': '%dp-%dk' % (height, bitrate),
'height': height,
'tbr': bitrate,
})
formats.append(f)
self._sort_formats(formats)
description = self._html_search_regex(
r'(?s)<div[^>]+\bid=["\']description["\'][^>]*>(.+?)</div>',
webpage, 'description',
default=None) or self._og_search_description(
webpage, default=None)
thumbnail = self._search_regex(
r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = int_or_none(self._html_search_meta(
'video:duration', webpage, 'duration', fatal=False))
uploader = self._html_search_regex(
r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>',
webpage, 'uploader', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
[r'UPLOADED:\s*<span>([^<]+)',
r'Date\s+[Aa]dded:\s*<span>([^<]+)',
r'(?s)<div[^>]+class=["\']videoInfo(?:Date|Time)["\'][^>]*>(.+?)</div>'],
webpage, 'upload date', fatal=False))
age_limit = self._rta_search(webpage)
average_rating = int_or_none(self._search_regex(
r'<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>',
webpage, 'average rating', fatal=False))
view_count = str_to_int(self._search_regex(
r'(?s)<div[^>]+class=(["\']).*?\bvideoInfoViews\b.*?\1[^>]*>.*?(?P<count>[\d,.]+)<',
webpage, 'view count', fatal=False, group='count'))
comment_count = str_to_int(self._search_regex(
r'>All [Cc]omments? \(([\d,.]+)\)',
webpage, 'comment count', default=None))
def extract_tag_box(regex, title):
tag_box = self._search_regex(regex, webpage, title, default=None)
if not tag_box:
return []
return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box)
categories = extract_tag_box(
r'(?s)Categories:.*?</[^>]+>(.+?)</div>', 'categories')
tags = extract_tag_box(
r'(?s)Tags:.*?</div>\s*<div[^>]+class=["\']tagBoxContent["\'][^>]*>(.+?)</div>',
'tags')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'uploader': uploader,
'upload_date': upload_date,
'average_rating': average_rating,
'view_count': view_count,
'comment_count': comment_count,
'categories': categories,
'tags': tags,
'age_limit': age_limit,
'formats': formats,
}
|
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition.pca import PCA, RandomizedPCA
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline,
[('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from the estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("pca", pca), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different pca object to control the random_state stream
fs = FeatureUnion([("pca", pca), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_feature_names():
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
# CFString.h
kCFStringEncodingMacRoman = 0
kCFStringEncodingWindowsLatin1 = 0x0500
kCFStringEncodingISOLatin1 = 0x0201
kCFStringEncodingNextStepLatin = 0x0B01
kCFStringEncodingASCII = 0x0600
kCFStringEncodingUnicode = 0x0100
kCFStringEncodingUTF8 = 0x08000100
kCFStringEncodingNonLossyASCII = 0x0BFF
# MacTypes.h
noErr = 0
# CarbonEventsCore.h
eventLoopTimedOutErr = -9875
# MacApplication.h
kUIModeNormal = 0
kUIModeContentSuppressed = 1
kUIModeContentHidden = 2
kUIModeAllSuppressed = 4
kUIModeAllHidden = 3
kUIOptionAutoShowMenuBar = 1 << 0
kUIOptionDisableAppleMenu = 1 << 2
kUIOptionDisableProcessSwitch = 1 << 3
kUIOptionDisableForceQuit = 1 << 4
kUIOptionDisableSessionTerminate = 1 << 5
kUIOptionDisableHide = 1 << 6
# MacWindows.h
kAlertWindowClass = 1
kMovableAlertWindowClass = 2
kModalWindowClass = 3
kMovableModalWindowClass = 4
kFloatingWindowClass = 5
kDocumentWindowClass = 6
kUtilityWindowClass = 8
kHelpWindowClass = 10
kSheetWindowClass = 11
kToolbarWindowClass = 12
kPlainWindowClass = 13
kOverlayWindowClass = 14
kSheetAlertWindowClass = 15
kAltPlainWindowClass = 16
kSimpleWindowClass = 18 # no window frame
kDrawerWindowClass = 20
kWindowNoAttributes = 0x0
kWindowCloseBoxAttribute = 0x1
kWindowHorizontalZoomAttribute = 0x2
kWindowVerticalZoomAttribute = 0x4
kWindowFullZoomAttribute = kWindowHorizontalZoomAttribute | \
kWindowVerticalZoomAttribute
kWindowCollapseBoxAttribute = 0x8
kWindowResizableAttribute = 0x10
kWindowSideTitlebarAttribute = 0x20
kWindowToolbarAttribute = 0x40
kWindowMetalAttribute = 1 << 8
kWindowDoesNotCycleAttribute = 1 << 15
kWindowNoupdatesAttribute = 1 << 16
kWindowNoActivatesAttribute = 1 << 17
kWindowOpaqueForEventsAttribute = 1 << 18
kWindowCompositingAttribute = 1 << 19
kWindowNoShadowAttribute = 1 << 21
kWindowHideOnSuspendAttribute = 1 << 24
kWindowAsyncDragAttribute = 1 << 23
kWindowStandardHandlerAttribute = 1 << 25
kWindowHideOnFullScreenAttribute = 1 << 26
kWindowInWindowMenuAttribute = 1 << 27
kWindowLiveResizeAttribute = 1 << 28
kWindowIgnoreClicksAttribute = 1 << 29
kWindowNoConstrainAttribute = 1 << 31
kWindowStandardDocumentAttributes = kWindowCloseBoxAttribute | \
kWindowFullZoomAttribute | \
kWindowCollapseBoxAttribute | \
kWindowResizableAttribute
kWindowStandardFloatingAttributes = kWindowCloseBoxAttribute | \
kWindowCollapseBoxAttribute
kWindowCenterOnMainScreen = 1
kWindowCenterOnParentWindow = 2
kWindowCenterOnParentWindowScreen = 3
kWindowCascadeOnMainScreen = 4
kWindowCascadeOnParentWindow = 5
kWindowCascadeonParentWindowScreen = 6
kWindowCascadeStartAtParentWindowScreen = 10
kWindowAlertPositionOnMainScreen = 7
kWindowAlertPositionOnParentWindow = 8
kWindowAlertPositionOnParentWindowScreen = 9
kWindowTitleBarRgn = 0
kWindowTitleTextRgn = 1
kWindowCloseBoxRgn = 2
kWindowZoomBoxRgn = 3
kWindowDragRgn = 5
kWindowGrowRgn = 6
kWindowCollapseBoxRgn = 7
kWindowTitleProxyIconRgn = 8
kWindowStructureRgn = 32
kWindowContentRgn = 33
kWindowUpdateRgn = 34
kWindowOpaqueRgn = 35
kWindowGlobalPortRgn = 40
kWindowToolbarButtonRgn = 41
inDesk = 0
inNoWindow = 0
inMenuBar = 1
inSysWindow = 2
inContent = 3
inDrag = 4
inGrow = 5
inGoAway = 6
inZoomIn = 7
inZoomOut = 8
inCollapseBox = 11
inProxyIcon = 12
inToolbarButton = 13
inStructure = 15
def _name(name):
return ord(name[0]) << 24 | \
ord(name[1]) << 16 | \
ord(name[2]) << 8 | \
ord(name[3])
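# For example, _name('TEXT') packs the four-character code big-endian into
# the integer 0x54455854, matching the Carbon OSType convention.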
# AEDataModel.h
typeBoolean = _name('bool')
typeChar = _name('TEXT')
typeSInt16 = _name('shor')
typeSInt32 = _name('long')
typeUInt32 = _name('magn')
typeSInt64 = _name('comp')
typeIEEE32BitFloatingPoint = _name('sing')
typeIEEE64BitFloatingPoint = _name('doub')
type128BitFloatingPoint = _name('ldbl')
typeDecimalStruct = _name('decm')
# AERegistry.h
typeUnicodeText = _name('utxt')
typeStyledUnicodeText = _name('sutx')
typeUTF8Text = _name('utf8')
typeEncodedString = _name('encs')
typeCString = _name('cstr')
typePString = _name('pstr')
typeEventRef = _name('evrf')
# CarbonEvents.h
kEventParamWindowRef = _name('wind')
kEventParamWindowPartCode = _name('wpar')
kEventParamGrafPort = _name('graf')
kEventParamMenuRef = _name('menu')
kEventParamEventRef = _name('evnt')
kEventParamControlRef = _name('ctrl')
kEventParamRgnHandle = _name('rgnh')
kEventParamEnabled = _name('enab')
kEventParamDimensions = _name('dims')
kEventParamBounds = _name('boun')
kEventParamAvailableBounds = _name('avlb')
#kEventParamAEEventID = keyAEEventID
#kEventParamAEEventClass = keyAEEventClass
kEventParamCGContextRef = _name('cntx')
kEventParamDeviceDepth = _name('devd')
kEventParamDeviceColor = _name('devc')
kEventParamMutableArray = _name('marr')
kEventParamResult = _name('ansr')
kEventParamMinimumSize = _name('mnsz')
kEventParamMaximumSize = _name('mxsz')
kEventParamAttributes = _name('attr')
kEventParamReason = _name('why?')
kEventParamTransactionID = _name('trns')
kEventParamGDevice = _name('gdev')
kEventParamIndex = _name('indx')
kEventParamUserData = _name('usrd')
kEventParamShape = _name('shap')
typeWindowRef = _name('wind')
typeWindowPartCode = _name('wpar')
typeGrafPtr = _name('graf')
typeGWorldPtr = _name('gwld')
typeMenuRef = _name('menu')
typeControlRef = _name('ctrl')
typeCollection = _name('cltn')
typeQDRgnHandle = _name('rgnh')
typeOSStatus = _name('osst')
typeCFIndex = _name('cfix')
typeCGContextRef = _name('cntx')
typeQDPoint = _name('QDpt')
typeHICommand = _name('hcmd')
typeHIPoint = _name('hipt')
typeHISize = _name('hisz')
typeHIRect = _name('hirc')
typeHIShapeRef = _name('shap')
typeVoidPtr = _name('void')
typeGDHandle = _name('gdev')
kCoreEventClass = _name('aevt')
kEventClassMouse = _name('mous')
kEventClassKeyboard = _name('keyb')
kEventClassTextInput = _name('text')
kEventClassApplication = _name('appl')
kEventClassAppleEvent = _name('eppc')
kEventClassMenu = _name('menu')
kEventClassWindow = _name('wind')
kEventClassControl = _name('cntl')
kEventClassCommand = _name('cmds')
kEventClassTablet = _name('tblt')
kEventClassVolume = _name('vol ')
kEventClassAppearance = _name('appm')
kEventClassService = _name('serv')
kEventClassToolbar = _name('tbar')
kEventClassToolbarItem = _name('tbit')
kEventClassToolbarItemView = _name('tbiv')
kEventClassAccessibility = _name('acce')
kEventClassSystem = _name('macs')
kEventClassInk = _name('ink ')
kEventClassTSMDocumentAccess = _name('tdac')
# Appearance.h
kThemeArrowCursor = 0
kThemeCopyArrowCursor = 1
kThemeAliasArrowCursor = 2
kThemeContextualMenuArrowCursor = 3
kThemeIBeamCursor = 4
kThemeCrossCursor = 5
kThemePlusCursor = 6
kThemeWatchCursor = 7
kThemeClosedHandCursor = 8
kThemeOpenHandCursor = 9
kThemePointingHandCursor = 10
kThemeCountingUpHandCursor = 11
kThemeCountingDownHandCursor = 12
kThemeCountingUpAndDownHandCursor = 13
kThemeSpinningCursor = 14
kThemeResizeLeftCursor = 15
kThemeResizeRightCursor = 16
kThemeResizeLeftRightCursor = 17
kThemeNotAllowedCursor = 18
kThemeResizeUpCursor = 19
kThemeResizeDownCursor = 20
kThemeResizeUpDownCursor = 21
kThemePoofCursor = 22
# AE
kEventAppleEvent = 1
kEventAppQuit = 3
kAEQuitApplication = _name('quit')
# Commands
kEventProcessCommand = 1
kEventParamHICommand = _name('hcmd')
kEventParamDirectObject = _name('----')
kHICommandQuit = _name('quit')
# Keyboard
kEventRawKeyDown = 1
kEventRawKeyRepeat = 2
kEventRawKeyUp = 3
kEventRawKeyModifiersChanged = 4
kEventHotKeyPressed = 5
kEventHotKeyReleased = 6
kEventParamKeyCode = _name('kcod')
kEventParamKeyMacCharCodes = _name('kchr')
kEventParamKeyModifiers = _name('kmod')
kEventParamKeyUnicodes = _name('kuni')
kEventParamKeyboardType = _name('kbdt')
typeEventHotKeyID = _name('hkid')
activeFlagBit = 0
btnStateBit = 7
cmdKeyBit = 8
shiftKeyBit = 9
alphaLockBit = 10
optionKeyBit = 11
controlKeyBit = 12
rightShiftKeyBit = 13
rightOptionKeyBit = 14
rightControlKeyBit = 15
numLockBit = 16
activeFlag = 1 << activeFlagBit
btnState = 1 << btnStateBit
cmdKey = 1 << cmdKeyBit
shiftKey = 1 << shiftKeyBit
alphaLock = 1 << alphaLockBit
optionKey = 1 << optionKeyBit
controlKey = 1 << controlKeyBit
rightShiftKey = 1 << rightShiftKeyBit
rightOptionKey = 1 << rightOptionKeyBit
rightControlKey = 1 << rightControlKeyBit
numLock = 1 << numLockBit
# TextInput
kEventTextInputUpdateActiveInputArea = 1
kEventTextInputUnicodeForKeyEvent = 2
kEventTextInputOffsetToPos = 3
kEventTextInputPosToOffset = 4
kEventTextInputShowHideBottomWindow = 5
kEventTextInputGetSelectedText = 6
kEventTextInputUnicodeText = 7
kEventParamTextInputSendText = _name('tstx')
kEventParamTextInputSendKeyboardEvent = _name('tske')
# Mouse
kEventMouseDown = 1
kEventMouseUp = 2
kEventMouseMoved = 5
kEventMouseDragged = 6
kEventMouseEntered = 8
kEventMouseExited = 9
kEventMouseWheelMoved = 10
kEventParamMouseLocation = _name('mloc')
kEventParamWindowMouseLocation = _name('wmou')
kEventParamMouseButton = _name('mbtn')
kEventParamClickCount = _name('ccnt')
kEventParamMouseWheelAxis = _name('mwax')
kEventParamMouseWheelDelta = _name('mwdl')
kEventParamMouseDelta = _name('mdta')
kEventParamMouseChord = _name('chor')
kEventParamTabletEventType = _name('tblt')
kEventParamMouseTrackingRef = _name('mtrf')
typeMouseButton = _name('mbtn')
typeMouseWheelAxis = _name('mwax')
typeMouseTrackingRef = _name('mtrf')
kMouseTrackingOptionsLocalClip = 0
kMouseTrackingOptionsGlobalClip = 1
kEventMouseButtonPrimary = 1
kEventMouseButtonSecondary = 2
kEventMouseButtonTertiary = 3
kEventMouseWheelAxisX = 0
kEventMouseWheelAxisY = 1
DEFAULT_CREATOR_CODE = _name('PYGL') # <ah> this is registered for Pyglet
# apps. register your own at:
# http://developer.apple.com/datatype
# Window
kEventWindowUpdate = 1
kEventWindowDrawContent = 2
# -- window activation events --
kEventWindowActivated = 5
kEventWindowDeactivated = 6
kEventWindowHandleActivate = 91
kEventWindowHandleDeactivate = 92
kEventWindowGetClickActivation = 7
kEventWindowGetClickModality = 8
# -- window state change events --
kEventWindowShowing = 22
kEventWindowHiding = 23
kEventWindowShown = 24
kEventWindowHidden = 25
kEventWindowCollapsing = 86
kEventWindowCollapsed = 67
kEventWindowExpanding = 87
kEventWindowExpanded = 70
kEventWindowZoomed = 76
kEventWindowBoundsChanging = 26
kEventWindowBoundsChanged = 27
kEventWindowResizeStarted = 28
kEventWindowResizeCompleted = 29
kEventWindowDragStarted = 30
kEventWindowDragCompleted = 31
kEventWindowClosed = 73
kEventWindowTransitionStarted = 88
kEventWindowTransitionCompleted = 89
# -- window click events --
kEventWindowClickDragRgn = 32
kEventWindowClickResizeRgn = 33
kEventWindowClickCollapseRgn = 34
kEventWindowClickCloseRgn = 35
kEventWindowClickZoomRgn = 36
kEventWindowClickContentRgn = 37
kEventWindowClickProxyIconRgn = 38
kEventWindowClickToolbarButtonRgn = 41
kEventWindowClickStructureRgn = 42
# -- window cursor change events --
kEventWindowCursorChange = 40
# -- window action events --
kEventWindowCollapse = 66
kEventWindowCollapsed = 67
kEventWindowCollapseAll = 68
kEventWindowExpand = 69
kEventWindowExpanded = 70
kEventWindowExpandAll = 71
kEventWindowClose = 72
kEventWindowClosed = 73
kEventWindowCloseAll = 74
kEventWindowZoom = 75
kEventWindowZoomed = 76
kEventWindowZoomAll = 77
kEventWindowContextualMenuSelect = 78
kEventWindowPathSelect = 79
kEventWindowGetIdealSize = 80
kEventWindowGetMinimumSize = 81
kEventWindowGetMaximumSize = 82
kEventWindowConstrain = 83
kEventWindowHandleContentClick = 85
kEventWindowCollapsing = 86
kEventWindowExpanding = 87
kEventWindowTransitionStarted = 88
kEventWindowTransitionCompleted = 89
kEventWindowGetDockTileMenu = 90
kEventWindowHandleActivate = 91
kEventWindowHandleDeactivate = 92
kEventWindowProxyBeginDrag = 128
kEventWindowProxyEndDrag = 129
kEventWindowToolbarSwitchMode = 150
# -- window focus events --
kEventWindowFocusAcquired = 200
kEventWindowFocusRelinquish = 201
kEventWindowFocusContent = 202
kEventWindowFocusToolbar = 203
kEventWindowFocusDrawer = 204
# -- sheet events --
kEventWindowSheetOpening = 210
kEventWindowSheetOpened = 211
kEventWindowSheetClosing = 212
kEventWindowSheetClosed = 213
# -- drawer events --
kEventWindowDrawerOpening = 220
kEventWindowDrawerOpened = 221
kEventWindowDrawerClosing = 222
kEventWindowDrawerClosed = 223
# -- window definition events --
kEventWindowDrawFrame = 1000
kEventWindowDrawPart = 1001
kEventWindowGetRegion = 1002
kEventWindowHitTest = 1003
kEventWindowInit = 1004
kEventWindowDispose = 1005
kEventWindowDragHilite = 1006
kEventWindowModified = 1007
kEventWindowSetupProxyDragImage = 1008
kEventWindowStateChanged = 1009
kEventWindowMeasureTitle = 1010
kEventWindowDrawGrowBox = 1011
kEventWindowGetGrowImageRegion = 1012
kEventWindowPaint = 1013
# Process.h
kNoProcess = 0
kSystemProcess = 1
kCurrentProcess = 2
# CGColorSpace.h
kCGRenderingIntentDefault = 0
# CGImage.h
kCGImageAlphaNone = 0
kCGImageAlphaPremultipliedLast = 1
kCGImageAlphaPremultipliedFirst = 2
kCGImageAlphaLast = 3
kCGImageAlphaFirst = 4
kCGImageAlphaNoneSkipLast = 5
kCGImageAlphaNoneSkipFirst = 6
kCGImageAlphaOnly = 7
|
|
# 6.00 Problem Set 3A Solutions
#
# The 6.00 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
#
#
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1,
    'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1,
    's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordlist: list of strings
wordlist = []
for line in inFile:
wordlist.append(line.strip().lower())
print(" ", len(wordlist), "words loaded.")
return wordlist
def get_frequency_dict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
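# Illustrative example: get_frequency_dict('banana') returns
# {'b': 1, 'a': 3, 'n': 2} (key order may vary).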
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def get_word_score(word, n):
"""
Returns the score for a word. Assumes the word is a
valid word.
The score for a word is the sum of the points for letters
in the word multiplied by the length of the word, plus 50
points if all n letters are used on the first go.
Letters are scored as in Scrabble; A is worth 1, B is
worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.
word: string (lowercase letters)
returns: int >= 0
"""
# TO DO...
result = 0
for c in word:
# print("letter:",c)
result = result + SCRABBLE_LETTER_VALUES[c]
result = result * len(word)
    if len(word) == n:
result = result + 50
# print("result:", result)
return result
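# Illustrative examples: get_word_score('weed', 7) -> (4 + 1 + 1 + 2) * 4 = 32,
# and get_word_score('weed', 4) -> 32 + 50 = 82, because all 4 letters of a
# 4-letter hand are used.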
#
# Make sure you understand how this function works and what it does!
#
def display_hand(hand):
"""
    Returns a string of the letters currently in the hand.
    For example:
      display_hand({'a':1, 'x':2, 'l':3, 'e':1})
    Should return something like:
      a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
handLetters = ""
for letter in hand.keys():
for j in range(hand[letter]):
            handLetters = handLetters + letter + " "  # build up all letters on one line
return handLetters
#
# Make sure you understand how this function works and what it does!
#
def deal_hand(n):
"""
Returns a random hand containing n lowercase letters.
    At least n/3 of the letters in the hand should be VOWELS.
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
num_vowels = round(n / 3)
for i in range(num_vowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(num_vowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
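# Illustrative example: deal_hand(7) might return
# {'e': 1, 'u': 1, 'r': 2, 't': 1, 'd': 1, 'p': 1} -- round(7 / 3) = 2 vowels
# plus 5 consonants, with repeats counted in the values.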
#
# Problem #2: Update a hand by removing letters
#
def update_hand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
# TO DO ...
    new_hand = hand.copy()
    for c in word:
        new_hand[c] = new_hand.get(c, 0) - 1
        if new_hand[c] == 0:
            del new_hand[c]
    return new_hand
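# Illustrative example: update_hand({'h': 1, 'e': 1, 'l': 2, 'o': 1}, 'hell')
# returns {'o': 1}; the hand passed in is left unchanged.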
#
# Problem #3: Test word validity
#
def is_valid_word(word, hand, word_list):
"""
Returns True if word is in the word_list and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or word_list.
word: string
hand: dictionary (string -> int)
word_list: list of lowercase strings
"""
# TO DO...
checkHand = hand.copy()
for c in word:
if checkHand.get(c) is None:
return False
else:
checkHand[c] = checkHand[c] - 1
if checkHand[c] < 0:
return False
    return word in word_list
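# Illustrative example: with hand {'h': 1, 'e': 1, 'l': 2, 'o': 1} and a word_list
# containing 'hello', is_valid_word('hello', hand, word_list) is True, while
# is_valid_word('hill', hand, word_list) is False because 'i' is not in the hand.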
def calculate_handlen(hand):
    """
    Returns the total number of letters remaining in the hand.
    hand: dictionary (string -> int)
    returns: int
    """
    handlen = 0
    for v in hand.values():
        handlen += v
    return handlen
#
# Problem #4: Playing a hand
#
def play_hand(word_list, hand):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word.
* An invalid word is rejected, and a message is displayed asking
the user to choose another word.
* When a valid word is entered, it uses up letters from the hand.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters.
The user can also finish playing the hand by inputing a single
period (the string '.') instead of a word.
hand: dictionary (string -> int)
word_list: list of lowercase strings
"""
# TO DO ...
# if hand is None:
# hand = deal_hand(HAND_SIZE)
userWord = ""
score = 0
while userWord is not ".":
print("Current hand: ", display_hand(hand))
userWord = input("Enter word, or a \".\" to indicate that you are finished: ")
if userWord is ".":
print("Total score: ", score )
break
if is_valid_word(userWord, hand, word_list) is False:
print("Invalid word, please try again")
else:
tempScore = get_word_score(userWord, len(userWord))
score = score + tempScore
update_hand(hand, userWord)
print("\"" + userWord + "\"" + " earned " + str(tempScore) + " points. Total: " + str(score) + " points")
#
# Problem #5: Playing a game
# Make sure you understand how this code works!
#
def play_game(word_list):
"""
Allow the user to play an arbitrary number of hands.
* Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'n', let the user play a new (random) hand.
When done playing the hand, ask the 'n' or 'e' question again.
* If the user inputs 'r', let the user play the last hand again.
* If the user inputs 'e', exit the game.
* If the user inputs anything else, ask them again.
"""
# TO DO...
userWord = ""
hand = {}
previousHand = {}
    while userWord != "e":
        userWord = input("Welcome to word game, if you want to play a random hand type \"n\", if you want to play the last hand again type \"r\". If you want to exit just type \"e\": ")
        if userWord == "n":
            hand = deal_hand(HAND_SIZE)
            previousHand = hand.copy()
            play_hand(word_list, hand)
        elif userWord == "r":
            if not previousHand:
                previousHand = deal_hand(HAND_SIZE)
            tempHand = previousHand.copy()
            play_hand(word_list, tempHand)
    exit()
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
word_list = load_words()
#hand = deal_hand(HAND_SIZE)
#play_hand(word_list, hand)
play_game(word_list)
|
|
# Copyright 2020 Nexenta by DDN, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for NexentaStor5 REST API helper
"""
import copy
import hashlib
import json
import posixpath
from unittest import mock
import uuid
import requests
import six
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.nexenta.ns5 import jsonrpc
class FakeNefProxy(object):
def __init__(self):
self.proto = 'nfs'
self.scheme = 'https'
self.port = 8443
self.hosts = ['1.1.1.1', '2.2.2.2']
self.host = self.hosts[0]
self.root = '/storage/filesystems'
self.path = 'pool/share'
self.username = 'username'
self.password = 'password'
self.retries = 3
self.timeout = 5
self.session = mock.Mock()
self.session.headers = {}
def __getattr__(self, name):
pass
def delay(self, interval):
pass
def delete_bearer(self):
pass
def update_lock(self):
pass
def update_token(self, token):
pass
def update_host(self, host):
pass
def url(self, path):
return '%s://%s:%s/%s' % (self.scheme, self.host, self.port, path)
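    # Illustrative example: with the defaults above,
    # FakeNefProxy().url('storage/filesystems') returns
    # 'https://1.1.1.1:8443/storage/filesystems'.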
class TestNefException(test.TestCase):
def test_message(self):
message = 'test message 1'
result = jsonrpc.NefException(message)
self.assertIn(message, result.msg)
def test_message_kwargs(self):
code = 'EAGAIN'
message = 'test message 2'
result = jsonrpc.NefException(message, code=code)
self.assertEqual(code, result.code)
self.assertIn(message, result.msg)
def test_no_message_kwargs(self):
code = 'ESRCH'
message = 'test message 3'
result = jsonrpc.NefException(None, code=code, message=message)
self.assertEqual(code, result.code)
self.assertIn(message, result.msg)
def test_message_plus_kwargs(self):
code = 'ENODEV'
message1 = 'test message 4'
message2 = 'test message 5'
result = jsonrpc.NefException(message1, code=code, message=message2)
self.assertEqual(code, result.code)
self.assertIn(message2, result.msg)
def test_dict(self):
code = 'ENOENT'
message = 'test message 4'
result = jsonrpc.NefException({'code': code, 'message': message})
self.assertEqual(code, result.code)
self.assertIn(message, result.msg)
def test_kwargs(self):
code = 'EPERM'
message = 'test message 5'
result = jsonrpc.NefException(code=code, message=message)
self.assertEqual(code, result.code)
self.assertIn(message, result.msg)
def test_dict_kwargs(self):
code = 'EINVAL'
message = 'test message 6'
result = jsonrpc.NefException({'code': code}, message=message)
self.assertEqual(code, result.code)
self.assertIn(message, result.msg)
def test_defaults(self):
code = 'EBADMSG'
message = 'NexentaError'
result = jsonrpc.NefException()
self.assertEqual(code, result.code)
self.assertIn(message, result.msg)
class TestNefRequest(test.TestCase):
def setUp(self):
super(TestNefRequest, self).setUp()
self.proxy = FakeNefProxy()
def fake_response(self, method, path, payload, code, content):
request = requests.PreparedRequest()
request.method = method
request.url = self.proxy.url(path)
request.headers = {'Content-Type': 'application/json'}
request.body = None
if method in ['get', 'delete']:
request.params = payload
elif method in ['put', 'post']:
request.data = json.dumps(payload)
response = requests.Response()
response.request = request
response.status_code = code
if content or content is None:
response._content = json.dumps(content)
else:
response._content = ''
return response
def test___call___invalid_method(self):
method = 'unsupported'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
self.assertRaises(jsonrpc.NefException, instance, path)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test___call___none_path(self, check_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
check_host.return_value = True
self.assertRaises(jsonrpc.NefException, instance, None)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test___call___empty_path(self, check_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
check_host.return_value = True
self.assertRaises(jsonrpc.NefException, instance, '')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___get(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {}
content = {'name': 'snapshot'}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___get_payload(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'name': 'snapshot'}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___get_data_payload(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
data = [
{
'name': 'fs1',
'path': 'pool/fs1'
},
{
'name': 'fs2',
'path': 'pool/fs2'
}
]
content = {'data': data}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
instance.hook(response)
request.assert_called_with(method, path, payload)
self.assertEqual(data, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.find_host')
def test___call___get_invalid_payload(self, find_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = 'bad data'
find_host.return_value = True
self.assertRaises(jsonrpc.NefException, instance, path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___delete(self, request):
method = 'delete'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'name': 'snapshot'}
content = ''
expected = None
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___delete_payload(self, request):
method = 'delete'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'name': 'snapshot'}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.find_host')
def test___call___delete_invalid_payload(self, find_host):
method = 'delete'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = 'bad data'
find_host.return_value = True
self.assertRaises(jsonrpc.NefException, instance, path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___post(self, request):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {}
content = None
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___post_payload(self, request):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = None
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.find_host')
def test___call___post_invalid_payload(self, find_host):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = 'bad data'
find_host.return_value = True
self.assertRaises(jsonrpc.NefException, instance, path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___put(self, request):
method = 'put'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {}
content = None
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___put_payload(self, request):
method = 'put'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = None
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test___call___put_invalid_payload(self, check_host):
method = 'put'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = 'bad data'
check_host.return_value = True
self.assertRaises(jsonrpc.NefException, instance, path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___non_ok_response(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'ENOENT', 'message': 'error'}
response = self.fake_response(method, path, payload, 500, content)
request.return_value = response
self.assertRaises(jsonrpc.NefException, instance, path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.find_host')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___request_after_find_host(self, request, find_host):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = None
response = self.fake_response(method, path, payload, 200, content)
request.side_effect = [requests.exceptions.Timeout, response]
find_host.return_value = True
result = instance(path, payload)
request.assert_called_with(method, path, payload)
self.assertEqual(content, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.find_host')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test___call___request_find_host_error(self, request, find_host):
method = 'put'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
request.side_effect = requests.exceptions.Timeout
find_host.return_value = False
self.assertRaises(requests.exceptions.Timeout, instance, path, payload)
def test_hook_default(self):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'name': 'dataset'}
response = self.fake_response(method, path, payload, 303, content)
result = instance.hook(response)
self.assertEqual(response, result)
def test_hook_200_empty(self):
method = 'delete'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'storage/filesystems'
payload = {'force': True}
content = None
response = self.fake_response(method, path, payload, 200, content)
result = instance.hook(response)
self.assertEqual(response, result)
def test_hook_201_none(self):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'storage/snapshots'
payload = {'path': 'parent/child@name'}
content = None
response = self.fake_response(method, path, payload, 201, content)
result = instance.hook(response)
self.assertEqual(response, result)
def test_hook_201_empty(self):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'storage/snapshots'
payload = {'path': 'parent/child@name'}
content = ''
response = self.fake_response(method, path, payload, 201, content)
result = instance.hook(response)
self.assertEqual(response, result)
def test_hook_500_empty(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'storage/pools'
payload = {'poolName': 'tank'}
content = None
response = self.fake_response(method, path, payload, 500, content)
self.assertRaises(jsonrpc.NefException, instance.hook, response)
def test_hook_200_bad_content(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'storage/volumes'
payload = {'name': 'test'}
content = None
response = self.fake_response(method, path, payload, 200, content)
response._content = 'bad_content'
self.assertRaises(jsonrpc.NefException, instance.hook, response)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_not_ok_limit(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
instance.stat[401] = 1000 * instance.proxy.retries
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'EAUTH'}
response = self.fake_response(method, path, payload, 401, content)
request.return_value = response
result = instance.hook(response)
self.assertEqual(response, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.auth')
def test_hook_401_ok(self, auth, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'EAUTH'}
response = self.fake_response(method, path, payload, 401, content)
auth.return_value = True
content2 = {'name': 'test'}
response2 = self.fake_response(method, path, payload, 200, content2)
request.return_value = response2
self.proxy.session.send.return_value = content2
result = instance.hook(response)
self.assertEqual(content2, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.auth')
def test_hook_401_error(self, auth):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
auth.return_value = False
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'EAUTH'}
response = self.fake_response(method, path, payload, 401, content)
self.assertRaises(jsonrpc.NefException, instance.hook, response)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test_hook_404_check_host_error(self, check_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'ENOENT'}
response = self.fake_response(method, path, payload, 404, content)
check_host.return_value = False
self.assertRaises(jsonrpc.NefException, instance.hook, response)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test_hook_404_check_host_ok(self, check_host, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'ENOENT'}
response = self.fake_response(method, path, payload, 404, content)
check_host.return_value = True
request.return_value = response
result = instance.hook(response)
self.assertEqual(response, result)
def test_hook_500_busy_max_retries(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
instance.stat[500] = self.proxy.retries
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'EBUSY'}
response = self.fake_response(method, path, payload, 500, content)
self.assertRaises(jsonrpc.NefException, instance.hook, response)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_hook_500_busy_ok(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'code': 'EEXIST'}
response = self.fake_response(method, path, payload, 500, content)
request.return_value = response
result = instance.hook(response)
self.assertEqual(response, result)
def test_hook_201_no_monitor(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'monitor': 'unknown'}
response = self.fake_response(method, path, payload, 202, content)
self.assertRaises(jsonrpc.NefException, instance.hook, response)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_hook_201_ok(self, request):
method = 'delete'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {
'links': [{
'rel': 'monitor',
'href': '/jobStatus/jobID'
}]
}
response = self.fake_response(method, path, payload, 202, content)
content2 = None
response2 = self.fake_response(method, path, payload, 201, content2)
request.return_value = response2
result = instance.hook(response)
self.assertEqual(response2, result)
def test_200_no_data(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'name': 'test'}
response = self.fake_response(method, path, payload, 200, content)
result = instance.hook(response)
self.assertEqual(response, result)
def test_200_pagination_end(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
content = {'data': 'value'}
response = self.fake_response(method, path, payload, 200, content)
result = instance.hook(response)
self.assertEqual(response, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_200_pagination_next(self, request):
method = 'get'
payload = None
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
content = {
'data': [{
'name': 'test'
}],
'links': [{
'rel': 'next',
'href': '%s?limit=100' % path
}]
}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance.hook(response)
self.assertEqual(response, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_200_pagination_payload_next(self, request):
method = 'get'
payload = {'key': 'value'}
instance = jsonrpc.NefRequest(self.proxy, method)
instance.payload = payload
path = 'parent/child'
content = {
'data': [{
'name': 'test'
}],
'links': [{
'rel': 'next',
'href': '%s?limit=100' % path
}]
}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance.hook(response)
self.assertEqual(response, result)
def test_request_get_payload(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
expected = {'name': 'dataset'}
url = self.proxy.url(path)
kwargs = {
'params': payload,
'timeout': self.proxy.timeout,
'hooks': {
'response': instance.hook
}
}
self.proxy.session.request.return_value = expected
result = instance.request(method, path, payload)
self.proxy.session.request.assert_called_with(method, url, **kwargs)
self.assertEqual(expected, result)
def test_request_get(self):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = None
expected = {'name': 'dataset'}
url = self.proxy.url(path)
kwargs = {
'timeout': self.proxy.timeout,
'hooks': {
'response': instance.hook
}
}
self.proxy.session.request.return_value = expected
result = instance.request(method, path, payload)
self.proxy.session.request.assert_called_with(method, url, **kwargs)
self.assertEqual(expected, result)
def test_request_post(self):
method = 'post'
instance = jsonrpc.NefRequest(self.proxy, method)
path = 'parent/child'
payload = {'key': 'value'}
expected = None
url = self.proxy.url(path)
kwargs = {
'data': json.dumps(payload),
'timeout': self.proxy.timeout,
'hooks': {
'response': instance.hook
}
}
self.proxy.session.request.return_value = expected
result = instance.request(method, path, payload)
self.proxy.session.request.assert_called_with(method, url, **kwargs)
self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_auth(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
method = 'post'
path = '/auth/login'
payload = {
'username': self.proxy.username,
'password': self.proxy.password
}
content = {'token': 'test'}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance.auth()
request.assert_called_with(method, path, payload)
self.assertTrue(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_auth_error(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
method = 'post'
path = '/auth/login'
payload = {
'username': self.proxy.username,
'password': self.proxy.password
}
content = {'data': 'noauth'}
response = self.fake_response(method, path, payload, 401, content)
request.return_value = response
result = instance.auth()
request.assert_called_with(method, path, payload)
self.assertFalse(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test_find_host(self, check_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
check_host.return_value = True
result = instance.find_host()
self.assertTrue(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test_find_host_timeout(self, check_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
check_host.side_effect = requests.exceptions.Timeout
result = instance.find_host()
self.assertFalse(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_find_host_404(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = self.proxy.root
payload = {
'fields': 'path',
'path': self.proxy.path
}
content = {'code': 'ENOENT'}
response = self.fake_response(method, path, payload, 404, content)
request.side_effect = [response, response]
result = instance.find_host()
request.assert_called_with(method, path, payload)
self.assertFalse(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.check_host')
def test_find_host_error(self, check_host):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
check_host.side_effect = [
requests.exceptions.RequestException,
jsonrpc.NefException
]
result = instance.find_host()
self.assertFalse(result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest.request')
def test_check_host_ok(self, request):
method = 'get'
instance = jsonrpc.NefRequest(self.proxy, method)
path = self.proxy.path
payload = {
'path': self.proxy.path,
'fields': 'path'
}
content = {
'data': [{
'path': path
}]
}
response = self.fake_response(method, path, payload, 200, content)
request.return_value = response
result = instance.check_host()
self.assertTrue(result)
def test_parse(self):
method = 'get'
rel = 'monitor'
href = 'jobStatus/jobID'
content = {
'links': [
[1, 2],
'bad link',
{
'rel': 'next',
'href': href
},
{
'rel': rel,
'href': href
}
]
}
instance = jsonrpc.NefRequest(self.proxy, method)
result = instance.parse(content, rel)
expected = href, {}
self.assertEqual(expected, result)
def test_parse_no_content(self):
method = 'get'
rel = 'next'
content = {}
instance = jsonrpc.NefRequest(self.proxy, method)
result = instance.parse(content, rel)
expected = None, None
self.assertEqual(expected, result)
def test_parse_no_links(self):
method = 'get'
rel = 'next'
content = {'a': 'b'}
instance = jsonrpc.NefRequest(self.proxy, method)
result = instance.parse(content, rel)
expected = None, None
self.assertEqual(expected, result)
def test_parse_links_no_list(self):
method = 'get'
rel = 'next'
content = {
'links': 'data'
}
instance = jsonrpc.NefRequest(self.proxy, method)
result = instance.parse(content, rel)
expected = None, None
self.assertEqual(expected, result)
def test_parse_no_rel(self):
method = 'get'
rel = 'next'
content = {
'links': [
{
'rel': 'monitor',
'href': '/jobs/jobID'
}
]
}
instance = jsonrpc.NefRequest(self.proxy, method)
result = instance.parse(content, rel)
expected = None, None
self.assertEqual(expected, result)
def test_parse_no_href(self):
method = 'get'
rel = 'next'
content = {
'links': [
{
'rel': rel
}
]
}
instance = jsonrpc.NefRequest(self.proxy, method)
result = instance.parse(content, rel)
expected = None, None
self.assertEqual(expected, result)
class TestNefCollections(test.TestCase):
def setUp(self):
super(TestNefCollections, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefCollections(self.proxy)
def test_path(self):
path = 'path/to/item name + - & # $ = 0'
result = self.instance.path(path)
quoted_path = six.moves.urllib.parse.quote_plus(path)
expected = posixpath.join(self.instance.root, quoted_path)
self.assertEqual(expected, result)
def test_get(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = {'name': 'dataset'}
path = self.instance.path(name)
self.proxy.get.return_value = expected
result = self.instance.get(name, payload)
self.proxy.get.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_set(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
self.proxy.put.return_value = expected
result = self.instance.set(name, payload)
self.proxy.put.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_list(self):
payload = {'key': 'value'}
expected = [{'name': 'dataset'}]
self.proxy.get.return_value = expected
result = self.instance.list(payload)
self.proxy.get.assert_called_with(self.instance.root, payload)
self.assertEqual(expected, result)
def test_create(self):
payload = {'key': 'value'}
expected = None
self.proxy.post.return_value = expected
result = self.instance.create(payload)
self.proxy.post.assert_called_with(self.instance.root, payload)
self.assertEqual(expected, result)
def test_create_exist(self):
payload = {'key': 'value'}
expected = None
self.proxy.post.side_effect = jsonrpc.NefException(code='EEXIST')
result = self.instance.create(payload)
self.proxy.post.assert_called_with(self.instance.root, payload)
self.assertEqual(expected, result)
def test_create_error(self):
payload = {'key': 'value'}
self.proxy.post.side_effect = jsonrpc.NefException(code='EBUSY')
self.assertRaises(jsonrpc.NefException, self.instance.create, payload)
self.proxy.post.assert_called_with(self.instance.root, payload)
def test_delete(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
self.proxy.delete.return_value = expected
result = self.instance.delete(name, payload)
self.proxy.delete.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_delete_not_found(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
self.proxy.delete.side_effect = jsonrpc.NefException(code='ENOENT')
result = self.instance.delete(name, payload)
self.proxy.delete.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_delete_error(self):
name = 'parent/child'
payload = {'key': 'value'}
path = self.instance.path(name)
self.proxy.delete.side_effect = jsonrpc.NefException(code='EINVAL')
self.assertRaises(jsonrpc.NefException, self.instance.delete, name,
payload)
self.proxy.delete.assert_called_with(path, payload)
class TestNefSettings(test.TestCase):
def setUp(self):
super(TestNefSettings, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefSettings(self.proxy)
def test_create(self):
payload = {'key': 'value'}
result = self.instance.create(payload)
expected = NotImplemented
self.assertEqual(expected, result)
def test_delete(self):
name = 'parent/child'
payload = {'key': 'value'}
result = self.instance.delete(name, payload)
expected = NotImplemented
self.assertEqual(expected, result)
class TestNefDatasets(test.TestCase):
def setUp(self):
super(TestNefDatasets, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefDatasets(self.proxy)
def test_rename(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'rename')
self.proxy.post.return_value = expected
result = self.instance.rename(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
class TestNefSnapshots(test.TestCase):
def setUp(self):
super(TestNefSnapshots, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefSnapshots(self.proxy)
def test_clone(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'clone')
self.proxy.post.return_value = expected
result = self.instance.clone(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
class TestNefVolumeGroups(test.TestCase):
def setUp(self):
super(TestNefVolumeGroups, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefVolumeGroups(self.proxy)
def test_rollback(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'rollback')
self.proxy.post.return_value = expected
result = self.instance.rollback(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
class TestNefVolumes(test.TestCase):
def setUp(self):
super(TestNefVolumes, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefVolumes(self.proxy)
def test_promote(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'promote')
self.proxy.post.return_value = expected
result = self.instance.promote(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
class TestNefFilesystems(test.TestCase):
def setUp(self):
super(TestNefFilesystems, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefFilesystems(self.proxy)
def test_mount(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'mount')
self.proxy.post.return_value = expected
result = self.instance.mount(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_unmount(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'unmount')
self.proxy.post.return_value = expected
result = self.instance.unmount(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_acl(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = self.instance.path(name)
path = posixpath.join(path, 'acl')
self.proxy.post.return_value = expected
result = self.instance.acl(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
class TestNefHpr(test.TestCase):
def setUp(self):
super(TestNefHpr, self).setUp()
self.proxy = mock.Mock()
self.instance = jsonrpc.NefHpr(self.proxy)
def test_activate(self):
payload = {'key': 'value'}
expected = None
root = posixpath.dirname(self.instance.root)
path = posixpath.join(root, 'activate')
self.proxy.post.return_value = expected
result = self.instance.activate(payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
def test_start(self):
name = 'parent/child'
payload = {'key': 'value'}
expected = None
path = posixpath.join(self.instance.path(name), 'start')
self.proxy.post.return_value = expected
result = self.instance.start(name, payload)
self.proxy.post.assert_called_with(path, payload)
self.assertEqual(expected, result)
class TestNefProxy(test.TestCase):
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
@mock.patch.object(jsonrpc, 'NefRequest')
def setUp(self, nef_request, update_lock):
super(TestNefProxy, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.nexenta_use_https = True
self.cfg.driver_ssl_cert_verify = True
self.cfg.driver_ssl_cert_path = None
self.cfg.nexenta_user = 'user'
self.cfg.nexenta_password = 'pass'
self.cfg.nexenta_rest_address = '1.1.1.1,2.2.2.2'
self.cfg.nexenta_rest_port = 8443
self.cfg.nexenta_rest_backoff_factor = 1
self.cfg.nexenta_rest_retry_count = 3
self.cfg.nexenta_rest_connect_timeout = 1
self.cfg.nexenta_rest_read_timeout = 1
self.cfg.nas_host = '3.3.3.3'
self.cfg.nexenta_host = '4.4.4.4'
self.cfg.nas_share_path = 'pool/path/to/share'
self.proto = 'nfs'
self.proxy = jsonrpc.NefProxy(self.proto,
self.cfg.nas_share_path,
self.cfg)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___http(self, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.nexenta_use_https = False
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___no_rest_port_http(self, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.nexenta_rest_port = 0
cfg.nexenta_use_https = False
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___no_rest_port_https(self, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.nexenta_rest_port = 0
cfg.nexenta_use_https = True
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___iscsi(self, update_lock):
proto = 'iscsi'
cfg = copy.copy(self.cfg)
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___nfs_no_rest_address(self, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.nexenta_rest_address = ''
cfg.nexenta_host = ''
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
self.assertIn(cfg.nas_host, result.hosts)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___iscsi_no_rest_address(self, update_lock):
proto = 'iscsi'
cfg = copy.copy(self.cfg)
cfg.nexenta_rest_address = ''
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
self.assertIn(cfg.nexenta_host, result.hosts)
def test___init___nfs_no_management_address(self):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.nexenta_rest_address = ''
cfg.nexenta_host = ''
cfg.nas_host = ''
self.assertRaises(jsonrpc.NefException, jsonrpc.NefProxy,
proto, cfg.nas_share_path, cfg)
def test___init___invalid_storage_protocol(self):
proto = 'invalid'
cfg = copy.copy(self.cfg)
self.assertRaises(jsonrpc.NefException, jsonrpc.NefProxy,
proto, cfg.nas_share_path, cfg)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
@mock.patch('requests.packages.urllib3.disable_warnings')
def test___init___no_ssl_cert_verify(self, disable_warnings, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.driver_ssl_cert_verify = False
disable_warnings.return_value = None
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
disable_warnings.assert_called()
self.assertIsInstance(result, jsonrpc.NefProxy)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
def test___init___ssl_cert_path(self, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
cfg.driver_ssl_cert_verify = True
cfg.driver_ssl_cert_path = 'ca.crt'
update_lock.return_value = None
result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(result, jsonrpc.NefProxy)
self.assertEqual(result.session.verify, cfg.driver_ssl_cert_path)
@mock.patch.object(jsonrpc.NefProxy, 'update_lock')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRequest')
def test___getattr___(self, request, update_lock):
proto = 'nfs'
cfg = copy.copy(self.cfg)
update_lock.return_value = None
proxy = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg)
self.assertIsInstance(proxy, jsonrpc.NefProxy)
result = proxy.get('name')
self.assertIsInstance(result, mock.MagicMock)
def test_delete_bearer(self):
self.assertIsNone(self.proxy.delete_bearer())
self.assertNotIn('Authorization', self.proxy.session.headers)
self.proxy.session.headers['Authorization'] = 'Bearer token'
self.assertIsNone(self.proxy.delete_bearer())
self.assertNotIn('Authorization', self.proxy.session.headers)
def test_update_bearer(self):
token = 'token'
bearer = 'Bearer %s' % token
self.assertNotIn('Authorization', self.proxy.session.headers)
self.assertIsNone(self.proxy.update_bearer(token))
self.assertIn('Authorization', self.proxy.session.headers)
self.assertEqual(self.proxy.session.headers['Authorization'], bearer)
def test_update_token(self):
token = 'token'
bearer = 'Bearer %s' % token
self.assertIsNone(self.proxy.update_token(token))
self.assertEqual(self.proxy.tokens[self.proxy.host], token)
self.assertEqual(self.proxy.session.headers['Authorization'], bearer)
def test_update_host(self):
token = 'token'
bearer = 'Bearer %s' % token
host = self.cfg.nexenta_rest_address
self.proxy.tokens[host] = token
self.assertIsNone(self.proxy.update_host(host))
self.assertEqual(self.proxy.session.headers['Authorization'], bearer)
def test_skip_update_host(self):
host = 'nonexistent'
self.assertIsNone(self.proxy.update_host(host))
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRsf.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSettings.get')
def test_update_lock(self, get_settings, list_clusters):
guid1 = uuid.uuid4().hex
guid2 = uuid.uuid4().hex
settings = {'value': guid1}
clusters = [
{
'nodes': [
{
'machineId': guid1
},
{
'machineId': guid2
}
]
}
]
get_settings.return_value = settings
list_clusters.return_value = clusters
self.assertIsNone(self.proxy.update_lock())
guids = [guid1, guid2]
guid = ':'.join(sorted(guids))
lock = '%s:%s' % (guid, self.proxy.path)
if six.PY3:
lock = lock.encode('utf-8')
expected = hashlib.md5(lock).hexdigest()
self.assertEqual(expected, self.proxy.lock)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRsf.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSettings.get')
def test_update_lock_no_clusters(self, get_settings, list_clusters):
guid1 = uuid.uuid4().hex
guid2 = uuid.uuid4().hex
settings = {'value': guid1}
clusters = [
{
'hosts': [
{
'machineId': guid1
},
{
'machineId': guid2
}
]
}
]
get_settings.return_value = settings
list_clusters.return_value = clusters
self.assertIsNone(self.proxy.update_lock())
lock = '%s:%s' % (guid1, self.proxy.path)
if six.PY3:
lock = lock.encode('utf-8')
expected = hashlib.md5(lock).hexdigest()
self.assertEqual(expected, self.proxy.lock)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRsf.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSettings.get')
def test_update_lock_no_ids(self, get_settings, list_clusters):
guid1 = uuid.uuid4().hex
guid2 = uuid.uuid4().hex
settings = {'value': guid1}
clusters = [
{
'nodes': [
{
'machine': guid1
},
{
'machine': guid2
}
]
}
]
get_settings.return_value = settings
list_clusters.return_value = clusters
self.assertIsNone(self.proxy.update_lock())
lock = '%s:%s' % (guid1, self.proxy.path)
if six.PY3:
lock = lock.encode('utf-8')
expected = hashlib.md5(lock).hexdigest()
self.assertEqual(expected, self.proxy.lock)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefRsf.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
'jsonrpc.NefSettings.get')
def test_update_lock_no_settings(self, get_settings, list_clusters):
get_settings.side_effect = jsonrpc.NefException
list_clusters.side_effect = jsonrpc.NefException
self.assertIsNone(self.proxy.update_lock())
guid = ':'.join(sorted(self.proxy.hosts))
lock = '%s:%s' % (guid, self.proxy.path)
if six.PY3:
lock = lock.encode('utf-8')
expected = hashlib.md5(lock).hexdigest()
self.assertEqual(expected, self.proxy.lock)
def test_url(self):
path = '/path/to/api'
result = self.proxy.url(path)
expected = '%(scheme)s://%(host)s:%(port)s%(path)s' % {
'scheme': self.proxy.scheme,
'host': self.proxy.host,
'port': self.proxy.port,
'path': path
}
self.assertEqual(expected, result)
def test_url_no_path(self):
result = self.proxy.url()
expected = '%(scheme)s://%(host)s:%(port)s' % {
'scheme': self.proxy.scheme,
'host': self.proxy.host,
'port': self.proxy.port
}
self.assertEqual(expected, result)
@mock.patch('eventlet.greenthread.sleep')
def test_delay(self, sleep):
sleep.return_value = None
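# Descriptive note (added): the proxy is expected to back off exponentially
# between retries, i.e. delay(attempt) should sleep
# backoff_factor * 2 ** (attempt - 1) seconds, which the loop below asserts.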
for attempt in range(0, 10):
expected = float(self.proxy.backoff_factor * (2 ** (attempt - 1)))
self.assertIsNone(self.proxy.delay(attempt))
sleep.assert_called_with(expected)
|
|
from unittest import TestCase
import unittest
import os
import datetime
import requests_mock
from blueflood import TenantBluefloodFinder, TenantBluefloodReader, TenantBluefloodLeafNode, \
BluefloodClient, auth, calc_res, secs_per_res
# To run these tests you need to set up the environment variables below
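# For example (variable names taken from the lookups below; values are placeholders):
#   AUTH_API_KEY, AUTH_USER_NAME, AUTH_TENANT, AUTH_URL enable the auth tests;
#   NO_AUTH_TENANT, NO_AUTH_URL enable the no_auth tests.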
try:
auth_api_key = os.environ['AUTH_API_KEY']
auth_user_name = os.environ['AUTH_USER_NAME']
auth_tenant = os.environ['AUTH_TENANT']
auth_url = os.environ['AUTH_URL']
auth_config = {'blueflood':
{'authentication_module': 'rax_auth',
'authentication_class': 'BluefloodAuth',
'username': auth_user_name,
'apikey': auth_api_key,
'urls': [auth_url],
'tenant': auth_tenant}}
except Exception as e:
print e
print "Auth env undefined, not running auth tests"
auth_config = None
try:
no_auth_tenant = os.environ['NO_AUTH_TENANT']
no_auth_url = os.environ['NO_AUTH_URL']
no_auth_config = {'blueflood':
{ 'urls': [no_auth_url],
'tenant': no_auth_tenant}}
except:
print "NO_AUTH env undefined, not running no_auth tests"
no_auth_config = None
try:
from graphite.storage import FindQuery
print 'using graphite.storage.FindQuery'
except:
try:
from graphite_api.storage import FindQuery
print 'using graphite_api.storage.FindQuery'
except:
print 'rolling my own FindQuery'
class FindQuery(object):
def __init__(self, pattern, startTime, endTime):
self.pattern = pattern
self.startTime = startTime
self.endTime = endTime
def exc_callback(request, context):
raise ValueError("Test exceptions")
class BluefloodTests(TestCase):
def setUp(self):
self.alias_key = '_avg'
config = {'blueflood':
{'urls':["http://dummy.com"],
'tenant':'dummyTenant',
'submetric_aliases': {self.alias_key:'average'}}}
self.finder = TenantBluefloodFinder(config)
self.metric1 = "a.b.c"
self.metric2 = "e.f.g"
self.reader = TenantBluefloodReader(self.metric1, self.finder.tenant, self.finder.bf_query_endpoint,
self.finder.enable_submetrics, self.finder.submetric_aliases)
self.node1 = TenantBluefloodLeafNode(self.metric1, self.reader)
self.node2 = TenantBluefloodLeafNode(self.metric2, self.reader)
self.bfc = BluefloodClient(self.finder.bf_query_endpoint, self.finder.tenant,
self.finder.enable_submetrics, self.finder.submetric_aliases)
auth.set_auth(None)
def run_find(self, finder):
nodes = list(finder.find_nodes(FindQuery('rackspace.*', 0, 100)))
self.assertTrue(len(nodes) > 0)
def setup_UTC_mock(self):
#setup a mock that forces expiration
self.orig_get_current_UTC = type(auth.auth).get_current_UTC
self.orig_do_auth = type(auth.auth).do_auth
this = self
self.authCount = 0
def mock_get_current_UTC(self):
return this.orig_get_current_UTC(self) + datetime.timedelta(days=1)
def mock_do_auth(self):
this.authCount += 1
this.orig_do_auth(self)
type(auth.auth).get_current_UTC = mock_get_current_UTC
type(auth.auth).do_auth = mock_do_auth
def unset_UTC_mock(self):
type(auth.auth).get_current_UTC = self.orig_get_current_UTC
type(auth.auth).do_auth = self.orig_do_auth
def test_finder(self):
if no_auth_config:
print "\nRunning NO_AUTH tests"
finder = TenantBluefloodFinder(no_auth_config)
self.run_find(finder)
if auth_config:
print "\nRunning AUTH tests"
finder = TenantBluefloodFinder(auth_config)
self.run_find(finder)
#force re-auth
auth.auth.token = ""
self.run_find(finder)
#test expired UTC
self.setup_UTC_mock()
self.run_find(finder)
self.unset_UTC_mock()
self.assertTrue(self.authCount == 1)
def test_gen_groups(self):
# one time through without submetrics
self.bfc.enable_submetrics = False
#only 1 metric per group even though room for more
self.bfc.maxmetrics_per_req = 1
self.bfc.maxlen_per_req = 20
groups = self.bfc.gen_groups([self.node1, self.node2])
self.assertTrue(groups == [['a.b.c'], ['e.f.g']])
#allow 2 metrics per group
self.bfc.maxmetrics_per_req = 2
groups = self.bfc.gen_groups([self.node1, self.node2])
self.assertTrue(groups == [['a.b.c', 'e.f.g']])
#now only room for 1 per group
self.bfc.maxlen_per_req = 12
groups = self.bfc.gen_groups([self.node1, self.node2])
self.assertTrue(groups == [['a.b.c'], ['e.f.g']])
#no room for metric in a group
self.bfc.maxlen_per_req = 11
with self.assertRaises(IndexError):
groups = self.bfc.gen_groups([self.node1, self.node2])
# now with submetrics
self.bfc.enable_submetrics = True
#only 1 metric per group even though room for more
self.bfc.maxmetrics_per_req = 1
self.bfc.maxlen_per_req = 15
groups = self.bfc.gen_groups([self.node1, self.node2])
self.assertTrue(groups == [['a.b'], ['e.f']])
#allow 2 metrics per group
self.bfc.maxmetrics_per_req = 2
groups = self.bfc.gen_groups([self.node1, self.node2])
self.assertTrue(groups == [['a.b', 'e.f']])
#now only room for 1 per group
self.bfc.maxlen_per_req = 10
groups = self.bfc.gen_groups([self.node1, self.node2])
self.assertTrue(groups == [['a.b'], ['e.f']])
#no room for metric in a group
self.bfc.maxlen_per_req = 9
with self.assertRaises(IndexError):
groups = self.bfc.gen_groups([self.node1, self.node2])
def make_data(self, start, step):
# should be 0th element in response
first_timestamp = start * 1000
# should be skipped because it overlaps first_timestamp + 1000*step
second_timestamp = first_timestamp + (1000 * step - 1)
# should be 4th element
third_timestamp = first_timestamp + (5000 * step - 1)
# should be 7th element
fourth_timestamp = first_timestamp + (7000 * step + 1)
metric1 = self.metric1
metric2 = self.metric2
if self.bfc.enable_submetrics:
submetric = '.' + self.alias_key
metric1 += submetric
metric2 += submetric
node1 = TenantBluefloodLeafNode(metric1, self.reader)
node2 = TenantBluefloodLeafNode(metric2, self.reader)
return ([node1, node2],
[{u'data':
[{u'timestamp': third_timestamp, u'average': 4449.97, u'numPoints': 1},
{u'timestamp': fourth_timestamp, u'average': 14449.97, u'numPoints': 1}],
u'metric': self.metric1, u'type': u'number', u'unit': u'unknown'},
{u'data':
[{u'timestamp': first_timestamp, u'average': 6421.18, u'numPoints': 1},
{u'timestamp': second_timestamp, u'average': 26421.18, u'numPoints': 1}],
u'metric': self.metric2, u'type': u'number', u'unit': u'unknown'}])
def test_gen_dict(self):
step = 3000
start = 1426120000
end = 1426147000
nodes, responses = self.make_data(start, step)
dictionary = self.bfc.gen_dict(nodes, responses, start, end, step)
self.assertDictEqual(dictionary,
{nodes[1].path: [6421.18, None, None, None, None, None, None, None, None],
nodes[0].path: [None, None, None, None, 4449.97, None, None, 14449.97, None]})
# check that it correctly handles a metric that is missing from the response
nodes[1].path += '.dummy'
dictionary = self.bfc.gen_dict(nodes, responses, start, end, step)
self.assertTrue(dictionary ==
{nodes[0].path: [None, None, None, None, 4449.97, None, None, 14449.97, None]})
# now with submetrics
self.bfc.enable_submetrics = True
nodes, responses = self.make_data(start, step)
dictionary = self.bfc.gen_dict(nodes, responses, start, end, step)
self.assertTrue(dictionary ==
{nodes[1].path: [6421.18, None, None, None, None, None, None, None, None],
nodes[0].path: [None, None, None, None, 4449.97, None, None, 14449.97, None]})
def test_gen_responses(self):
step = 3000
start = 1426120000
end = 1426147000
groups1 = [[self.metric1, self.metric2]]
payload = self.bfc.gen_payload(start, end, 'FULL')
endpoint = self.bfc.get_multi_endpoint(self.finder.bf_query_endpoint, self.finder.tenant)
# test 401 error
with requests_mock.mock() as m:
m.post(endpoint, json={}, status_code=401)
responses = self.bfc.gen_responses(groups1, payload)
self.assertTrue(responses == [])
#test single group
_, responses = self.make_data(start, step)
with requests_mock.mock() as m:
m.post(endpoint, json={'metrics':responses}, status_code=200)
new_responses = self.bfc.gen_responses(groups1, payload)
self.assertTrue(responses == new_responses)
#test multiple groups
groups2 = [[self.metric1], [self.metric2]]
with requests_mock.mock() as m:
global json_data
json_data = [{'metrics':responses[0:1]},{'metrics':responses[1:]}]
def json_callback(request, context):
global json_data
response = json_data[0]
json_data = json_data[1:]
return response
m.post(endpoint, json=json_callback, status_code=200)
new_responses = self.bfc.gen_responses(groups2, payload)
self.assertTrue(responses == new_responses)
def test_find_nodes(self):
endpoint = self.finder.find_metrics_endpoint(self.finder.bf_query_endpoint, self.finder.tenant)
# one time through without submetrics
self.finder.enable_submetrics = False
with requests_mock.mock() as m:
#test 401 errors
query = FindQuery("*", 1, 2)
m.get(endpoint, json={}, status_code=401)
metrics = self.finder.find_nodes(query)
self.assertTrue(list(metrics) == [])
with requests_mock.mock() as m:
query = FindQuery("*", 1, 2)
m.get(endpoint, json=exc_callback, status_code=401)
with self.assertRaises(ValueError):
list(self.finder.find_nodes(query))
def get_start(x):
return lambda y: '.'.join(y.split('.')[0:x])
get_path = lambda x: x.path
def query_test(query_pattern, jdata, qlen, search_results):
with requests_mock.mock() as m:
query = FindQuery(query_pattern, 1, 2)
m.get(endpoint, json=jdata, status_code=200)
metrics = self.finder.find_nodes(query)
self.assertSequenceEqual(map(get_path, list(metrics)),
map(get_start(qlen), search_results))
query_test("*",
[{u'metric': self.metric1, u'unit': u'percent'},
{u'metric': self.metric2, u'unit': u'percent'}],
1, [self.metric1, self.metric2])
query_test("a.*",
[{u'metric': self.metric1, u'unit': u'percent'}],
2, [self.metric1])
query_test("a.b.*",
[{u'metric': self.metric1, u'unit': u'percent'}],
3, [self.metric1])
query_test("a.b.c",
[{u'metric': self.metric1, u'unit': u'percent'}],
3, [self.metric1])
# now again, with submetrics
self.finder.enable_submetrics = True
query_test("*",
[{u'metric': self.metric1, u'unit': u'percent'},
{u'metric': self.metric2, u'unit': u'percent'}],
1, [self.metric1, self.metric2])
query_test("a.*",
[{u'metric': self.metric1, u'unit': u'percent'}],
2, [self.metric1])
query_test("a.b.*",
[{u'metric': self.metric1, u'unit': u'percent'},
{u'metric': 'a.bb.c', u'unit': u'percent'}],
3, [self.metric1])
query_test("a.b.c",
[{u'metric': self.metric1, u'unit': u'percent'}],
3, [self.metric1])
query_test("a.b.c.*",
[{u'metric': self.metric1, u'unit': u'percent'},
{u'metric': (self.metric1 + 'd'), u'unit': u'percent'}],
4, [self.metric1 + '.' + self.alias_key])
query_test("a.b.c._avg",
[{u'metric': self.metric1, u'unit': u'percent'}],
4, [self.metric1 + '.' + self.alias_key])
def test_fetch(self):
step = 3000
start = 1426120000
end = 1426147000
endpoint = self.bfc.get_multi_endpoint(self.finder.bf_query_endpoint, self.finder.tenant)
nodes, responses = self.make_data(start, step)
with requests_mock.mock() as m:
m.post(endpoint, json={'metrics':responses}, status_code=200)
time_info, dictionary = self.finder.fetch_multi(nodes, start, end)
self.assertSequenceEqual(time_info, (1426120000, 1426147300, 300))
self.assertDictEqual(dictionary,
{'e.f.g':
[6421.18, None, None, None, None, None, None, None, None, 26421.18, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None],
'a.b.c':
[None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 4449.97, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 14449.97, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None]})
time2, seq = self.reader.fetch(start, end)
self.assertSequenceEqual(time2, (1426120000, 1426147300, 300))
self.assertSequenceEqual(seq, dictionary[self.metric1])
with requests_mock.mock() as m:
m.post(endpoint, json=exc_callback, status_code=200)
with self.assertRaises(ValueError):
self.reader.fetch(start, end)
def test_calc_res(self):
start = 0
stop1 = secs_per_res['MIN240']*801
stop2 = stop1 - 1
self.assertEqual(calc_res(start, stop1), 'MIN1440')
self.assertEqual(calc_res(start, stop2), 'MIN240')
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding:utf8 -*-
# File : opr.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 3/18/17
#
# This file is part of TensorArtist.
from tartist.app.rl.base import DiscreteActionSpace
from tartist.app.rl.base import ProxyRLEnvironBase
from tartist.core import get_logger
from tartist.core.utils.meta import run_once
import copy
import functools
import collections
import numpy as np
logger = get_logger(__file__)
__all__ = [
'TransparentAttributeProxyRLEnviron',
'AutoRestartProxyRLEnviron',
'RepeatActionProxyRLEnviron', 'NOPFillProxyRLEnviron',
'LimitLengthProxyRLEnviron', 'MapStateProxyRLEnviron',
'MapActionProxyRLEnviron', 'HistoryFrameProxyRLEnviron',
'ManipulateRewardProxyRLEnviron', 'manipulate_reward',
'remove_proxies', 'find_proxy']
class TransparentAttributeProxyRLEnviron(ProxyRLEnvironBase):
def __getattr__(self, name):
return getattr(remove_proxies(self), name)
class AutoRestartProxyRLEnviron(ProxyRLEnvironBase):
def _action(self, action):
r, is_over = self.proxy.action(action)
if is_over:
self.finish()
self.restart()
return r, is_over
class RepeatActionProxyRLEnviron(ProxyRLEnvironBase):
def __init__(self, other, repeat):
super().__init__(other)
self._repeat = repeat
def _action(self, action):
total_r = 0
for i in range(self._repeat):
r, is_over = self.proxy.action(action)
total_r += r
if is_over:
break
return total_r, is_over
class NOPFillProxyRLEnviron(ProxyRLEnvironBase):
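# Added note: after each real action this proxy feeds `nr_fill` no-op actions
# (action id `nop`) to the wrapped environ, accumulating their rewards into the
# returned total and stopping early if the episode ends.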
def __init__(self, other, nr_fill, nop=0):
super().__init__(other)
self._nr_fill = nr_fill
self._nop = nop
def _action(self, action):
total_r, is_over = self.proxy.action(action)
for i in range(self._nr_fill):
r, is_over = self.proxy.action(self._nop)
total_r += r
if is_over:
break
return total_r, is_over
class LimitLengthProxyRLEnviron(ProxyRLEnvironBase):
def __init__(self, other, limit):
super().__init__(other)
self._limit = limit
self._cnt = 0
@property
def limit(self):
return self._limit
def set_limit(self, limit):
self._limit = limit
return self
def _action(self, action):
r, is_over = self.proxy.action(action)
self._cnt += 1
if self._limit is not None and self._cnt >= self._limit:
is_over = True
return r, is_over
def _restart(self, *args, **kwargs):
super()._restart(*args, **kwargs)
self._cnt = 0
class MapStateProxyRLEnviron(ProxyRLEnvironBase):
def __init__(self, other, func):
super().__init__(other)
self._func = func
def _get_current_state(self):
return self._func(self.proxy.current_state)
class MapActionProxyRLEnviron(ProxyRLEnvironBase):
def __init__(self, other, mapping):
super().__init__(other)
assert type(mapping) in [tuple, list]
for i in mapping:
assert type(i) is int
self._mapping = mapping
action_space = other.action_space
assert isinstance(action_space, DiscreteActionSpace)
action_meanings = [action_space.action_meanings[i] for i in mapping]
self._action_space = DiscreteActionSpace(len(mapping), action_meanings)
def _get_action_space(self):
return self._action_space
def _action(self, action):
assert action < len(self._mapping)
return self.proxy.action(self._mapping[action])
HistoryFrameProxyRLEnviron_copy_warning = run_once(lambda: logger.warn('HistoryFrameProxyRLEnviron._copy' +
HistoryFrameProxyRLEnviron._copy_history.__doc__))
class HistoryFrameProxyRLEnviron(ProxyRLEnvironBase):
@staticmethod
def __zeros_like(v):
if type(v) is tuple:
return tuple(HistoryFrameProxyRLEnviron.__zeros_like(i) for i in v)
assert isinstance(v, np.ndarray)
return np.zeros_like(v, dtype=v.dtype)
@staticmethod
def __concat(history):
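# Added note: concatenate the buffered frames along the last axis;
# tuple-structured states are handled element-wise by recursing over the
# zipped histories.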
last = history[-1]
if type(last) is tuple:
return tuple(HistoryFrameProxyRLEnviron.__concat(i) for i in zip(*history))
return np.concatenate(history, axis=-1)
def __init__(self, other, history_length):
super().__init__(other)
self._history = collections.deque(maxlen=history_length)
def _get_current_state(self):
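# Added note: left-pad a not-yet-full history with zero frames so the
# concatenated observation always contains exactly `history_length` frames.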
while len(self._history) != self._history.maxlen:
assert len(self._history) > 0
v = self._history[-1]
self._history.appendleft(self.__zeros_like(v))
return self.__concat(self._history)
def _set_current_state(self, state):
if len(self._history) == self._history.maxlen:
self._history.popleft()
self._history.append(state)
# Use shallow copy
def _copy_history(self, _called_directly=True):
"""DEPRECATED: (2017-12-23) Use copy_history directly."""
if _called_directly:
HistoryFrameProxyRLEnviron_copy_warning()
return copy.copy(self._history)
def _restore_history(self, history, _called_directly=True):
"""DEPRECATED: (2017-12-23) Use restore_history directly."""
if _called_directly:
HistoryFrameProxyRLEnviron_copy_warning()
assert isinstance(history, collections.deque)
assert history.maxlen == self._history.maxlen
self._history = copy.copy(history)
def copy_history(self):
return self._copy_history(_called_directly=False)
def restore_history(self, history):
return self._restore_history(history, _called_directly=False)
def _action(self, action):
r, is_over = self.proxy.action(action)
self._set_current_state(self.proxy.current_state)
return r, is_over
def _restart(self, *args, **kwargs):
super()._restart(*args, **kwargs)
self._history.clear()
self._set_current_state(self.proxy.current_state)
class ManipulateRewardProxyRLEnviron(ProxyRLEnvironBase):
"""DEPRECATED: (2017-11-20) Use manipulate_reward instead."""
def __init__(self, other, func):
logger.warn('ManipulateRewardProxyRLEnviron may cause wrong reward history; use manipulate_reward instead.')
super().__init__(other)
self._func = func
def _action(self, action):
r, is_over = self.proxy.action(action)
return self._func(r), is_over
def manipulate_reward(player, func):
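# Added note: monkey-patch the player's _action so every reward is passed
# through `func`, keeping the wrapped environ's own reward bookkeeping intact
# (unlike the deprecated ManipulateRewardProxyRLEnviron above).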
old_func = player._action
@functools.wraps(old_func)
def new_func(action):
r, is_over = old_func(action)
return func(r), is_over
player._action = new_func
return player
def remove_proxies(environ):
"""Remove all wrapped proxy environs"""
while isinstance(environ, ProxyRLEnvironBase):
environ = environ.proxy
return environ
def find_proxy(environ, proxy_cls):
while not isinstance(environ, proxy_cls) and isinstance(environ, ProxyRLEnvironBase):
environ = environ.proxy
if isinstance(environ, proxy_cls):
return environ
return None
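# Hedged usage sketch (added for illustration; not part of the original API).
# It assumes the caller provides `make_base_environ`, a zero-argument factory
# returning an RLEnvironBase-compatible environment; everything else used here
# is defined above in this module.
def _example_proxy_composition(make_base_environ):
    """Illustrative only: compose several of the proxies defined above."""
    env = make_base_environ()
    # Stack the 4 most recent observations along the last axis.
    env = HistoryFrameProxyRLEnviron(env, history_length=4)
    # Terminate any episode after at most 1000 actions.
    env = LimitLengthProxyRLEnviron(env, limit=1000)
    # Clip rewards to {-1, 0, +1}; unlike ManipulateRewardProxyRLEnviron this
    # keeps the wrapped environ's reward bookkeeping intact.
    env = manipulate_reward(env, np.sign)
    # Helpers for inspecting the resulting proxy chain.
    assert find_proxy(env, HistoryFrameProxyRLEnviron) is not None
    assert not isinstance(remove_proxies(env), ProxyRLEnvironBase)
    return env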
|
|
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Quantity class and related.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import copy
import decimal
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_almost_equal)
from ...tests.helper import raises, pytest
from ...utils import isiterable, minversion
from ...utils.compat import NUMPY_LT_1_7
from ... import units as u
from ...units.quantity import _UNIT_NOT_INITIALISED
from ...extern.six.moves import xrange
from ...extern.six.moves import cPickle as pickle
from ...extern import six
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation(object):
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
q4 = u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
q1 = u.Quantity(11.412, unit="testingggg")
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve float32
a3 = np.array([1.,2.], dtype=np.float32)
q3 = u.Quantity(a3, u.yr)
assert q3.dtype == a3.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal('10.25'), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object)
assert q5.dtype == object
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.), order='C')
qcc = u.Quantity(ac, u.m, order='C')
assert qcc.flags['C_CONTIGUOUS']
qcf = u.Quantity(ac, u.m, order='F')
assert qcf.flags['F_CONTIGUOUS']
qca = u.Quantity(ac, u.m, order='A')
assert qca.flags['C_CONTIGUOUS']
# check it works also when passing in a quantity
assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS']
af = np.array(np.arange(10.), order='F')
qfc = u.Quantity(af, u.m, order='C')
assert qfc.flags['C_CONTIGUOUS']
qff = u.Quantity(ac, u.m, order='F')
assert qff.flags['F_CONTIGUOUS']
qfa = u.Quantity(af, u.m, order='A')
assert qfa.flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS']
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = 'm'
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.*a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.
assert mylookalike[2] == 0.
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.
assert mylookalike[2] == 2.
mylookalike.unit = 'nonsense'
with pytest.raises(TypeError):
u.Quantity(mylookalike)
class TestQuantityOperations(object):
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15. * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416,
decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.
assert new_quantity.unit == u.Unit("1/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, 'm*s')
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, 'm/s')
assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m')
def test_power(self):
# raise quantity to a power
new_quantity = self.q1 ** 2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1 ** 3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
""" When trying to add or subtract units that aren't compatible, throw an error """
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
new_q = q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
type_err_msg = ("Unsupported operand type(s) for ufunc add: "
"'Quantity' and 'dict'")
with pytest.raises(TypeError) as exc:
q1 + {'a': 1}
assert exc.value.args[0] == type_err_msg
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3. * u.m / u.km
dq1 = dq + 1. * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
# this test will check that operations with dimensionless Quantities
# don't work
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
def test_complicated_operation(self):
""" Perform a more complicated test """
from .. import imperial
# Multiple units
distance = u.Quantity(15., u.meter)
time = u.Quantity(11., u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(
velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2)
new_q = ((1. / (4. * np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg))
# Area
side1 = u.Quantity(11., u.centimeter)
side2 = u.Quantity(7., u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77., decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
# when one is a unit, Quantity does not know what to do,
# but unit is fine with it, so it still works
unit = u.cm**3
q = 1. * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000. * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1. * u.cm == 1.
assert 1. * u.cm != 1.
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = ("Only dimensionless scalar quantities "
"can be converted to Python scalars")
index_err_msg = ("Only integer dimensionless scalar quantities "
"can be converted to a Python index")
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
if six.PY2:
with pytest.raises(TypeError) as exc:
long(q1)
assert exc.value.args[0] == converter_err_msg
# We used to test `q1 * ['a', 'b', 'c']` here, but that it worked
# at all was a really odd confluence of bugs. Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to(u.dimensionless_unscaled).value)
assert int(q2) == int(q2.to(u.dimensionless_unscaled).value)
if six.PY2:
assert long(q2) == long(q2.to(u.dimensionless_unscaled).value)
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
if six.PY2:
assert long(q3) == 1
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.
assert int(q4) == 2
if six.PY2:
assert long(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
if six.PY2:
with pytest.raises(TypeError) as exc:
long(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1., 2., 3.], u.m)
assert np.all(np.array(q) == np.array([1., 2., 3.]))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, 2997924580.0)
q1 = u.Quantity(0.4, unit=u.arcsecond)
q2 = q1.to(u.au, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(q2.value, 515662.015)
assert q2.unit == u.au
assert_allclose(q3.value, 0.0066666667)
assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0*u.radian)
assert u.deg.is_equivalent(1*u.radian)
def test_si():
q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10. * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10. / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
q = 10. * u.m # 10 meters
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10. / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10. * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
class TestQuantityComparison(object):
def test_quantity_equality(self):
assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km')
assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km'))
# for ==, !=, return False, True if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(
1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay(object):
scalarintq = u.Quantity(1, unit='m', dtype=int)
scalarfloatq = u.Quantity(1.3, unit='m')
arrq = u.Quantity([1, 2.3, 8.9], unit='m')
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert repr(self.scalarintq * q2) == "<Quantity 1.0>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
assert repr(self.arrq * q2) == "<Quantity [ 1. , 2.3, 8.9]>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
assert str(self.arrq * q2) == "[ 1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, '.2f') == '3.14'
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
assert str(self.arrq) == "[ 1. 2.3 8.9] m"
def test_array_quantity_repr(self):
assert repr(self.arrq) == "<Quantity [ 1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, '02d') == "01 m"
assert format(self.scalarfloatq, '.1f') == "1.3 m"
assert format(self.scalarfloatq, '.0f') == "1 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>')
def test_repr_latex(self):
from ...units.quantity import conf
q2scalar = u.Quantity(1.5e14, 'm/s')
assert self.scalarintq._repr_latex_() == '$1 \\; \\mathrm{m}$'
assert self.scalarfloatq._repr_latex_() == '$1.3 \\; \\mathrm{m}$'
assert (q2scalar._repr_latex_() ==
'$1.5 \\times 10^{14} \\; \\mathrm{\\frac{m}{s}}$')
if NUMPY_LT_1_7:
with pytest.raises(NotImplementedError):
self.arrq._repr_latex_()
return # all arrays should fail
assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \; \mathrm{m}$'
qmed = np.arange(100)*u.m
qbig = np.arange(1000)*u.m
qvbig = np.arange(10000)*1e9*u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
#check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' not in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r'\dots' in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r'$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$'
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
being referenced not copied, then modified, which changed the original
value.
"""
q = np.array([1, 2, 3]) * u.m / (2. * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10., u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, int)
# but with multiple dtypes, single elements are OK; need to use str()
# since numpy under python2 cannot handle unicode literals
a = np.array([(1.,2.,3.), (4.,5.,6.), (7.,8.,9.)],
dtype=[(str('x'), np.float),
(str('y'), np.float),
(str('z'), np.float)])
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0].item()
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert not qkpc0.isscalar
qkpcx = qkpc['x']
assert np.all(qkpcx.value == a['x'])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc['x'][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]['x']
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(xrange(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
# quantity addition/subtraction should *not* work with arrays b/c unit
# ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
if six.PY2:
with pytest.raises(TypeError):
long(qsec)
def test_array_indexing_slicing():
q = np.array([1., 2., 3.]) * u.m
assert q[0] == 1. * u.m
assert np.all(q[0:2] == u.Quantity([1., 2.], u.m))
def test_array_setslice():
q = np.array([1., 2., 3. ]) * u.m
q[1:2] = np.array([400.]) * u.cm
assert np.all(q == np.array([1., 4., 3.]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4., u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1. * u.m / 's'
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
@raises(ValueError)
def test_quantity_invalid_unit_string():
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert 'centimeter' in attrs
assert 'cm' in attrs
assert 'parsec' in attrs
assert 'foo' in attrs
assert 'to' in attrs
assert 'value' in attrs
# Something from the base class, object
assert '__setattr__' in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
"""Regressiont est for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = six.next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order='F')
assert q3.flags['F_CONTIGUOUS']
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order='C')
assert q4.flags['C_CONTIGUOUS']
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10. * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickelability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_from_string():
with pytest.raises(TypeError):
q = u.Quantity(u.m * "5")
# the reverse should also fail once #1408 is in
with pytest.raises(TypeError):
q = u.Quantity('5', u.m)
with pytest.raises(TypeError):
q = u.Quantity(['5'], u.m)
with pytest.raises(TypeError):
q = u.Quantity(np.array(['5']), u.m)
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
q2 = np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
@raises(ValueError)
def test_quantity_tuple_power():
(5.0 * u.m) ** (1, 2)
def test_inherit_docstrings():
assert u.Quantity.argmax.__doc__ == np.ndarray.argmax.__doc__
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from ...table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b'])
t['a'].unit = u.kpc
qa = u.Quantity(t['a'])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t['a'])
qb = u.Quantity(t['b'])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t['b'])
# This does *not* auto-convert, because it's not necessarily obvious that's
# desired. Instead we revert to standard `Quantity` behavior
qap = u.Quantity(t['a'], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t['a'] * 1000)
qbp = u.Quantity(t['b'], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t['b'])
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [ 1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == 'f'
if minversion(np, '1.8.0'):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[ 1, 2],
[ 10, 20],
[ 3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[ 1, 10, 2],
[ 3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[ 1, 10, 2],
[ 3, 10, 4]])
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
if NUMPY_LT_1_7:
# Numpy 1.6.x has some different defaults for how to display object
# arrays (it uses the str() of the objects instead of the repr())
assert repr(a) == 'array([1.0 m, 2.0 s], dtype=object)'
assert str(a) == '[1.0 m 2.0 s]'
else:
assert repr(a) == 'array([<Quantity 1.0 m>, <Quantity 2.0 s>], dtype=object)'
assert str(a) == '[<Quantity 1.0 m> <Quantity 2.0 s>]'
|
|
import mozprocess
import subprocess
from .base import Browser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from .base import NullBrowser # noqa: F401
from .chrome import executor_kwargs as chrome_executor_kwargs
from ..webdriver_server import ChromeDriverServer
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "chrome_android",
"check_args": "check_args",
"browser": {None: "ChromeAndroidBrowser",
"wdspec": "NullBrowser"},
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "ChromeDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier"}
_wptserve_ports = set()
def check_args(**kwargs):
require_arg(kwargs, "package_name")
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
return {"package_name": kwargs["package_name"],
"device_serial": kwargs["device_serial"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args"),
"stackwalk_binary": kwargs.get("stackwalk_binary"),
"symbols_path": kwargs.get("symbols_path")}
def executor_kwargs(logger, test_type, test_environment, run_info_data,
**kwargs):
# Use update() to modify the global set in place.
_wptserve_ports.update(set(
test_environment.config['ports']['http'] + test_environment.config['ports']['https'] +
test_environment.config['ports']['ws'] + test_environment.config['ports']['wss']
))
executor_kwargs = chrome_executor_kwargs(logger, test_type, test_environment, run_info_data,
**kwargs)
# Remove unsupported options on mobile.
del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
assert kwargs["package_name"], "missing --package-name"
executor_kwargs["capabilities"]["goog:chromeOptions"]["androidPackage"] = \
kwargs["package_name"]
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
# allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
return {"server_host": "127.0.0.1"}
class LogcatRunner(object):
def __init__(self, logger, browser, remote_queue):
self.logger = logger
self.browser = browser
self.remote_queue = remote_queue
def start(self):
try:
self._run()
except KeyboardInterrupt:
self.stop()
def _run(self):
try:
# TODO: adb logcat -c fails randomly with the message
# "failed to clear the 'main' log"
self.browser.clear_log()
except subprocess.CalledProcessError:
self.logger.error("Failed to clear logcat buffer")
self._cmd = self.browser.logcat_cmd()
self._proc = mozprocess.ProcessHandler(
self._cmd,
processOutputLine=self.on_output,
storeOutput=False)
self._proc.run()
def _send_message(self, command, *args):
try:
self.remote_queue.put((command, args))
except AssertionError:
self.logger.warning("Error when send to remote queue")
def stop(self, force=False):
if self.is_alive():
kill_result = self._proc.kill()
if force and kill_result != 0:
self._proc.kill(9)
def is_alive(self):
return hasattr(self._proc, "proc") and self._proc.poll() is None
def on_output(self, line):
data = {
"action": "process_output",
"process": "LOGCAT",
"command": "logcat",
"data": line
}
self._send_message("log", data)
class ChromeAndroidBrowserBase(Browser):
def __init__(self, logger,
webdriver_binary="chromedriver",
remote_queue = None,
device_serial=None,
webdriver_args=None,
stackwalk_binary=None,
symbols_path=None):
super(ChromeAndroidBrowserBase, self).__init__(logger)
self.device_serial = device_serial
self.stackwalk_binary = stackwalk_binary
self.symbols_path = symbols_path
self.remote_queue = remote_queue
self.server = ChromeDriverServer(self.logger,
binary=webdriver_binary,
args=webdriver_args)
if self.remote_queue is not None:
self.logcat_runner = LogcatRunner(self.logger,
self, self.remote_queue)
def setup(self):
self.setup_adb_reverse()
if self.remote_queue is not None:
self.logcat_runner.start()
def _adb_run(self, args):
cmd = ['adb']
if self.device_serial:
cmd.extend(['-s', self.device_serial])
cmd.extend(args)
self.logger.info(' '.join(cmd))
subprocess.check_call(cmd)
def start(self, **kwargs):
self.server.start(block=False)
def stop(self, force=False):
self.server.stop(force=force)
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the driver is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive()
def cleanup(self):
self.stop()
self._adb_run(['forward', '--remove-all'])
self._adb_run(['reverse', '--remove-all'])
if self.remote_queue is not None:
self.logcat_runner.stop(force=True)
def executor_browser(self):
return ExecutorBrowser, {
"webdriver_url": self.server.url,
"capabilities": {
"goog:chromeOptions": {
"androidDeviceSerial": self.device_serial
}
}
}
def clear_log(self):
self._adb_run(['logcat', '-c'])
def logcat_cmd(self):
cmd = ['adb']
if self.device_serial:
cmd.extend(['-s', self.device_serial])
cmd.extend(['logcat', '*:D'])
return cmd
def check_crash(self, process, test):
self.maybe_parse_tombstone()
# Existence of a tombstone does not necessarily mean the test target has
# crashed. Always return False so we don't change the test results.
return False
def maybe_parse_tombstone(self):
if self.stackwalk_binary:
cmd = [self.stackwalk_binary, "-a", "-w"]
if self.device_serial:
cmd.extend(["--device", self.device_serial])
cmd.extend(["--output-directory", self.symbols_path])
raw_output = subprocess.check_output(cmd)
for line in raw_output.splitlines():
self.logger.process_output("TRACE", line, "logcat")
def setup_adb_reverse(self):
self._adb_run(['wait-for-device'])
self._adb_run(['forward', '--remove-all'])
self._adb_run(['reverse', '--remove-all'])
# "adb reverse" forwards network connection from device to host.
for port in self.wptserver_ports:
self._adb_run(['reverse', 'tcp:%d' % port, 'tcp:%d' % port])
class ChromeAndroidBrowser(ChromeAndroidBrowserBase):
"""Chrome is backed by chromedriver, which is supplied through
``wptrunner.webdriver.ChromeDriverServer``.
"""
def __init__(self, logger, package_name,
webdriver_binary="chromedriver",
remote_queue=None,
device_serial=None,
webdriver_args=None,
stackwalk_binary=None,
symbols_path=None):
super(ChromeAndroidBrowser, self).__init__(logger,
webdriver_binary, remote_queue, device_serial,
webdriver_args, stackwalk_binary, symbols_path)
self.package_name = package_name
self.wptserver_ports = _wptserve_ports
|
|
import os
import fnmatch
import shutil
from collections import defaultdict
from conans.util.files import mkdir
def report_copied_files(copied, output):
ext_files = defaultdict(list)
for f in copied:
_, ext = os.path.splitext(f)
ext_files[ext].append(os.path.basename(f))
if not ext_files:
return False
for ext, files in ext_files.items():
files_str = (", ".join(files)) if len(files) < 5 else ""
file_or_files = "file" if len(files) == 1 else "files"
if not ext:
output.info("Copied %d %s: %s" % (len(files), file_or_files, files_str))
else:
output.info("Copied %d '%s' %s: %s" % (len(files), ext, file_or_files, files_str))
return True
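# Illustrative sketch only (not part of the original module): given
# copied = ["lib/foo.dll", "lib/bar.dll", "readme.txt"], the grouping above would
# emit, via the passed-in output.info() callable, something like
#   Copied 2 '.dll' files: foo.dll, bar.dll
#   Copied 1 '.txt' file: readme.txt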
class FileCopier(object):
""" main responsible of copying files from place to place:
package: build folder -> package folder
imports: package folder -> user folder
export: user folder -> store "export" folder
"""
def __init__(self, root_source_folder, root_destination_folder, excluded=None):
"""
Takes the base folders to copy resources src -> dst. These folder names
will not be used in the relative names while copying.
param root_source_folder: The base folder to copy things from, typically the
store build folder
param root_destination_folder: The base folder to copy things to, typically the
store package folder
"""
self._base_src = root_source_folder
self._base_dst = root_destination_folder
self._copied = []
self._excluded = [root_destination_folder]
if excluded:
self._excluded.append(excluded)
def report(self, output):
return report_copied_files(self._copied, output)
def __call__(self, pattern, dst="", src="", keep_path=True, links=False, symlinks=None,
excludes=None, ignore_case=False):
"""
param pattern: an fnmatch file pattern of the files that should be copied. E.g.: *.dll
param dst: the destination local folder, relative to the current conanfile dir, to
which the files will be copied. E.g.: "bin"
param src: the source folder in which those files will be searched. This folder
will be stripped from the dst name. E.g.: lib/Debug/x86
param keep_path: if True (default), the relative path from src is preserved under
dst; if False, it is dropped and the files are copied flat into dst.
False is useful if you want to collect e.g. many *.lib files from
many dirs into a single lib dir
return: list of copied files
"""
if symlinks is not None:
links = symlinks
# Check for ../ patterns and allow them
if pattern.startswith(".."):
rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))
base_src = os.path.dirname(rel_dir)
pattern = os.path.basename(rel_dir)
else:
base_src = self._base_src
src = os.path.join(base_src, src)
dst = os.path.join(self._base_dst, dst)
files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,
ignore_case)
copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
self._link_folders(src, dst, link_folders)
self._copied.extend(files_to_copy)
return copied_files
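# A minimal usage sketch (illustrative, not part of the original module; the
# paths and the `output` object are hypothetical):
#   copier = FileCopier("/path/to/build", "/path/to/package")
#   copier("*.h", dst="include", src="src/headers")            # keeps relative paths
#   copier("*.lib", dst="lib", src="build", keep_path=False)   # flattens into lib/
#   copier.report(output)  # `output` is any object exposing an .info(str) method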
def _filter_files(self, src, pattern, links, excludes, ignore_case):
""" return a list of the files matching the patterns
The list will be relative path names wrt to the root src folder
"""
filenames = []
linked_folders = []
if excludes:
if not isinstance(excludes, (tuple, list)):
excludes = (excludes, )
if ignore_case:
excludes = [e.lower() for e in excludes]
else:
excludes = []
for root, subfolders, files in os.walk(src, followlinks=True):
if root in self._excluded:
subfolders[:] = []
continue
if links and os.path.islink(root):
linked_folders.append(os.path.relpath(root, src))
subfolders[:] = []
continue
basename = os.path.basename(root)
# Skip git or svn subfolders
if basename in [".git", ".svn"]:
subfolders[:] = []
continue
if basename == "test_package": # DO NOT export test_package/build folder
try:
subfolders.remove("build")
except ValueError: # "build" subfolder not present
pass
relative_path = os.path.relpath(root, src)
for exclude in excludes:
if fnmatch.fnmatch(relative_path, exclude):
subfolders[:] = []
files = []
break
for f in files:
relative_name = os.path.normpath(os.path.join(relative_path, f))
filenames.append(relative_name)
if ignore_case:
filenames = {f.lower(): f for f in filenames}
pattern = pattern.lower()
files_to_copy = fnmatch.filter(filenames, pattern)
for exclude in excludes:
files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
if ignore_case:
files_to_copy = [filenames[f] for f in files_to_copy]
return files_to_copy, linked_folders
@staticmethod
def _link_folders(src, dst, linked_folders):
created_links = []
for linked_folder in linked_folders:
src_link = os.path.join(src, linked_folder)
# Discard symlinks that go out of the src folder
abs_path = os.path.realpath(src_link)
relpath = os.path.relpath(abs_path, src)
if relpath.startswith("."):
continue
link = os.readlink(src_link)
# Absolute path symlinks are a problem; convert them to relative ones
if os.path.isabs(link):
link = os.path.relpath(link, os.path.dirname(src_link))
dst_link = os.path.join(dst, linked_folder)
try:
# Remove the previous symlink
os.remove(dst_link)
except OSError:
pass
# link is a string relative to linked_folder
# e.g.: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
mkdir(os.path.dirname(dst_link))
os.symlink(link, dst_link)
created_links.append(dst_link)
# Remove dangling links and any directories this leaves empty
for dst_link in created_links:
abs_path = os.path.realpath(dst_link)
if not os.path.exists(abs_path):
base_path = os.path.dirname(dst_link)
os.remove(dst_link)
while base_path.startswith(dst):
try: # Take advantage of the fact that os.rmdir does not delete non-empty dirs
os.rmdir(base_path)
except OSError:
break # not empty
base_path = os.path.dirname(base_path)
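# Illustrative note (not part of the original module): a folder symlink inside
# src whose real target also lives inside src is re-created under dst with a
# relative target; links resolving outside src are skipped, and re-created links
# that end up dangling are removed again, together with any directories that are
# left empty under dst.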
@staticmethod
def _copy_files(files, src, dst, keep_path, symlinks):
""" executes a multiple file copy from [(src_file, dst_file), (..)]
managing symlinks if necessary
"""
copied_files = []
for filename in files:
abs_src_name = os.path.join(src, filename)
filename = filename if keep_path else os.path.basename(filename)
abs_dst_name = os.path.normpath(os.path.join(dst, filename))
try:
os.makedirs(os.path.dirname(abs_dst_name))
except OSError: # the destination directory may already exist
pass
if symlinks and os.path.islink(abs_src_name):
linkto = os.readlink(abs_src_name) # @UndefinedVariable
try:
os.remove(abs_dst_name)
except OSError:
pass
os.symlink(linkto, abs_dst_name) # @UndefinedVariable
else:
shutil.copy2(abs_src_name, abs_dst_name)
copied_files.append(abs_dst_name)
return copied_files
|
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Specific, edge-case tests for the MediaFile metadata layer.
"""
import os
import shutil
import _common
from _common import unittest
import beets.mediafile
class EdgeTest(unittest.TestCase):
def test_emptylist(self):
# Some files have an ID3 frame that has a list with no elements.
# This is very hard to produce, so this is just the first 8192
# bytes of a file found "in the wild".
emptylist = beets.mediafile.MediaFile(
os.path.join(_common.RSRC, 'emptylist.mp3'))
genre = emptylist.genre
self.assertEqual(genre, '')
def test_release_time_with_space(self):
# Ensures that release times delimited by spaces are ignored.
# Amie Street produces such files.
space_time = beets.mediafile.MediaFile(
os.path.join(_common.RSRC, 'space_time.mp3'))
self.assertEqual(space_time.year, 2009)
self.assertEqual(space_time.month, 9)
self.assertEqual(space_time.day, 4)
def test_release_time_with_t(self):
# Ensures that release times delimited by Ts are ignored.
# The iTunes Store produces such files.
t_time = beets.mediafile.MediaFile(
os.path.join(_common.RSRC, 't_time.m4a'))
self.assertEqual(t_time.year, 1987)
self.assertEqual(t_time.month, 3)
self.assertEqual(t_time.day, 31)
def test_tempo_with_bpm(self):
# Some files have a string like "128 BPM" in the tempo field
# rather than just a number.
f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'bpm.mp3'))
self.assertEqual(f.bpm, 128)
def test_discc_alternate_field(self):
# Different taggers use different vorbis comments to reflect
# the disc and disc count fields: ensure that the alternative
# style works.
f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'discc.ogg'))
self.assertEqual(f.disc, 4)
self.assertEqual(f.disctotal, 5)
def test_old_ape_version_bitrate(self):
f = beets.mediafile.MediaFile(os.path.join(_common.RSRC, 'oldape.ape'))
self.assertEqual(f.bitrate, 0)
_sc = beets.mediafile._safe_cast
class InvalidValueToleranceTest(unittest.TestCase):
def test_packed_integer_with_extra_chars(self):
pack = beets.mediafile.Packed("06a", beets.mediafile.packing.SLASHED)
self.assertEqual(pack[0], 6)
def test_packed_integer_invalid(self):
pack = beets.mediafile.Packed("blah", beets.mediafile.packing.SLASHED)
self.assertEqual(pack[0], 0)
def test_packed_index_out_of_range(self):
pack = beets.mediafile.Packed("06", beets.mediafile.packing.SLASHED)
self.assertEqual(pack[1], 0)
def test_safe_cast_string_to_int(self):
self.assertEqual(_sc(int, 'something'), 0)
def test_safe_cast_int_string_to_int(self):
self.assertEqual(_sc(int, '20'), 20)
def test_safe_cast_string_to_bool(self):
self.assertEqual(_sc(bool, 'whatever'), False)
def test_safe_cast_intstring_to_bool(self):
self.assertEqual(_sc(bool, '5'), True)
def test_safe_cast_string_to_float(self):
self.assertAlmostEqual(_sc(float, '1.234'), 1.234)
def test_safe_cast_int_to_float(self):
self.assertAlmostEqual(_sc(float, 2), 2.0)
def test_safe_cast_string_with_cruft_to_float(self):
self.assertAlmostEqual(_sc(float, '1.234stuff'), 1.234)
def test_safe_cast_negative_string_to_float(self):
self.assertAlmostEqual(_sc(float, '-1.234'), -1.234)
def test_safe_cast_special_chars_to_unicode(self):
us = _sc(unicode, 'caf\xc3\xa9')
self.assertTrue(isinstance(us, unicode))
self.assertTrue(us.startswith(u'caf'))
class SafetyTest(unittest.TestCase):
def _exccheck(self, fn, exc, data=''):
fn = os.path.join(_common.RSRC, fn)
with open(fn, 'w') as f:
f.write(data)
try:
self.assertRaises(exc, beets.mediafile.MediaFile, fn)
finally:
os.unlink(fn) # delete the temporary file
def test_corrupt_mp3_raises_unreadablefileerror(self):
# Make sure we catch Mutagen reading errors appropriately.
self._exccheck('corrupt.mp3', beets.mediafile.UnreadableFileError)
def test_corrupt_mp4_raises_unreadablefileerror(self):
self._exccheck('corrupt.m4a', beets.mediafile.UnreadableFileError)
def test_corrupt_flac_raises_unreadablefileerror(self):
self._exccheck('corrupt.flac', beets.mediafile.UnreadableFileError)
def test_corrupt_ogg_raises_unreadablefileerror(self):
self._exccheck('corrupt.ogg', beets.mediafile.UnreadableFileError)
def test_invalid_ogg_header_raises_unreadablefileerror(self):
self._exccheck('corrupt.ogg', beets.mediafile.UnreadableFileError,
'OggS\x01vorbis')
def test_corrupt_monkeys_raises_unreadablefileerror(self):
self._exccheck('corrupt.ape', beets.mediafile.UnreadableFileError)
def test_invalid_extension_raises_filetypeerror(self):
self._exccheck('something.unknown', beets.mediafile.FileTypeError)
def test_magic_xml_raises_unreadablefileerror(self):
self._exccheck('nothing.xml', beets.mediafile.UnreadableFileError,
"ftyp")
def test_broken_symlink(self):
fn = os.path.join(_common.RSRC, 'brokenlink')
os.symlink('does_not_exist', fn)
try:
self.assertRaises(beets.mediafile.UnreadableFileError,
beets.mediafile.MediaFile, fn)
finally:
os.unlink(fn)
class SideEffectsTest(unittest.TestCase):
def setUp(self):
self.empty = os.path.join(_common.RSRC, 'empty.mp3')
def test_opening_tagless_file_leaves_untouched(self):
old_mtime = os.stat(self.empty).st_mtime
beets.mediafile.MediaFile(self.empty)
new_mtime = os.stat(self.empty).st_mtime
self.assertEqual(old_mtime, new_mtime)
class EncodingTest(unittest.TestCase):
def setUp(self):
src = os.path.join(_common.RSRC, 'full.m4a')
self.path = os.path.join(_common.RSRC, 'test.m4a')
shutil.copy(src, self.path)
self.mf = beets.mediafile.MediaFile(self.path)
def tearDown(self):
os.remove(self.path)
def test_unicode_label_in_m4a(self):
self.mf.label = u'foo\xe8bar'
self.mf.save()
new_mf = beets.mediafile.MediaFile(self.path)
self.assertEqual(new_mf.label, u'foo\xe8bar')
class ZeroLengthMediaFile(beets.mediafile.MediaFile):
@property
def length(self):
return 0.0
class MissingAudioDataTest(unittest.TestCase):
def setUp(self):
path = os.path.join(_common.RSRC, 'full.mp3')
self.mf = ZeroLengthMediaFile(path)
def test_bitrate_with_zero_length(self):
del self.mf.mgfile.info.bitrate # Not available directly.
self.assertEqual(self.mf.bitrate, 0)
class TypeTest(unittest.TestCase):
def setUp(self):
path = os.path.join(_common.RSRC, 'full.mp3')
self.mf = beets.mediafile.MediaFile(path)
def test_year_integer_in_string(self):
self.mf.year = '2009'
self.assertEqual(self.mf.year, 2009)
class SoundCheckTest(unittest.TestCase):
def test_round_trip(self):
data = beets.mediafile._sc_encode(1.0, 1.0)
gain, peak = beets.mediafile._sc_decode(data)
self.assertEqual(gain, 1.0)
self.assertEqual(peak, 1.0)
def test_decode_zero(self):
data = u' 80000000 80000000 00000000 00000000 00000000 00000000 ' \
u'00000000 00000000 00000000 00000000'
gain, peak = beets.mediafile._sc_decode(data)
self.assertEqual(gain, 0.0)
self.assertEqual(peak, 0.0)
def test_malformatted(self):
gain, peak = beets.mediafile._sc_decode(u'foo')
self.assertEqual(gain, 0.0)
self.assertEqual(peak, 0.0)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet, prefetch_related_objects
from django.db.models.query import get_prefetcher
from django.db.models.sql import Query
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Article, Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book,
Bookmark, BookReview, BookWithYear, Comment, Department, Employee,
FavoriteAuthors, House, LessonEntry, ModelIterableSubclass, Person,
Qualification, Reader, Room, TaggedItem, Teacher, WordEntry,
)
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
class PrefetchRelatedTests(TestDataMixin, TestCase):
def assertWhereContains(self, sql, needle):
where_idx = sql.index('WHERE')
self.assertEqual(
sql.count(str(needle), where_idx), 1,
msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create([
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
])
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related('bio')
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
# Need a double-level prefetch ('books__read_by'), hence the 3 queries.
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
An m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[str(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
An m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_prefetch_eq(self):
prefetch_1 = Prefetch('authors', queryset=Author.objects.all())
prefetch_2 = Prefetch('books', queryset=Book.objects.all())
self.assertEqual(prefetch_1, prefetch_1)
self.assertEqual(prefetch_1, mock.ANY)
self.assertNotEqual(prefetch_1, prefetch_2)
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.id)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
'add_q',
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(Book.objects.prefetch_related('authors'))
self.assertEqual(add_q_mock.call_count, 1)
class RawQuerySetTests(TestDataMixin, TestCase):
def test_basic(self):
with self.assertNumQueries(2):
books = Book.objects.raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
).prefetch_related('authors')
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_prefetch_before_raw(self):
with self.assertNumQueries(2):
books = Book.objects.prefetch_related('authors').raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
)
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.raw(
"SELECT * FROM prefetch_related_author"
).prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list of (obj, rel_objs) pairs, one per object
in obj_iter. For each object, the given path is traversed recursively and
the related objects found along it are collected into rel_objs.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
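# Illustrative sketch only (not part of the original test module): for a
# queryset of Persons with prefetched houses, traverse_qs(qs, [['houses']])
# returns a nested structure of (object, related) pairs such as
#   [(person1, [(house1, []), (house2, [])]),
#    (person2, [(house3, [])])]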
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs]
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, related_objs_from_traverse)
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
# lookup.queryset shouldn't be evaluated.
with self.assertNumQueries(3):
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all()),
),
[['houses', 'rooms']],
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
Nested prefetches whose name clashes with descriptor names
(Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch('houses', to_attr='some_attr_name'),
Prefetch('houses', queryset=House.objects.prefetch_related('main_room')),
)
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=occupants))
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [['occupants', 'houses', 'main_room']])
def test_values_queryset(self):
msg = 'Prefetch querysets cannot use raw(), values(), and values_list().'
with self.assertRaisesMessage(ValueError, msg):
Prefetch('houses', House.objects.values('pk'))
with self.assertRaisesMessage(ValueError, msg):
Prefetch('houses', House.objects.values_list('pk'))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass)
Prefetch('teachers', Teacher.objects_custom.all())
def test_raw_queryset(self):
msg = 'Prefetch querysets cannot use raw(), values(), and values_list().'
with self.assertRaisesMessage(ValueError, msg):
Prefetch('houses', House.objects.raw('select pk from house'))
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
'add_q',
autospec=True,
side_effect=lambda self, q: add_q(self, q),
) as add_q_mock:
list(House.objects.prefetch_related(
Prefetch('occupants', queryset=Person.objects.all())
))
self.assertEqual(add_q_mock.call_count, 1)
class DefaultManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.qual1 = Qualification.objects.create(name='BA')
cls.qual2 = Qualification.objects.create(name='BSci')
cls.qual3 = Qualification.objects.create(name='MA')
cls.qual4 = Qualification.objects.create(name='PhD')
cls.teacher1 = Teacher.objects.create(name='Mr Cleese')
cls.teacher2 = Teacher.objects.create(name='Mr Idle')
cls.teacher3 = Teacher.objects.create(name='Mr Chapman')
cls.teacher1.qualifications.add(cls.qual1, cls.qual2, cls.qual3, cls.qual4)
cls.teacher2.qualifications.add(cls.qual1)
cls.teacher3.qualifications.add(cls.qual2)
cls.dept1 = Department.objects.create(name='English')
cls.dept2 = Department.objects.create(name='Physics')
cls.dept1.teachers.add(cls.teacher1, cls.teacher2)
cls.dept2.teachers.add(cls.teacher1, cls.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_prefetch_GFK_uuid_pk(self):
article = Article.objects.create(name='Django')
Comment.objects.create(comment='awesome', content_object_uuid=article)
qs = Comment.objects.prefetch_related('content_object_uuid')
self.assertEqual([c.content_object_uuid for c in qs], [article])
def test_prefetch_GFK_fk_pk(self):
book = Book.objects.create(title='Poems')
book_with_year = BookWithYear.objects.create(book=book, published_year=2019)
Comment.objects.create(comment='awesome', content_object=book_with_year)
qs = Comment.objects.prefetch_related('content_object')
self.assertEqual([c.content_object for c in qs], [book_with_year])
def test_traverse_GFK(self):
"""
A 'content_object' can be traversed with prefetch_related() to
get to related objects on the other side (assuming it is suitably
filtered).
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
@classmethod
def setUpTestData(cls):
person1 = Person.objects.create(name='Joe')
person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house1 = House.objects.create(address='123 Main St')
room1_1 = Room.objects.create(name='Dining room', house=house1)
Room.objects.create(name='Lounge', house=house1)
Room.objects.create(name='Kitchen', house=house1)
house1.main_room = room1_1
house1.save()
person1.houses.add(house1)
house2 = House.objects.create(address='45 Side St')
room2_1 = Room.objects.create(name='Dining room', house=house2)
Room.objects.create(name='Lounge', house=house2)
house2.main_room = room2_1
house2.save()
person1.houses.add(house2)
house3 = House.objects.create(address='6 Downing St')
room3_1 = Room.objects.create(name='Dining room', house=house3)
Room.objects.create(name='Lounge', house=house3)
Room.objects.create(name='Kitchen', house=house3)
house3.main_room = room3_1
house3.save()
person2.houses.add(house3)
house4 = House.objects.create(address='7 Regents St')
room4_1 = Room.objects.create(name='Dining room', house=house4)
Room.objects.create(name='Lounge', house=house4)
house4.main_room = room4_1
house4.save()
person2.houses.add(house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two lookups must be given in the order written,
# otherwise 'primary_house' will cause non-prefetched lookups.
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
in_bulk() correctly prefetches related objects because it does not use
.iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
databases = {'default', 'other'}
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
@classmethod
def setUpTestData(cls):
LessonEntry.objects.bulk_create(
LessonEntry(id=id_, name1=name1, name2=name2)
for id_, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]
)
WordEntry.objects.bulk_create(
WordEntry(id=id_, lesson_entry_id=lesson_entry_id, name=name)
for id_, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]
)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title='Poems')
book2 = Book.objects.create(title='Jane Eyre')
book3 = Book.objects.create(title='Wuthering Heights')
book4 = Book.objects.create(title='Sense and Sensibility')
author1 = Author2.objects.create(name='Charlotte', first_book=book1)
author2 = Author2.objects.create(name='Anne', first_book=book1)
author3 = Author2.objects.create(name='Emily', first_book=book1)
author4 = Author2.objects.create(name='Jane', first_book=book4)
author1.favorite_books.add(book1, book2, book3)
author2.favorite_books.add(book1)
author3.favorite_books.add(book2)
author4.favorite_books.add(book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
cls.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = cls.rooms[-3]
house.save()
def test_bug(self):
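# The prefetch queryset built for the forward FK 'house' should not add a JOIN (refs #21760).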
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
has been fetched[1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
cls.bookwithyear1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12])
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1])
self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), [])
self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1])
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
bookwithyear1, = BookWithYear.objects.prefetch_related('bookreview_set')
with self.assertNumQueries(0):
self.assertCountEqual(bookwithyear1.bookreview_set.all(), [self.bookreview1])
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], 'bookreview_set')
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1, new_review])
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Les confessions Volume I')
cls.book2 = Book.objects.create(title='Candide')
cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70)
cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
'author',
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch('favorite_authors__first_book'),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
|
|
# fly ArduCopter in SITL
# Flight mode switch positions are set-up in arducopter.param to be
# switch 1 = Circle
# switch 2 = Land
# switch 3 = RTL
# switch 4 = Auto
# switch 5 = Loiter
# switch 6 = Stabilize
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil, mavwp
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
FRAME='+'
TARGET='sitl'
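# SITL home locations: mavutil.location(lat, lng, alt_in_metres, heading_in_degrees)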
HOME=mavutil.location(-35.362938,149.165085,584,270)
AVCHOME=mavutil.location(40.072842,-105.230575,1586,0)
homeloc = None
num_wp = 0
def hover(mavproxy, mav, hover_throttle=1450):
mavproxy.send('rc 3 %u\n' % hover_throttle)
return True
def arm_motors(mavproxy, mav):
'''arm motors'''
print("Arming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
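# standard arming gesture: throttle to minimum, yaw stick hard right until armed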
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 2000\n')
mavproxy.expect('APM: ARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_armed_wait()
print("MOTORS ARMED OK")
return True
def disarm_motors(mavproxy, mav):
'''disarm motors'''
print("Disarming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
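# standard disarming gesture: throttle to minimum, yaw stick hard left until disarmed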
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 1000\n')
mavproxy.expect('APM: DISARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
return True
def takeoff(mavproxy, mav, alt_min = 30, takeoff_throttle=1700):
'''take off and climb to alt_min altitude'''
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 %u\n' % takeoff_throttle)
m = mav.recv_match(type='VFR_HUD', blocking=True)
if (m.alt < alt_min):
wait_altitude(mav, alt_min, (alt_min + 5))
hover(mavproxy, mav)
print("TAKEOFF COMPLETE")
return True
# loiter - fly south east, then hold loiter within 5m position and altitude
def loiter(mavproxy, mav, holdtime=10, maxaltchange=5, maxdistchange=5):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first aim south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 170):
return False
mavproxy.send('rc 4 1500\n')
#fly south east 50m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 50):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow down
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
if success:
print("Loiter OK for %u seconds" % holdtime)
else:
print("Loiter FAILED")
return success
def change_alt(mavproxy, mav, alt_min, climb_throttle=1920, descend_throttle=1080):
'''change altitude'''
m = mav.recv_match(type='VFR_HUD', blocking=True)
if m.alt < alt_min:
print("Rise to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % climb_throttle)
wait_altitude(mav, alt_min, (alt_min + 5))
else:
print("Lower to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % descend_throttle)
wait_altitude(mav, (alt_min -5), alt_min)
hover(mavproxy, mav)
return True
# fly a square in stabilize mode
def fly_square(mavproxy, mav, side=50, timeout=300):
'''fly a square, flying N then E'''
tstart = get_sim_time(mav)
success = True
# ensure all sticks in the middle
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 2 1500\n')
mavproxy.send('rc 3 1500\n')
mavproxy.send('rc 4 1500\n')
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim north
print("turn right towards north")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 10):
print("Failed to reach heading")
success = False
mavproxy.send('rc 4 1500\n')
mav.recv_match(condition='RC_CHANNELS_RAW.chan4_raw==1500', blocking=True)
# save bottom left corner of box as waypoint
print("Save WP 1 & 2")
save_wp(mavproxy, mav)
# switch back to stabilize mode
mavproxy.send('rc 3 1430\n')
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
# pitch forward to fly north
print("Going north %u meters" % side)
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save top left corner of square as waypoint
print("Save WP 3")
save_wp(mavproxy, mav)
# roll right to fly east
print("Going east %u meters" % side)
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save top right corner of square as waypoint
print("Save WP 4")
save_wp(mavproxy, mav)
# pitch back to fly south
print("Going south %u meters" % side)
mavproxy.send('rc 2 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save bottom right corner of square as waypoint
print("Save WP 5")
save_wp(mavproxy, mav)
# roll left to fly west
print("Going west %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save bottom left corner of square (should be near home) as waypoint
print("Save WP 6")
save_wp(mavproxy, mav)
# descend to 10m
print("Descend to 10m in Loiter")
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
mavproxy.send('rc 3 1300\n')
time_left = timeout - (get_sim_time(mav) - tstart)
print("timeleft = %u" % time_left)
if time_left < 20:
time_left = 20
if not wait_altitude(mav, -10, 10, time_left):
print("Failed to reach alt of 10m")
success = False
save_wp(mavproxy, mav)
return success
def fly_RTL(mavproxy, mav, side=60, timeout=250):
'''Return, land'''
print("# Enter RTL")
mavproxy.send('switch 3\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
if m.alt <= 1 and home_distance < 10:
return True
return False
def fly_throttle_failsafe(mavproxy, mav, side=60, timeout=180):
'''Fly east, Failsafe, return, land'''
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 135):
return False
mavproxy.send('rc 4 1500\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
hover(mavproxy, mav)
failed = False
# fly east 60 meters
print("# Going forward %u meters" % side)
mavproxy.send('rc 2 1350\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
# pull throttle low
print("# Enter Failsafe")
mavproxy.send('rc 3 900\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# check if we've reached home
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached failsafe home OK")
return True
print("Failed to land on failsafe RTL - timed out after %u seconds" % timeout)
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize mode
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
return False
def fly_battery_failsafe(mavproxy, mav, timeout=30):
# assume failure
success = False
# switch to loiter mode so that we hold position
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
mavproxy.send("rc 3 1500\n")
# enable battery failsafe
mavproxy.send("param set FS_BATT_ENABLE 1\n")
# trigger low voltage
mavproxy.send('param set SIM_BATT_VOLTAGE 10\n')
# wait for LAND mode
new_mode = wait_mode(mav, 'LAND')
if new_mode == 'LAND':
success = True
# disable battery failsafe
mavproxy.send('param set FS_BATT_ENABLE 0\n')
# return status
if success:
print("Successfully entered LAND mode after battery failsafe")
else:
print("Failed to enter LAND mode after battery failsafe")
return success
# fly_stability_patch - fly south, then hold loiter within 5m position and altitude and reduce 1 motor to 55% efficiency
def fly_stability_patch(mavproxy, mav, holdtime=30, maxaltchange=5, maxdistchange=10):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first south
print("turn south")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 180):
return False
mavproxy.send('rc 4 1500\n')
# fly south 80m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 80):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow down
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
# cut motor 1 to 55% efficiency
print("Cutting motor 1 to 55% efficiency")
mavproxy.send('param set SIM_ENGINE_MUL 0.55\n')
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
# restore motor 1 to 100% efficiency
mavproxy.send('param set SIM_ENGINE_MUL 1.0\n')
if success:
print("Stability patch and Loiter OK for %u seconds" % holdtime)
else:
print("Stability Patch FAILED")
return success
# fly_fence_test - fly east until you hit the horizontal circular fence
def fly_fence_test(mavproxy, mav, timeout=180):
'''fly east until the horizontal fence is breached and the copter returns home and lands'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# enable fence
mavproxy.send('param set FENCE_ENABLE 1\n')
# first east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 160):
return False
mavproxy.send('rc 4 1500\n')
# fly forward (east) at least 20m
pitching_forward = True
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 20):
return False
# start timer
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# recenter pitch sticks once we reach home so we don't fly off again
if pitching_forward and home_distance < 10 :
pitching_forward = False
mavproxy.send('rc 2 1500\n')
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached home OK")
return True
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Fence test failed to reach home - timed out after %u seconds" % timeout)
return False
def show_gps_and_sim_positions(mavproxy, on_off):
if on_off:
# turn on simulator display of gps and actual position
mavproxy.send('map set showgpspos 1\n')
mavproxy.send('map set showsimpos 1\n')
else:
# turn off simulator display of gps and actual position
mavproxy.send('map set showgpspos 0\n')
mavproxy.send('map set showsimpos 0\n')
# fly_gps_glitch_loiter_test - fly south east in loiter and test reaction to gps glitch
def fly_gps_glitch_loiter_test(mavproxy, mav, timeout=30, max_distance=20):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
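# each entry is an offset in degrees of latitude/longitude applied to the simulated GPS position via SIM_GPS_GLITCH_X/Y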
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# turn south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 150):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 4 1500\n')
# fly forward (south east) at least 60m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow down
if not wait_groundspeed(mav, 0, 1):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
success = True
# initialise current glitch
glitch_current = 0
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while tnow < tstart + timeout:
tnow = get_sim_time(mav)
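# step through the glitch list at roughly 2.2 glitches per second of sim time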
desired_glitch_num = int((tnow - tstart) * 2.2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# turn off glitching if we've reached the end of the glitch list
if glitch_current >= glitch_num:
glitch_current = -1
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
else:
print("Applying glitch %u" % glitch_current)
#move onto the next glitch
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# start displaying distance moved after all glitches applied
if (glitch_current == -1):
m = mav.recv_match(type='VFR_HUD', blocking=True)
curr_pos = sim_location(mav)
moved_distance = get_distance(curr_pos, start_pos)
print("Alt: %u Moved: %.0f" % (m.alt, moved_distance))
if moved_distance > max_distance:
print("Moved over %u meters, Failed!" % max_distance)
success = False
# disable gps glitch
if glitch_current != -1:
glitch_current = -1
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
show_gps_and_sim_positions(mavproxy, False)
if success:
print("GPS glitch test passed! stayed within %u meters for %u seconds" % (max_distance, timeout))
else:
print("GPS glitch test FAILED!")
return success
# fly_gps_glitch_auto_test - fly mission and test reaction to gps glitch
def fly_gps_glitch_auto_test(mavproxy, mav, timeout=30, max_distance=100):
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# Fly mission #1
print("# Load copter_glitch_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_glitch_mission.txt")):
print("load copter_glitch_mission failed")
return False
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# wait until 100m from home
if not wait_distance(mav, 100, 5, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
# initialise current glitch
glitch_current = 0
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while glitch_current < glitch_num:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# apply next glitch
if glitch_current < glitch_num:
print("Applying glitch %u" % glitch_current)
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# turn off glitching
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
# continue with the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# wait for arrival back home
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
while dist_to_home > 5:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
print("Dist from home: %u" % dist_to_home)
# turn off simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, False)
print("GPS Glitch test Auto completed: passed=%s" % ret)
return ret
#fly_simple - assumes the simple bearing is initialised to be directly north
# flies a box: 'side' metres south, 8 seconds west, side/2 metres north, 8 seconds east
def fly_simple(mavproxy, mav, side=50, timeout=120):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
#set SIMPLE mode for all flight modes
mavproxy.send('param set SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# fly south 50m
print("# Flying south %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly west 8 seconds
print("# Flying west for 8 seconds")
mavproxy.send('rc 2 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
# fly north 25 meters
print("# Flying north %u meters" % (side/2.0))
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side/2, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly east 8 seconds
print("# Flying east for 8 seconds")
mavproxy.send('rc 2 1700\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
#restore to default
mavproxy.send('param set SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_super_simple - flies a circle around home for 45 seconds
def fly_super_simple(mavproxy, mav, timeout=45):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# fly forward 20m
print("# Flying forward 20 meters")
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, 20, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
#set SUPER SIMPLE mode for all flight modes
mavproxy.send('param set SUPER_SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# start copter yawing slowly
mavproxy.send('rc 4 1550\n')
# roll left for timeout seconds
print("# rolling left from pilot's point of view for %u seconds" % timeout)
mavproxy.send('rc 1 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + timeout):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
# stop rolling and yawing
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 4 1500\n')
#restore simple mode parameters to default
mavproxy.send('param set SUPER_SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_circle - flies a circle with 20m radius
def fly_circle(mavproxy, mav, maxaltchange=10, holdtime=36):
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# face west
print("turn west")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 270):
return False
mavproxy.send('rc 4 1500\n')
#set CIRCLE radius
mavproxy.send('param set CIRCLE_RADIUS 3000\n')
# fly forward (east) at least 100m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 100):
return False
# return pitch stick back to middle
mavproxy.send('rc 2 1500\n')
# set CIRCLE mode
mavproxy.send('switch 1\n') # circle mode
wait_mode(mav, 'CIRCLE')
# wait
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Circle at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
print("heading %u" % m.heading)
print("CIRCLE OK for %u seconds" % holdtime)
return True
# fly_auto_test - fly mission which tests a significant number of commands
def fly_auto_test(mavproxy, mav):
# Fly mission #1
print("# Load copter_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_mission.txt")):
print("load copter_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("Auto mission completed: passed=%s" % ret)
return ret
# fly_avc_test - fly AVC mission
def fly_avc_test(mavproxy, mav):
# upload mission from file
print("# Load copter_AVC2013_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_AVC2013_mission.txt")):
print("load copter_AVC2013_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("Fly AVC mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("AVC mission completed: passed=%s" % ret)
return ret
def land(mavproxy, mav, timeout=60):
'''land the quad'''
print("STARTING LANDING")
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
print("Entered Landing Mode")
ret = wait_altitude(mav, -5, 1)
print("LANDING: ok= %s" % ret)
return ret
def fly_mission(mavproxy, mav, height_accuracy=-1, target_altitude=None):
'''fly a mission from a file'''
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
expect_msg = "Reached Command #%u" % (num_wp-1)
if (ret):
mavproxy.expect(expect_msg)
print("test: MISSION COMPLETE: passed=%s" % ret)
# wait here until ready
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
return ret
def load_mission_from_file(mavproxy, mav, filename):
'''Load a mission from a file to flight controller'''
global num_wp
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('flight plan received')
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
# update num_wp
wploader = mavwp.MAVWPLoader()
wploader.load(filename)
num_wp = wploader.count()
return True
def save_mission_to_file(mavproxy, mav, filename):
global num_wp
mavproxy.send('wp save %s\n' % filename)
mavproxy.expect('Saved ([0-9]+) waypoints')
num_wp = int(mavproxy.match.group(1))
print("num_wp: %d" % num_wp)
return True
def setup_rc(mavproxy):
'''setup RC override control'''
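# centre RC channels 1-8 at 1500 PWM before zeroing the throttle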
for chan in range(1,9):
mavproxy.send('rc %u 1500\n' % chan)
# zero throttle
mavproxy.send('rc 3 1000\n')
def fly_ArduCopter(viewerip=None, map=False):
'''fly ArduCopter in SIL
you can pass viewerip as an IP address to which fg and mavproxy
packets are also sent, for local viewing of the flight in real time
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
sim_cmd = util.reltopdir('Tools/autotest/pysim/sim_wrapper.py') + ' --frame=%s --speedup=100 --rate=400 --home=%f,%f,%u,%u' % (
FRAME, HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sim_cmd += ' --wind=6,45,.3'
if viewerip:
sim_cmd += ' --fgout=%s:5503' % viewerip
sil = util.start_SIL('ArduCopter', wipe=True)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
sim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
sim.delaybeforesend = 0
util.pexpect_autoclose(sim)
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
util.pexpect_close(sim)
sil = util.start_SIL('ArduCopter', height=HOME.alt)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Logging to (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
sim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
sim.delaybeforesend = 0
util.pexpect_autoclose(sim)
buildlog = util.reltopdir("../buildlogs/ArduCopter-test.tlog")
print("buildlog=%s" % buildlog)
copyTLog = False
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
print( "WARN: Failed to create symlink: " + logfile + " => " + buildlog + ", Will copy tlog manually to target location" )
copyTLog = True
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sim, sil, mavproxy])
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a square in Stabilize mode
print("#")
print("########## Fly a square and save WPs with CH7 switch ##########")
print("#")
if not fly_square(mavproxy, mav):
failed_test_msg = "fly_square failed"
print(failed_test_msg)
failed = True
# save the stored mission to file
print("# Save out the CH7 mission to file")
if not save_mission_to_file(mavproxy, mav, os.path.join(testdir, "ch7_mission.txt")):
failed_test_msg = "save_mission_to_file failed"
print(failed_test_msg)
failed = True
# fly the stored mission
print("# Fly CH7 saved mission")
if not fly_mission(mavproxy, mav,height_accuracy = 0.5, target_altitude=10):
failed_test_msg = "fly ch7_mission failed"
print(failed_test_msg)
failed = True
# Throttle Failsafe
print("#")
print("########## Test Failsafe ##########")
print("#")
if not fly_throttle_failsafe(mavproxy, mav):
failed_test_msg = "fly_throttle_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Battery failsafe
if not fly_battery_failsafe(mavproxy, mav):
failed_test_msg = "fly_battery_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Stability patch
print("#")
print("########## Test Stability Patch ##########")
print("#")
if not fly_stability_patch(mavproxy, mav, 30):
failed_test_msg = "fly_stability_patch failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after stab patch failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fence test
print("#")
print("########## Test Horizontal Fence ##########")
print("#")
if not fly_fence_test(mavproxy, mav, 180):
failed_test_msg = "fly_fence_test failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch Loiter test
print("# GPS Glitch Loiter Test")
if not fly_gps_glitch_loiter_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_loiter_test failed"
print(failed_test_msg)
failed = True
# RTL after GPS Glitch Loiter test
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch test in auto mode
print("# GPS Glitch Auto Test")
if not fly_gps_glitch_auto_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_auto_test failed"
print(failed_test_msg)
failed = True
# take-off ahead of next test
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Loiter for 10 seconds
print("#")
print("########## Test Loiter for 10 seconds ##########")
print("#")
if not loiter(mavproxy, mav):
failed_test_msg = "loiter failed"
print(failed_test_msg)
failed = True
# Loiter Climb
print("#")
print("# Loiter - climb to 30m")
print("#")
if not change_alt(mavproxy, mav, 30):
failed_test_msg = "change_alt climb failed"
print(failed_test_msg)
failed = True
# Loiter Descend
print("#")
print("# Loiter - descend to 20m")
print("#")
if not change_alt(mavproxy, mav, 20):
failed_test_msg = "change_alt descend failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after Loiter climb/descend failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Simple mode
print("# Fly in SIMPLE mode")
if not fly_simple(mavproxy, mav):
failed_test_msg = "fly_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a circle in super simple mode
print("# Fly a circle in SUPER SIMPLE mode")
if not fly_super_simple(mavproxy, mav):
failed_test_msg = "fly_super_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after super simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Circle mode
print("# Fly CIRCLE mode")
if not fly_circle(mavproxy, mav):
failed_test_msg = "fly_circle failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after circle failed"
print(failed_test_msg)
failed = True
print("# Fly copter mission")
if not fly_auto_test(mavproxy, mav):
failed_test_msg = "fly_auto_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew copter mission OK")
# wait for disarm
mav.motors_disarmed_wait()
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/ArduCopter-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
util.pexpect_close(sim)
if os.path.exists('ArduCopter-valgrind.log'):
os.chmod('ArduCopter-valgrind.log', 0644)
shutil.copy("ArduCopter-valgrind.log", util.reltopdir("../buildlogs/ArduCopter-valgrind.log"))
# [2014/05/07] FC Because I'm doing a cross machine build (source is on host, build is on guest VM) I cannot hard link
# This flag tells me that I need to copy the data out
if copyTLog:
shutil.copy(logfile, buildlog)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
def fly_CopterAVC(viewerip=None, map=False):
'''fly ArduCopter in SIL for AVC2013 mission
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
sim_cmd = util.reltopdir('Tools/autotest/pysim/sim_wrapper.py') + ' --frame=%s --rate=400 --speedup=100 --home=%f,%f,%u,%u' % (
FRAME, AVCHOME.lat, AVCHOME.lng, AVCHOME.alt, AVCHOME.heading)
if viewerip:
sim_cmd += ' --fgout=%s:5503' % viewerip
sil = util.start_SIL('ArduCopter', wipe=True)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
sim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
sim.delaybeforesend = 0
util.pexpect_autoclose(sim)
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_AVC2013_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
util.pexpect_close(sim)
sil = util.start_SIL('ArduCopter', height=HOME.alt)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Logging to (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
sim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
sim.delaybeforesend = 0
util.pexpect_autoclose(sim)
buildlog = util.reltopdir("../buildlogs/CopterAVC-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sim, sil, mavproxy])
if map:
mavproxy.send('map icon 40.072467969730496 -105.2314389590174\n')
mavproxy.send('map icon 40.072600990533829 -105.23146100342274\n')
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Fly AVC mission")
if not fly_avc_test(mavproxy, mav):
failed_test_msg = "fly_avc_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew AVC mission OK")
#mission includes disarm at end so should be ok to download logs now
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/CopterAVC-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
util.pexpect_close(sim)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
|
|
from collections import OrderedDict
from functools import partial
from classytags.utils import flatten_context
from django.contrib.sites.models import Site
from django.template import Context
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from cms.cache.placeholder import get_placeholder_cache, set_placeholder_cache
from cms.toolbar.utils import (
get_placeholder_toolbar_js, get_plugin_toolbar_js,
get_toolbar_from_request,
)
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import has_plugin_permission
from cms.utils.placeholder import (
get_toolbar_plugin_struct, restore_sekizai_context,
)
from cms.utils.plugins import get_plugin_restrictions
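# Recursively collect all descendant plugins of parent_plugin into a flat list.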
def _unpack_plugins(parent_plugin):
found_plugins = []
for plugin in parent_plugin.child_plugin_instances or []:
found_plugins.append(plugin)
if plugin.child_plugin_instances:
found_plugins.extend(_unpack_plugins(plugin))
return found_plugins
class RenderedPlaceholder:
__slots__ = (
'language',
'site_id',
'cached',
'editable',
'placeholder',
'has_content',
)
def __init__(self, placeholder, language, site_id, cached=False,
editable=False, has_content=False):
self.language = language
self.site_id = site_id
self.cached = cached
self.editable = editable
self.placeholder = placeholder
self.has_content = has_content
def __eq__(self, other):
# The same placeholder rendered with different
# parameters is considered the same.
# This behavior is compatible with previous djangoCMS releases.
return self.placeholder == other.placeholder
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.placeholder)
class BaseRenderer:
load_structure = False
placeholder_edit_template = ''
def __init__(self, request):
self.request = request
self.request_language = get_language_from_request(self.request)
self._cached_templates = {}
self._cached_plugin_classes = {}
self._placeholders_content_cache = {}
self._placeholders_by_page_cache = {}
self._rendered_placeholders = OrderedDict()
self._rendered_static_placeholders = OrderedDict()
self._rendered_plugins_by_placeholder = {}
@cached_property
def current_page(self):
return self.request.current_page
@cached_property
def current_site(self):
return Site.objects.get_current(self.request)
@cached_property
def toolbar(self):
return get_toolbar_from_request(self.request)
@cached_property
def templates(self):
return self.toolbar.templates
@cached_property
def plugin_pool(self):
import cms.plugin_pool
return cms.plugin_pool.plugin_pool
def get_placeholder_plugin_menu(self, placeholder, page=None):
registered_plugins = self.plugin_pool.registered_plugins
can_add_plugin = partial(has_plugin_permission, user=self.request.user, permission_type='add')
plugins = [plugin for plugin in registered_plugins if can_add_plugin(plugin_type=plugin.value)]
plugin_menu = get_toolbar_plugin_struct(
plugins=plugins,
slot=placeholder.slot,
page=page,
)
plugin_menu_template = self.templates.placeholder_plugin_menu_template
return plugin_menu_template.render({'plugin_menu': plugin_menu})
def get_placeholder_toolbar_js(self, placeholder, page=None):
plugins = self.plugin_pool.get_all_plugins(placeholder.slot, page) # original
plugin_types = [cls.__name__ for cls in plugins]
allowed_plugins = plugin_types + self.plugin_pool.get_system_plugins()
placeholder_toolbar_js = get_placeholder_toolbar_js(
placeholder=placeholder,
allowed_plugins=allowed_plugins,
)
return placeholder_toolbar_js
def get_plugin_toolbar_js(self, plugin, page=None):
placeholder_cache = self._rendered_plugins_by_placeholder.setdefault(plugin.placeholder_id, {})
child_classes, parent_classes = get_plugin_restrictions(
plugin=plugin,
page=page,
restrictions_cache=placeholder_cache,
)
content = get_plugin_toolbar_js(
plugin,
children=child_classes,
parents=parent_classes,
)
return content
def get_plugin_class(self, plugin):
plugin_type = plugin.plugin_type
if plugin_type not in self._cached_plugin_classes:
self._cached_plugin_classes[plugin_type] = self.plugin_pool.get_plugin(plugin_type)
return self._cached_plugin_classes[plugin_type]
def get_plugins_to_render(self, placeholder, language, template):
from cms.utils.plugins import get_plugins
plugins = get_plugins(
request=self.request,
placeholder=placeholder,
template=template,
lang=language,
)
return plugins
def get_rendered_plugins_cache(self, placeholder):
blank = {
'plugins': [],
'plugin_parents': {},
'plugin_children': {},
}
return self._rendered_plugins_by_placeholder.get(placeholder.pk, blank)
def get_rendered_placeholders(self):
rendered = list(self._rendered_placeholders.values())
return [r.placeholder for r in rendered]
def get_rendered_editable_placeholders(self):
rendered = list(self._rendered_placeholders.values())
return [r.placeholder for r in rendered if r.editable]
def get_rendered_static_placeholders(self):
return list(self._rendered_static_placeholders.values())
class ContentRenderer(BaseRenderer):
plugin_edit_template = (
'<template class="cms-plugin '
'cms-plugin-start cms-plugin-{pk}"></template>{content}'
'<template class="cms-plugin cms-plugin-end cms-plugin-{pk}"></template>'
)
placeholder_edit_template = (
'{content} '
'<div class="cms-placeholder cms-placeholder-{placeholder_id}"></div> '
'<script data-cms>{plugin_js}\n{placeholder_js}</script>'
)
def __init__(self, request):
super().__init__(request)
self._placeholders_are_editable = bool(self.toolbar.edit_mode_active)
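# The placeholder cache is used only when enabled in settings, the user is not staff, and placeholders are not editable.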
def placeholder_cache_is_enabled(self):
if not get_cms_setting('PLACEHOLDER_CACHE'):
return False
if self.request.user.is_staff:
return False
return not self._placeholders_are_editable
def render_placeholder(self, placeholder, context, language=None, page=None,
editable=False, use_cache=False, nodelist=None, width=None):
from sekizai.helpers import Watcher
language = language or self.request_language
editable = editable and self._placeholders_are_editable
if use_cache and not editable and placeholder.cache_placeholder:
use_cache = self.placeholder_cache_is_enabled()
else:
use_cache = False
if use_cache:
cached_value = self._get_cached_placeholder_content(
placeholder=placeholder,
language=language,
)
else:
cached_value = None
if cached_value is not None:
# User has opted to use the cache
# and there is something in the cache
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
context.push()
width = width or placeholder.default_width
template = page.get_template() if page else None
if width:
context['width'] = width
# Add extra context as defined in settings, but do not overwrite existing context variables,
# since settings are general and database/template are specific
# TODO this should actually happen as a plugin context processor, but these currently overwrite
# existing context -- maybe change this order?
for key, value in placeholder.get_extra_context(template).items():
if key not in context:
context[key] = value
if use_cache:
watcher = Watcher(context)
plugin_content = self.render_plugins(
placeholder,
language=language,
context=context,
editable=editable,
template=template,
)
placeholder_content = ''.join(plugin_content)
if not placeholder_content and nodelist:
# should be nodelist from a template
placeholder_content = nodelist.render(context)
if use_cache:
content = {
'content': placeholder_content,
'sekizai': watcher.get_changes(),
}
set_placeholder_cache(
placeholder,
lang=language,
site_id=self.current_site.pk,
content=content,
request=self.request,
)
rendered_placeholder = RenderedPlaceholder(
placeholder=placeholder,
language=language,
site_id=self.current_site.pk,
cached=use_cache,
editable=editable,
has_content=bool(placeholder_content),
)
if placeholder.pk not in self._rendered_placeholders:
# First time this placeholder is rendered
if not self.toolbar._cache_disabled:
# The toolbar middleware needs to know if the response
# is to be cached.
                # Mark the response as not cacheable if this placeholder was not
                # cached, but only while the flag is still False (cache enabled).
self.toolbar._cache_disabled = not use_cache
self._rendered_placeholders[placeholder.pk] = rendered_placeholder
if editable:
data = self.get_editable_placeholder_context(placeholder, page=page)
data['content'] = placeholder_content
placeholder_content = self.placeholder_edit_template.format(**data)
context.pop()
return mark_safe(placeholder_content)
def get_editable_placeholder_context(self, placeholder, page=None):
placeholder_cache = self.get_rendered_plugins_cache(placeholder)
placeholder_toolbar_js = self.get_placeholder_toolbar_js(placeholder, page)
plugin_toolbar_js_bits = (self.get_plugin_toolbar_js(plugin, page=page)
for plugin in placeholder_cache['plugins'])
context = {
'plugin_js': ''.join(plugin_toolbar_js_bits),
'placeholder_js': placeholder_toolbar_js,
'placeholder_id': placeholder.pk,
}
return context
def render_page_placeholder(self, slot, context, inherit,
page=None, nodelist=None, editable=True):
if not self.current_page:
# This method should only be used when rendering a cms page.
return ''
current_page = page or self.current_page
placeholder_cache = self._placeholders_by_page_cache
if current_page.pk not in placeholder_cache:
            # Instead of loading plugins for this one placeholder,
            # try to load them for all placeholders on the page.
self._preload_placeholders_for_page(current_page)
try:
placeholder = placeholder_cache[current_page.pk][slot]
except KeyError:
content = ''
placeholder = None
else:
content = self.render_placeholder(
placeholder,
context=context,
page=current_page,
editable=editable,
use_cache=True,
nodelist=None,
)
parent_page = current_page.parent_page
should_inherit = (
inherit and not content and parent_page # noqa: W503
# The placeholder cache is primed when the first placeholder
# is loaded. If the current page's parent is not in there,
# it means its cache was never primed as it wasn't necessary.
and parent_page.pk in placeholder_cache # noqa: W503
# don't display inherited plugins in edit mode, so that the user doesn't
# mistakenly edit/delete them. This is a fix for issue #1303. See the discussion
# there for possible enhancements
and not self.toolbar.edit_mode_active # noqa: W503
)
if should_inherit:
# nodelist is set to None to avoid rendering the nodes inside
# a {% placeholder or %} block tag.
content = self.render_page_placeholder(
slot,
context,
inherit=True,
page=parent_page,
nodelist=None,
editable=False,
)
if placeholder and (editable and self._placeholders_are_editable):
# In edit mode, the contents of the placeholder are mixed with our
            # internal toolbar markup, so the content variable is always non-empty.
# Use the rendered placeholder has_content flag instead.
has_content = self._rendered_placeholders[placeholder.pk].has_content
else:
# User is not in edit mode or the placeholder doesn't exist.
# Either way, we can trust the content variable.
has_content = bool(content)
if not has_content and nodelist:
return content + nodelist.render(context)
return content
def render_static_placeholder(self, static_placeholder, context, nodelist=None):
user = self.request.user
if self.toolbar.edit_mode_active and user.has_perm('cms.edit_static_placeholder'):
placeholder = static_placeholder.draft
editable = True
use_cache = False
else:
placeholder = static_placeholder.public
editable = False
use_cache = True
# I really don't like these impromptu flags...
placeholder.is_static = True
content = self.render_placeholder(
placeholder,
context=context,
editable=editable,
use_cache=use_cache,
nodelist=nodelist,
)
if static_placeholder.pk not in self._rendered_static_placeholders:
# First time this static placeholder is rendered
self._rendered_static_placeholders[static_placeholder.pk] = static_placeholder
return content
def render_plugin(self, instance, context, placeholder=None, editable=False):
if not placeholder:
placeholder = instance.placeholder
instance, plugin = instance.get_plugin_instance()
if not instance or not plugin.render_plugin:
return ''
        # plugin.render() can return pretty much any kind of context / dictionary,
        # so we flatten it and force it into a Context object before passing it to
        # template.render(). Flattening the context means the template must be an
        # engine-specific template object, which is guaranteed by get_cached_template
        # as long as plugin._get_render_template returns either a string or an
        # engine-specific template object.
context = PluginContext(context, instance, placeholder)
context = plugin.render(context, instance, placeholder.slot)
context = flatten_context(context)
template = plugin._get_render_template(context, instance, placeholder)
template = self.templates.get_cached_template(template)
content = template.render(context)
for path in get_cms_setting('PLUGIN_PROCESSORS'):
processor = import_string(path)
content = processor(instance, placeholder, content, context)
if editable:
content = self.plugin_edit_template.format(pk=instance.pk, content=content)
placeholder_cache = self._rendered_plugins_by_placeholder.setdefault(placeholder.pk, {})
placeholder_cache.setdefault('plugins', []).append(instance)
return mark_safe(content)
def render_plugins(self, placeholder, language, context, editable=False, template=None):
plugins = self.get_plugins_to_render(
placeholder=placeholder,
template=template,
language=language,
)
for plugin in plugins:
plugin._placeholder_cache = placeholder
yield self.render_plugin(plugin, context, placeholder, editable)
def _get_cached_placeholder_content(self, placeholder, language):
"""
        Returns a dictionary containing the cached placeholder content and sekizai data.
Returns None if no cache is present.
"""
        # Placeholders can be rendered multiple times under different sites,
        # so it's important to have a per-site "cache".
site_id = self.current_site.pk
site_cache = self._placeholders_content_cache.setdefault(site_id, {})
        # Placeholders can be rendered multiple times under different languages,
        # so it's important to have a per-language "cache".
language_cache = site_cache.setdefault(language, {})
if placeholder.pk not in language_cache:
cached_value = get_placeholder_cache(
placeholder,
lang=language,
site_id=site_id,
request=self.request,
)
if cached_value is not None:
# None means nothing in the cache
# Anything else is a valid value
language_cache[placeholder.pk] = cached_value
return language_cache.get(placeholder.pk)
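    # Illustrative sketch (not part of the original renderer API): the nested
    # cache built above has roughly this shape, so a placeholder can be cached
    # once per site and per language:
    #
    #     self._placeholders_content_cache = {
    #         <site_id>: {
    #             <language>: {
    #                 <placeholder_pk>: {'content': '<html>', 'sekizai': <changes>},
    #             },
    #         },
    #     }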
def _preload_placeholders_for_page(self, page, slots=None, inherit=False):
"""
Populates the internal plugin cache of each placeholder
in the given page if the placeholder has not been
previously cached.
"""
from cms.utils.plugins import assign_plugins
if slots:
placeholders = page.get_placeholders().filter(slot__in=slots)
else:
# Creates any placeholders missing on the page
placeholders = page.rescan_placeholders().values()
if inherit:
# When the inherit flag is True,
# assume all placeholders found are inherited and thus prefetch them.
slots_w_inheritance = [pl.slot for pl in placeholders]
elif not self.toolbar.edit_mode_active:
# Scan through the page template to find all placeholders
# that have inheritance turned on.
slots_w_inheritance = [pl.slot for pl in page.get_declared_placeholders() if pl.inherit]
else:
            # Inheritance is turned off in edit mode
slots_w_inheritance = []
if self.placeholder_cache_is_enabled():
_cached_content = self._get_cached_placeholder_content
# Only prefetch plugins if the placeholder
# has not been cached.
placeholders_to_fetch = [
placeholder for placeholder in placeholders
if _cached_content(placeholder, self.request_language) is None
]
else:
# cache is disabled, prefetch plugins for all
# placeholders in the page.
placeholders_to_fetch = placeholders
if placeholders_to_fetch:
assign_plugins(
request=self.request,
placeholders=placeholders_to_fetch,
template=page.get_template(),
lang=self.request_language,
is_fallback=inherit,
)
parent_page = page.parent_page
# Inherit only placeholders that have no plugins
# or are not cached.
placeholders_to_inherit = [
pl.slot for pl in placeholders
if not getattr(pl, '_plugins_cache', None) and pl.slot in slots_w_inheritance
]
if parent_page and placeholders_to_inherit:
self._preload_placeholders_for_page(
page=parent_page,
slots=placeholders_to_inherit,
inherit=True,
)
# Internal cache mapping placeholder slots
# to placeholder instances.
page_placeholder_cache = {}
for placeholder in placeholders:
# Save a query when the placeholder toolbar is rendered.
placeholder.page = page
page_placeholder_cache[placeholder.slot] = placeholder
self._placeholders_by_page_cache[page.pk] = page_placeholder_cache
class StructureRenderer(BaseRenderer):
load_structure = True
placeholder_edit_template = (
"""
<script data-cms id="cms-plugin-child-classes-{placeholder_id}" type="text/cms-template">
{plugin_menu_js}
</script>
<script data-cms>{plugin_js}\n{placeholder_js}</script>
"""
)
def get_plugins_to_render(self, *args, **kwargs):
plugins = super().get_plugins_to_render(*args, **kwargs)
for plugin in plugins:
yield plugin
if not plugin.child_plugin_instances:
continue
for plugin in _unpack_plugins(plugin):
yield plugin
def render_placeholder(self, placeholder, language, page=None):
rendered_plugins = self.render_plugins(placeholder, language=language, page=page)
plugin_js_output = ''.join(rendered_plugins)
placeholder_toolbar_js = self.get_placeholder_toolbar_js(placeholder, page)
rendered_placeholder = RenderedPlaceholder(
placeholder=placeholder,
language=language,
site_id=self.current_site.pk,
cached=False,
editable=True,
)
if placeholder.pk not in self._rendered_placeholders:
self._rendered_placeholders[placeholder.pk] = rendered_placeholder
        placeholder_structure = self.placeholder_edit_template.format(
placeholder_id=placeholder.pk,
plugin_js=plugin_js_output,
plugin_menu_js=self.get_placeholder_plugin_menu(placeholder, page=page),
placeholder_js=placeholder_toolbar_js,
)
        return mark_safe(placeholder_structure)
def render_page_placeholder(self, page, placeholder, language=None):
return self.render_placeholder(placeholder, language=language, page=page)
def render_static_placeholder(self, static_placeholder, language=None):
user = self.request.user
if not user.has_perm('cms.edit_static_placeholder'):
return ''
language = language or self.request_language
placeholder = static_placeholder.draft
# I really don't like these impromptu flags...
placeholder.is_static = True
content = self.render_placeholder(placeholder, language=language)
if static_placeholder.pk not in self._rendered_static_placeholders:
# First time this static placeholder is rendered
self._rendered_static_placeholders[static_placeholder.pk] = static_placeholder
return content
def render_plugin(self, instance, page=None):
placeholder_cache = self._rendered_plugins_by_placeholder.setdefault(instance.placeholder_id, {})
placeholder_cache.setdefault('plugins', []).append(instance)
return self.get_plugin_toolbar_js(instance, page=page)
def render_plugins(self, placeholder, language, page=None):
template = page.get_template() if page else None
plugins = self.get_plugins_to_render(placeholder, language, template)
for plugin in plugins:
plugin._placeholder_cache = placeholder
yield self.render_plugin(plugin, page=page)
class LegacyRenderer(ContentRenderer):
load_structure = True
placeholder_edit_template = (
"""
{content}
<div class="cms-placeholder cms-placeholder-{placeholder_id}"></div>
<script data-cms id="cms-plugin-child-classes-{placeholder_id}" type="text/cms-template">
{plugin_menu_js}
</script>
<script data-cms>{plugin_js}\n{placeholder_js}</script>
"""
)
def get_editable_placeholder_context(self, placeholder, page=None):
context = super().get_editable_placeholder_context(placeholder, page)
context['plugin_menu_js'] = self.get_placeholder_plugin_menu(placeholder, page=page)
return context
class PluginContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CMS_PLUGIN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, dict_, instance, placeholder, processors=None, current_app=None):
dict_ = flatten_context(dict_)
super().__init__(dict_)
if not processors:
processors = []
for path in get_cms_setting('PLUGIN_CONTEXT_PROCESSORS'):
processor = import_string(path)
self.update(processor(instance, placeholder, self))
for processor in processors:
self.update(processor(instance, placeholder, self))
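# Illustrative sketch (assumption, not shipped with this module): a plugin
# context processor referenced from CMS_PLUGIN_CONTEXT_PROCESSORS, or passed via
# the "processors" keyword argument, is any callable taking
# (instance, placeholder, context) and returning a dict. PluginContext.__init__
# calls each one and update()s itself with the result, so the returned keys
# become template variables. For example (hypothetical processor and key):
#
#     def add_slot_name(instance, placeholder, context):
#         return {'slot_name': placeholder.slot}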
|
|
#
# File : menuconfig.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2017-12-29 Bernard The first version
# 2018-07-31 weety Support pyconfig
# 2019-07-13 armink Support guiconfig
import os
import re
import sys
import shutil
import hashlib
import operator
# make rtconfig.h from .config
def is_pkg_special_config(config_str):
    '''Return True if config_str is a CONFIG_PKG_XX_PATH or CONFIG_PKG_XX_VER option.'''
    if isinstance(config_str, str):
if config_str.startswith("PKG_") and (config_str.endswith('_PATH') or config_str.endswith('_VER')):
return True
return False
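# Example behaviour of is_pkg_special_config (illustration only; the option
# names are made up):
#   is_pkg_special_config('PKG_FOO_PATH') -> True
#   is_pkg_special_config('PKG_FOO_VER')  -> True
#   is_pkg_special_config('RT_NAME_MAX')  -> False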
def mk_rtconfig(filename):
try:
config = open(filename, 'r')
    except IOError:
        print('open config: %s failed' % filename)
return
rtconfig = open('rtconfig.h', 'w')
rtconfig.write('#ifndef RT_CONFIG_H__\n')
rtconfig.write('#define RT_CONFIG_H__\n\n')
empty_line = 1
for line in config:
line = line.lstrip(' ').replace('\n', '').replace('\r', '')
if len(line) == 0:
continue
if line[0] == '#':
if len(line) == 1:
if empty_line:
continue
rtconfig.write('\n')
empty_line = 1
continue
if line.startswith('# CONFIG_'):
line = ' ' + line[9:]
else:
line = line[1:]
rtconfig.write('/*%s */\n' % line)
empty_line = 0
else:
empty_line = 0
setting = line.split('=')
if len(setting) >= 2:
if setting[0].startswith('CONFIG_'):
setting[0] = setting[0][7:]
# remove CONFIG_PKG_XX_PATH or CONFIG_PKG_XX_VER
if is_pkg_special_config(setting[0]):
continue
if setting[1] == 'y':
rtconfig.write('#define %s\n' % setting[0])
else:
rtconfig.write('#define %s %s\n' % (setting[0], re.findall(r"^.*?=(.*)$",line)[0]))
    if os.path.isfile('rtconfig_project.h'):
        rtconfig.write('#include "rtconfig_project.h"\n')
    rtconfig.write('\n')
    rtconfig.write('#endif\n')
    rtconfig.close()
    config.close()
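# Illustrative sketch of the .config -> rtconfig.h translation performed by
# mk_rtconfig (the option names are examples only):
#
#   .config line                        generated rtconfig.h line
#   CONFIG_RT_USING_SERIAL=y            #define RT_USING_SERIAL
#   CONFIG_RT_NAME_MAX=8                #define RT_NAME_MAX 8
#   # CONFIG_RT_USING_CAN is not set    /* RT_USING_CAN is not set */
#
# PKG_xxx_PATH and PKG_xxx_VER options are skipped (see is_pkg_special_config),
# and '#include "rtconfig_project.h"' is appended when that file exists.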
def get_file_md5(file):
MD5 = hashlib.new('md5')
with open(file, 'r') as fp:
MD5.update(fp.read().encode('utf8'))
fp_md5 = MD5.hexdigest()
return fp_md5
def config():
mk_rtconfig('.config')
def get_env_dir():
if os.environ.get('ENV_ROOT'):
return os.environ.get('ENV_ROOT')
if sys.platform == 'win32':
home_dir = os.environ['USERPROFILE']
env_dir = os.path.join(home_dir, '.env')
else:
home_dir = os.environ['HOME']
env_dir = os.path.join(home_dir, '.env')
if not os.path.exists(env_dir):
return None
return env_dir
def help_info():
print("**********************************************************************************\n"
"* Help infomation:\n"
"* Git tool install step.\n"
"* If your system is linux, you can use command below to install git.\n"
"* $ sudo yum install git\n"
"* $ sudo apt-get install git\n"
"* If your system is windows, you should download git software(msysGit).\n"
"* Download path: http://git-scm.com/download/win\n"
"* After you install it, be sure to add the git command execution PATH \n"
"* to your system PATH.\n"
"* Usually, git command PATH is $YOUR_INSTALL_DIR\\Git\\bin\n"
"* If your system is OSX, please download git and install it.\n"
"* Download path: http://git-scm.com/download/mac\n"
"**********************************************************************************\n")
def touch_env():
if sys.platform != 'win32':
home_dir = os.environ['HOME']
else:
home_dir = os.environ['USERPROFILE']
env_dir = os.path.join(home_dir, '.env')
if not os.path.exists(env_dir):
os.mkdir(env_dir)
os.mkdir(os.path.join(env_dir, 'local_pkgs'))
os.mkdir(os.path.join(env_dir, 'packages'))
os.mkdir(os.path.join(env_dir, 'tools'))
kconfig = open(os.path.join(env_dir, 'packages', 'Kconfig'), 'w')
kconfig.close()
if not os.path.exists(os.path.join(env_dir, 'packages', 'packages')):
try:
ret = os.system('git clone https://github.com/RT-Thread/packages.git %s' % os.path.join(env_dir, 'packages', 'packages'))
if ret != 0:
shutil.rmtree(os.path.join(env_dir, 'packages', 'packages'))
print("********************************************************************************\n"
"* Warnning:\n"
"* Run command error for \"git clone https://github.com/RT-Thread/packages.git\".\n"
"* This error may have been caused by not found a git tool or network error.\n"
"* If the git tool is not installed, install the git tool first.\n"
"* If the git utility is installed, check whether the git command is added to \n"
"* the system PATH.\n"
"* This error may cause the RT-Thread packages to not work properly.\n"
"********************************************************************************\n")
help_info()
else:
kconfig = open(os.path.join(env_dir, 'packages', 'Kconfig'), 'w')
kconfig.write('source "$PKGS_DIR/packages/Kconfig"')
kconfig.close()
except:
print("**********************************************************************************\n"
"* Warnning:\n"
"* Run command error for \"git clone https://github.com/RT-Thread/packages.git\". \n"
"* This error may have been caused by not found a git tool or git tool not in \n"
"* the system PATH. \n"
"* This error may cause the RT-Thread packages to not work properly. \n"
"**********************************************************************************\n")
help_info()
if not os.path.exists(os.path.join(env_dir, 'tools', 'scripts')):
try:
ret = os.system('git clone https://github.com/RT-Thread/env.git %s' % os.path.join(env_dir, 'tools', 'scripts'))
if ret != 0:
shutil.rmtree(os.path.join(env_dir, 'tools', 'scripts'))
print("********************************************************************************\n"
"* Warnning:\n"
"* Run command error for \"git clone https://github.com/RT-Thread/env.git\".\n"
"* This error may have been caused by not found a git tool or network error.\n"
"* If the git tool is not installed, install the git tool first.\n"
"* If the git utility is installed, check whether the git command is added \n"
"* to the system PATH.\n"
"* This error may cause script tools to fail to work properly.\n"
"********************************************************************************\n")
help_info()
except:
print("********************************************************************************\n"
"* Warnning:\n"
"* Run command error for \"git clone https://github.com/RT-Thread/env.git\". \n"
"* This error may have been caused by not found a git tool or git tool not in \n"
"* the system PATH. \n"
"* This error may cause script tools to fail to work properly. \n"
"********************************************************************************\n")
help_info()
if sys.platform != 'win32':
        with open(os.path.join(env_dir, 'env.sh'), 'w') as env_sh:
            env_sh.write('export PATH=~/.env/tools/scripts:$PATH')
else:
if os.path.exists(os.path.join(env_dir, 'tools', 'scripts')):
os.environ["PATH"] = os.path.join(env_dir, 'tools', 'scripts') + ';' + os.environ["PATH"]
# menuconfig for Linux
def menuconfig(RTT_ROOT):
kconfig_dir = os.path.join(RTT_ROOT, 'tools', 'kconfig-frontends')
os.system('scons -C ' + kconfig_dir)
touch_env()
env_dir = get_env_dir()
os.environ['PKGS_ROOT'] = os.path.join(env_dir, 'packages')
fn = '.config'
fn_old = '.config.old'
kconfig_cmd = os.path.join(RTT_ROOT, 'tools', 'kconfig-frontends', 'kconfig-mconf')
os.system(kconfig_cmd + ' Kconfig')
if os.path.isfile(fn):
if os.path.isfile(fn_old):
diff_eq = operator.eq(get_file_md5(fn), get_file_md5(fn_old))
else:
diff_eq = False
else:
sys.exit(-1)
# make rtconfig.h
    if not diff_eq:
shutil.copyfile(fn, fn_old)
mk_rtconfig(fn)
# guiconfig for windows and linux
def guiconfig(RTT_ROOT):
import pyguiconfig
if sys.platform != 'win32':
touch_env()
env_dir = get_env_dir()
os.environ['PKGS_ROOT'] = os.path.join(env_dir, 'packages')
fn = '.config'
fn_old = '.config.old'
    sys.argv = ['guiconfig', 'Kconfig']
pyguiconfig._main()
if os.path.isfile(fn):
if os.path.isfile(fn_old):
diff_eq = operator.eq(get_file_md5(fn), get_file_md5(fn_old))
else:
diff_eq = False
else:
sys.exit(-1)
# make rtconfig.h
    if not diff_eq:
shutil.copyfile(fn, fn_old)
mk_rtconfig(fn)
# guiconfig silent mode for windows and linux
def guiconfig_silent(RTT_ROOT):
import defconfig
if sys.platform != 'win32':
touch_env()
env_dir = get_env_dir()
os.environ['PKGS_ROOT'] = os.path.join(env_dir, 'packages')
fn = '.config'
sys.argv = ['defconfig', '--kconfig', 'Kconfig', '.config']
defconfig.main()
# silent mode, force to make rtconfig.h
mk_rtconfig(fn)
|
|
# -*- test-case-name: twisted.python.test.test_release -*-
# Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted's automated release system.
This module is only for use within Twisted's release system. If you are anyone
else, do not use it. The interface and behaviour will change without notice.
Only Linux is supported by this code. It should not be used by any tools
which must run on multiple platforms (eg the setup.py script).
"""
import textwrap
from datetime import date
import re
import sys
import os
from tempfile import mkdtemp
import tarfile
from subprocess import PIPE, STDOUT, Popen
from twisted.python.versions import Version
from twisted.python.filepath import FilePath
from twisted.python.dist import twisted_subprojects
# This import is an example of why you shouldn't use this module unless you're
# radix
try:
from twisted.lore.scripts import lore
except ImportError:
pass
# The offset between a year and the corresponding major version number.
VERSION_OFFSET = 2000
# The list of subproject names to exclude from the main Twisted tarball and
# for which no individual project tarballs will be built.
PROJECT_BLACKLIST = ["vfs", "web2"]
def runCommand(args):
"""
Execute a vector of arguments.
@type args: C{list} of C{str}
@param args: A list of arguments, the first of which will be used as the
executable to run.
@rtype: C{str}
@return: All of the standard output.
@raise CommandFailed: when the program exited with a non-0 exit code.
"""
process = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = process.stdout.read()
exitCode = process.wait()
if exitCode < 0:
raise CommandFailed(None, -exitCode, stdout)
elif exitCode > 0:
raise CommandFailed(exitCode, None, stdout)
return stdout
class CommandFailed(Exception):
"""
Raised when a child process exits unsuccessfully.
@type exitStatus: C{int}
@ivar exitStatus: The exit status for the child process.
@type exitSignal: C{int}
@ivar exitSignal: The exit signal for the child process.
@type output: C{str}
@ivar output: The bytes read from stdout and stderr of the child process.
"""
def __init__(self, exitStatus, exitSignal, output):
Exception.__init__(self, exitStatus, exitSignal, output)
self.exitStatus = exitStatus
self.exitSignal = exitSignal
self.output = output
def _changeVersionInFile(old, new, filename):
"""
Replace the C{old} version number with the C{new} one in the given
C{filename}.
"""
replaceInFile(filename, {old.base(): new.base()})
def getNextVersion(version, now=None):
"""
Calculate the version number for a new release of Twisted based on
the previous version number.
@param version: The previous version number.
@param now: (optional) The current date.
"""
# XXX: This has no way of incrementing the patch number. Currently, we
# don't need it. See bug 2915. Jonathan Lange, 2007-11-20.
if now is None:
now = date.today()
major = now.year - VERSION_OFFSET
if major != version.major:
minor = 0
else:
minor = version.minor + 1
return Version(version.package, major, minor, 0)
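# Worked example (illustration only): with VERSION_OFFSET = 2000, calling
# getNextVersion(Version("twisted", 9, 0, 0), now=date(2010, 3, 1)) returns
# Version("twisted", 10, 0, 0), because the year-derived major (2010 - 2000 = 10)
# differs from the previous major and the minor therefore resets to 0. A second
# release in the same year, getNextVersion(Version("twisted", 10, 0, 0),
# now=date(2010, 6, 1)), returns Version("twisted", 10, 1, 0).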
def changeAllProjectVersions(root, versionTemplate, today=None):
"""
Change the version of all projects (including core and all subprojects).
If the current version of a project is pre-release, then also change the
versions in the current NEWS entries for that project.
@type root: L{FilePath}
@param root: The root of the Twisted source tree.
@type versionTemplate: L{Version}
@param versionTemplate: The version of all projects. The name will be
replaced for each respective project.
@type today: C{str}
@param today: A YYYY-MM-DD formatted string. If not provided, defaults to
the current day, according to the system clock.
"""
if not today:
today = date.today().strftime('%Y-%m-%d')
for project in findTwistedProjects(root):
if project.directory.basename() == "twisted":
packageName = "twisted"
else:
packageName = "twisted." + project.directory.basename()
oldVersion = project.getVersion()
newVersion = Version(packageName, versionTemplate.major,
versionTemplate.minor, versionTemplate.micro,
prerelease=versionTemplate.prerelease)
if oldVersion.prerelease:
builder = NewsBuilder()
builder._changeNewsVersion(
root.child("NEWS"), builder._getNewsName(project),
oldVersion, newVersion, today)
builder._changeNewsVersion(
project.directory.child("topfiles").child("NEWS"),
builder._getNewsName(project), oldVersion, newVersion,
today)
# The placement of the top-level README with respect to other files (eg
# _version.py) is sufficiently different from the others that we just
# have to handle it specially.
if packageName == "twisted":
_changeVersionInFile(
oldVersion, newVersion, root.child('README').path)
project.updateVersion(newVersion)
class Project(object):
"""
A representation of a project that has a version.
@ivar directory: A L{twisted.python.filepath.FilePath} pointing to the base
directory of a Twisted-style Python package. The package should contain
a C{_version.py} file and a C{topfiles} directory that contains a
C{README} file.
"""
def __init__(self, directory):
self.directory = directory
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__, self.directory)
def getVersion(self):
"""
@return: A L{Version} specifying the version number of the project
based on live python modules.
"""
namespace = {}
execfile(self.directory.child("_version.py").path, namespace)
return namespace["version"]
def updateVersion(self, version):
"""
Replace the existing version numbers in _version.py and README files
with the specified version.
"""
oldVersion = self.getVersion()
replaceProjectVersion(self.directory.child("_version.py").path,
version)
_changeVersionInFile(
oldVersion, version,
self.directory.child("topfiles").child("README").path)
def findTwistedProjects(baseDirectory):
"""
Find all Twisted-style projects beneath a base directory.
@param baseDirectory: A L{twisted.python.filepath.FilePath} to look inside.
@return: A list of L{Project}.
"""
projects = []
for filePath in baseDirectory.walk():
if filePath.basename() == 'topfiles':
projectDirectory = filePath.parent()
projects.append(Project(projectDirectory))
return projects
def updateTwistedVersionInformation(baseDirectory, now):
"""
Update the version information for Twisted and all subprojects to the
date-based version number.
@param baseDirectory: Where to look for Twisted. If None, the function
infers the information from C{twisted.__file__}.
@param now: The current date (as L{datetime.date}). If None, it defaults
to today.
"""
for project in findTwistedProjects(baseDirectory):
project.updateVersion(getNextVersion(project.getVersion(), now=now))
def generateVersionFileData(version):
"""
Generate the data to be placed into a _version.py file.
@param version: A version object.
"""
if version.prerelease is not None:
prerelease = ", prerelease=%r" % (version.prerelease,)
else:
prerelease = ""
data = '''\
# This is an auto-generated file. Do not edit it.
from twisted.python import versions
version = versions.Version(%r, %s, %s, %s%s)
''' % (version.package, version.major, version.minor, version.micro, prerelease)
return data
def replaceProjectVersion(filename, newversion):
"""
Write version specification code into the given filename, which
sets the version to the given version number.
@param filename: A filename which is most likely a "_version.py"
under some Twisted project.
@param newversion: A version object.
"""
# XXX - this should be moved to Project and renamed to writeVersionFile.
# jml, 2007-11-15.
f = open(filename, 'w')
f.write(generateVersionFileData(newversion))
f.close()
def replaceInFile(filename, oldToNew):
"""
    Replace all occurrences of the keys of C{oldToNew} in C{filename} with their
    corresponding values.
"""
os.rename(filename, filename+'.bak')
f = open(filename+'.bak')
d = f.read()
f.close()
for k,v in oldToNew.items():
d = d.replace(k, v)
f = open(filename + '.new', 'w')
f.write(d)
f.close()
os.rename(filename+'.new', filename)
os.unlink(filename+'.bak')
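# Illustrative usage (mirrors _changeVersionInFile above):
#
#     replaceInFile("README", {"8.0.0": "8.1.0"})
#
# rewrites README with every occurrence of "8.0.0" replaced by "8.1.0", using the
# .bak/.new shuffle so the original file is only removed once the new one is in
# place.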
class NoDocumentsFound(Exception):
"""
Raised when no input documents are found.
"""
class LoreBuilderMixin(object):
"""
Base class for builders which invoke lore.
"""
def lore(self, arguments):
"""
Run lore with the given arguments.
@param arguments: A C{list} of C{str} giving command line arguments to
lore which should be used.
"""
options = lore.Options()
options.parseOptions(["--null"] + arguments)
lore.runGivenOptions(options)
class DocBuilder(LoreBuilderMixin):
"""
Generate HTML documentation for projects.
"""
def build(self, version, resourceDir, docDir, template, apiBaseURL=None,
deleteInput=False):
"""
Build the documentation in C{docDir} with Lore.
        Input files ending in .xhtml will be considered. Output will be written
        as .html files.
@param version: the version of the documentation to pass to lore.
@type version: C{str}
@param resourceDir: The directory which contains the toplevel index and
stylesheet file for this section of documentation.
@type resourceDir: L{twisted.python.filepath.FilePath}
@param docDir: The directory of the documentation.
@type docDir: L{twisted.python.filepath.FilePath}
@param template: The template used to generate the documentation.
@type template: L{twisted.python.filepath.FilePath}
@type apiBaseURL: C{str} or C{NoneType}
@param apiBaseURL: A format string which will be interpolated with the
fully-qualified Python name for each API link. For example, to
generate the Twisted 8.0.0 documentation, pass
C{"http://twistedmatrix.com/documents/8.0.0/api/%s.html"}.
@param deleteInput: If True, the input documents will be deleted after
their output is generated.
@type deleteInput: C{bool}
@raise NoDocumentsFound: When there are no .xhtml files in the given
C{docDir}.
"""
linkrel = self.getLinkrel(resourceDir, docDir)
inputFiles = docDir.globChildren("*.xhtml")
filenames = [x.path for x in inputFiles]
if not filenames:
raise NoDocumentsFound("No input documents found in %s" % (docDir,))
if apiBaseURL is not None:
arguments = ["--config", "baseurl=" + apiBaseURL]
else:
arguments = []
arguments.extend(["--config", "template=%s" % (template.path,),
"--config", "ext=.html",
"--config", "version=%s" % (version,),
"--linkrel", linkrel] + filenames)
self.lore(arguments)
if deleteInput:
for inputFile in inputFiles:
inputFile.remove()
def getLinkrel(self, resourceDir, docDir):
"""
Calculate a value appropriate for Lore's --linkrel option.
Lore's --linkrel option defines how to 'find' documents that are
linked to from TEMPLATE files (NOT document bodies). That is, it's a
prefix for links ('a' and 'link') in the template.
@param resourceDir: The directory which contains the toplevel index and
stylesheet file for this section of documentation.
@type resourceDir: L{twisted.python.filepath.FilePath}
@param docDir: The directory containing documents that must link to
C{resourceDir}.
@type docDir: L{twisted.python.filepath.FilePath}
"""
if resourceDir != docDir:
return '/'.join(filePathDelta(docDir, resourceDir)) + "/"
else:
return ""
class ManBuilder(LoreBuilderMixin):
"""
Generate man pages of the different existing scripts.
"""
def build(self, manDir):
"""
Generate Lore input files from the man pages in C{manDir}.
        Input files ending in .1 will be considered. Output will be written as
        -man.xhtml files.
@param manDir: The directory of the man pages.
@type manDir: L{twisted.python.filepath.FilePath}
@raise NoDocumentsFound: When there are no .1 files in the given
C{manDir}.
"""
inputFiles = manDir.globChildren("*.1")
filenames = [x.path for x in inputFiles]
if not filenames:
raise NoDocumentsFound("No manual pages found in %s" % (manDir,))
arguments = ["--input", "man",
"--output", "lore",
"--config", "ext=-man.xhtml"] + filenames
self.lore(arguments)
class APIBuilder(object):
"""
Generate API documentation from source files using
U{pydoctor<http://codespeak.net/~mwh/pydoctor/>}. This requires
pydoctor to be installed and usable (which means you won't be able to
use it with Python 2.3).
"""
def build(self, projectName, projectURL, sourceURL, packagePath,
outputPath):
"""
Call pydoctor's entry point with options which will generate HTML
documentation for the specified package's API.
@type projectName: C{str}
@param projectName: The name of the package for which to generate
documentation.
@type projectURL: C{str}
@param projectURL: The location (probably an HTTP URL) of the project
on the web.
@type sourceURL: C{str}
@param sourceURL: The location (probably an HTTP URL) of the root of
the source browser for the project.
@type packagePath: L{FilePath}
@param packagePath: The path to the top-level of the package named by
C{projectName}.
@type outputPath: L{FilePath}
@param outputPath: An existing directory to which the generated API
documentation will be written.
"""
from pydoctor.driver import main
main(
["--project-name", projectName,
"--project-url", projectURL,
"--system-class", "pydoctor.twistedmodel.TwistedSystem",
"--project-base-dir", packagePath.parent().path,
"--html-viewsource-base", sourceURL,
"--add-package", packagePath.path,
"--html-output", outputPath.path,
"--html-write-function-pages", "--quiet", "--make-html"])
class BookBuilder(LoreBuilderMixin):
"""
Generate the LaTeX and PDF documentation.
The book is built by assembling a number of LaTeX documents. Only the
overall document which describes how to assemble the documents is stored
in LaTeX in the source. The rest of the documentation is generated from
Lore input files. These are primarily XHTML files (of the particular
Lore subset), but man pages are stored in GROFF format. BookBuilder
expects all of its input to be Lore XHTML format, so L{ManBuilder}
should be invoked first if the man pages are to be included in the
result (this is determined by the book LaTeX definition file).
Therefore, a sample usage of BookBuilder may look something like this::
man = ManBuilder()
man.build(FilePath("doc/core/man"))
book = BookBuilder()
book.build(
FilePath('doc/core/howto'),
[FilePath('doc/core/howto'), FilePath('doc/core/howto/tutorial'),
FilePath('doc/core/man'), FilePath('doc/core/specifications')],
FilePath('doc/core/howto/book.tex'), FilePath('/tmp/book.pdf'))
"""
def run(self, command):
"""
Execute a command in a child process and return the output.
@type command: C{str}
@param command: The shell command to run.
@raise CommandFailed: If the child process exits with an error.
"""
return runCommand(command)
def buildTeX(self, howtoDir):
"""
Build LaTeX files for lore input files in the given directory.
        Input files ending in .xhtml will be considered. Output will be written
        as .tex files.
@type howtoDir: L{FilePath}
@param howtoDir: A directory containing lore input files.
@raise ValueError: If C{howtoDir} does not exist.
"""
if not howtoDir.exists():
raise ValueError("%r does not exist." % (howtoDir.path,))
self.lore(
["--output", "latex",
"--config", "section"] +
[child.path for child in howtoDir.globChildren("*.xhtml")])
def buildPDF(self, bookPath, inputDirectory, outputPath):
"""
        Build a PDF from the given LaTeX book document.
@type bookPath: L{FilePath}
@param bookPath: The location of a LaTeX document defining a book.
@type inputDirectory: L{FilePath}
@param inputDirectory: The directory which the inputs of the book are
relative to.
@type outputPath: L{FilePath}
@param outputPath: The location to which to write the resulting book.
"""
if not bookPath.basename().endswith(".tex"):
raise ValueError("Book filename must end with .tex")
workPath = FilePath(mkdtemp())
try:
startDir = os.getcwd()
try:
os.chdir(inputDirectory.path)
texToDVI = [
"latex", "-interaction=nonstopmode",
"-output-directory=" + workPath.path,
bookPath.path]
# What I tell you three times is true!
                # The first two invocations of latex on the book file allow it to
                # correctly create page numbers for in-text references. Why this is
# the case, I could not tell you. -exarkun
for i in range(3):
self.run(texToDVI)
bookBaseWithoutExtension = bookPath.basename()[:-4]
dviPath = workPath.child(bookBaseWithoutExtension + ".dvi")
psPath = workPath.child(bookBaseWithoutExtension + ".ps")
pdfPath = workPath.child(bookBaseWithoutExtension + ".pdf")
self.run([
"dvips", "-o", psPath.path, "-t", "letter", "-Ppdf",
dviPath.path])
self.run(["ps2pdf13", psPath.path, pdfPath.path])
pdfPath.moveTo(outputPath)
workPath.remove()
finally:
os.chdir(startDir)
except:
workPath.moveTo(bookPath.parent().child(workPath.basename()))
raise
def build(self, baseDirectory, inputDirectories, bookPath, outputPath):
"""
Build a PDF book from the given TeX book definition and directories
containing lore inputs.
@type baseDirectory: L{FilePath}
@param baseDirectory: The directory which the inputs of the book are
relative to.
@type inputDirectories: C{list} of L{FilePath}
@param inputDirectories: The paths which contain lore inputs to be
converted to LaTeX.
@type bookPath: L{FilePath}
@param bookPath: The location of a LaTeX document defining a book.
@type outputPath: L{FilePath}
@param outputPath: The location to which to write the resulting book.
"""
for inputDir in inputDirectories:
self.buildTeX(inputDir)
self.buildPDF(bookPath, baseDirectory, outputPath)
for inputDirectory in inputDirectories:
for child in inputDirectory.children():
if child.splitext()[1] == ".tex" and child != bookPath:
child.remove()
class NewsBuilder(object):
"""
Generate the new section of a NEWS file.
The C{_FEATURE}, C{_BUGFIX}, C{_DOC}, C{_REMOVAL}, and C{_MISC}
attributes of this class are symbolic names for the news entry types
which are supported. Conveniently, they each also take on the value of
the file name extension which indicates a news entry of that type.
@cvar blacklist: A C{list} of C{str} of projects for which we should not
generate news at all. Same as C{PROJECT_BLACKLIST}.
@cvar _headings: A C{dict} mapping one of the news entry types to the
heading to write out for that type of news entry.
@cvar _NO_CHANGES: A C{str} giving the text which appears when there are
no significant changes in a release.
@cvar _TICKET_HINT: A C{str} giving the text which appears at the top of
each news file and which should be kept at the top, not shifted down
with all the other content. Put another way, this is the text after
which the new news text is inserted.
"""
blacklist = PROJECT_BLACKLIST
_FEATURE = ".feature"
_BUGFIX = ".bugfix"
_DOC = ".doc"
_REMOVAL = ".removal"
_MISC = ".misc"
_headings = {
_FEATURE: "Features",
_BUGFIX: "Bugfixes",
_DOC: "Improved Documentation",
_REMOVAL: "Deprecations and Removals",
_MISC: "Other",
}
_NO_CHANGES = "No significant changes have been made for this release.\n"
_TICKET_HINT = (
'Ticket numbers in this file can be looked up by visiting\n'
'http://twistedmatrix.com/trac/ticket/<number>\n'
'\n')
def _today(self):
"""
Return today's date as a string in YYYY-MM-DD format.
"""
return date.today().strftime('%Y-%m-%d')
def _findChanges(self, path, ticketType):
"""
        Load all the ticket summaries of the given type from the given directory.
@param path: A L{FilePath} the direct children of which to search
for news entries.
        @param ticketType: The type of news entries to search for. One of
            L{NewsBuilder._FEATURE}, L{NewsBuilder._BUGFIX}, L{NewsBuilder._DOC},
            L{NewsBuilder._REMOVAL}, or L{NewsBuilder._MISC}.
@return: A C{list} of two-tuples. The first element is the ticket
number as an C{int}. The second element of each tuple is the
description of the feature.
"""
results = []
for child in path.children():
base, ext = os.path.splitext(child.basename())
if ext == ticketType:
results.append((
int(base),
' '.join(child.getContent().splitlines())))
results.sort()
return results
def _formatHeader(self, header):
"""
Format a header for a NEWS file.
A header is a title with '=' signs underlining it.
@param header: The header string to format.
@type header: C{str}
@return: A C{str} containing C{header}.
"""
return header + '\n' + '=' * len(header) + '\n\n'
def _writeHeader(self, fileObj, header):
"""
Write a version header to the given file.
@param fileObj: A file-like object to which to write the header.
@param header: The header to write to the file.
@type header: C{str}
"""
fileObj.write(self._formatHeader(header))
def _writeSection(self, fileObj, header, tickets):
"""
Write out one section (features, bug fixes, etc) to the given file.
@param fileObj: A file-like object to which to write the news section.
@param header: The header for the section to write.
@type header: C{str}
@param tickets: A C{list} of ticket information of the sort returned
by L{NewsBuilder._findChanges}.
"""
if not tickets:
return
reverse = {}
for (ticket, description) in tickets:
reverse.setdefault(description, []).append(ticket)
for description in reverse:
reverse[description].sort()
reverse = reverse.items()
reverse.sort(key=lambda (descr, tickets): tickets[0])
fileObj.write(header + '\n' + '-' * len(header) + '\n')
for (description, relatedTickets) in reverse:
ticketList = ', '.join([
'#' + str(ticket) for ticket in relatedTickets])
entry = ' - %s (%s)' % (description, ticketList)
entry = textwrap.fill(entry, subsequent_indent=' ')
fileObj.write(entry + '\n')
fileObj.write('\n')
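    # Illustrative output of _writeSection (ticket numbers and descriptions are
    # examples only): given header "Features" and tickets
    # [(3, 'bar'), (5, 'foo'), (12, 'foo')], duplicate descriptions are merged
    # and ordered by their lowest ticket number, producing:
    #
    #     Features
    #     --------
    #      - bar (#3)
    #      - foo (#5, #12)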
def _writeMisc(self, fileObj, header, tickets):
"""
Write out a miscellaneous-changes section to the given file.
@param fileObj: A file-like object to which to write the news section.
@param header: The header for the section to write.
@type header: C{str}
@param tickets: A C{list} of ticket information of the sort returned
by L{NewsBuilder._findChanges}.
"""
if not tickets:
return
fileObj.write(header + '\n' + '-' * len(header) + '\n')
formattedTickets = []
for (ticket, ignored) in tickets:
formattedTickets.append('#' + str(ticket))
entry = ' - ' + ', '.join(formattedTickets)
entry = textwrap.fill(entry, subsequent_indent=' ')
fileObj.write(entry + '\n\n')
def build(self, path, output, header):
"""
Load all of the change information from the given directory and write
it out to the given output file.
@param path: A directory (probably a I{topfiles} directory) containing
change information in the form of <ticket>.<change type> files.
@type path: L{FilePath}
@param output: The NEWS file to which the results will be prepended.
@type output: L{FilePath}
@param header: The top-level header to use when writing the news.
@type header: L{str}
"""
changes = []
for part in (self._FEATURE, self._BUGFIX, self._DOC, self._REMOVAL):
tickets = self._findChanges(path, part)
if tickets:
changes.append((part, tickets))
misc = self._findChanges(path, self._MISC)
oldNews = output.getContent()
newNews = output.sibling('NEWS.new').open('w')
if oldNews.startswith(self._TICKET_HINT):
newNews.write(self._TICKET_HINT)
oldNews = oldNews[len(self._TICKET_HINT):]
self._writeHeader(newNews, header)
if changes:
for (part, tickets) in changes:
self._writeSection(newNews, self._headings.get(part), tickets)
else:
newNews.write(self._NO_CHANGES)
newNews.write('\n')
self._writeMisc(newNews, self._headings.get(self._MISC), misc)
newNews.write('\n')
newNews.write(oldNews)
newNews.close()
output.sibling('NEWS.new').moveTo(output)
def _getNewsName(self, project):
"""
Return the name of C{project} that should appear in NEWS.
@param project: A L{Project}
@return: The name of C{project}.
"""
name = project.directory.basename().title()
if name == 'Twisted':
name = 'Core'
return name
def _iterProjects(self, baseDirectory):
"""
Iterate through the Twisted projects in C{baseDirectory}, yielding
everything we need to know to build news for them.
Yields C{topfiles}, C{news}, C{name}, C{version} for each sub-project
in reverse-alphabetical order. C{topfile} is the L{FilePath} for the
topfiles directory, C{news} is the L{FilePath} for the NEWS file,
C{name} is the nice name of the project (as should appear in the NEWS
file), C{version} is the current version string for that project.
@param baseDirectory: A L{FilePath} representing the root directory
beneath which to find Twisted projects for which to generate
news (see L{findTwistedProjects}).
@type baseDirectory: L{FilePath}
"""
# Get all the subprojects to generate news for
projects = findTwistedProjects(baseDirectory)
# And order them alphabetically for ease of reading
projects.sort(key=lambda proj: proj.directory.path)
# And generate them backwards since we write news by prepending to
# files.
projects.reverse()
for aggregateNews in [False, True]:
for project in projects:
if project.directory.basename() in self.blacklist:
continue
topfiles = project.directory.child("topfiles")
if aggregateNews:
news = baseDirectory.child("NEWS")
else:
news = topfiles.child("NEWS")
name = self._getNewsName(project)
version = project.getVersion()
yield topfiles, news, name, version
def buildAll(self, baseDirectory):
"""
Find all of the Twisted subprojects beneath C{baseDirectory} and update
their news files from the ticket change description files in their
I{topfiles} directories and update the news file in C{baseDirectory}
with all of the news.
Projects that are listed in L{NewsBuilder.blacklist} will be skipped.
@param baseDirectory: A L{FilePath} representing the root directory
beneath which to find Twisted projects for which to generate
news (see L{findTwistedProjects}).
"""
today = self._today()
for topfiles, news, name, version in self._iterProjects(baseDirectory):
self.build(
topfiles, news,
"Twisted %s %s (%s)" % (name, version.base(), today))
def _changeNewsVersion(self, news, name, oldVersion, newVersion, today):
"""
Change all references to the current version number in a NEWS file to
refer to C{newVersion} instead.
@param news: The NEWS file to change.
@type news: L{FilePath}
@param name: The name of the project to change.
@type name: C{str}
@param oldVersion: The old version of the project.
@type oldVersion: L{Version}
@param newVersion: The new version of the project.
@type newVersion: L{Version}
@param today: A YYYY-MM-DD string representing today's date.
@type today: C{str}
"""
newHeader = self._formatHeader(
"Twisted %s %s (%s)" % (name, newVersion.base(), today))
expectedHeaderRegex = re.compile(
r"Twisted %s %s \(\d{4}-\d\d-\d\d\)\n=+\n\n" % (
re.escape(name), re.escape(oldVersion.base())))
oldNews = news.getContent()
match = expectedHeaderRegex.search(oldNews)
if match:
oldHeader = match.group()
replaceInFile(news.path, {oldHeader: newHeader})
def main(self, args):
"""
Build all news files.
@param args: The command line arguments to process. This must contain
one string, the path to the base of the Twisted checkout for which
to build the news.
@type args: C{list} of C{str}
"""
if len(args) != 1:
sys.exit("Must specify one argument: the path to the Twisted checkout")
self.buildAll(FilePath(args[0]))
def filePathDelta(origin, destination):
"""
Return a list of strings that represent C{destination} as a path relative
to C{origin}.
It is assumed that both paths represent directories, not files. That is to
say, the delta of L{twisted.python.filepath.FilePath} /foo/bar to
L{twisted.python.filepath.FilePath} /foo/baz will be C{../baz},
not C{baz}.
@type origin: L{twisted.python.filepath.FilePath}
@param origin: The origin of the relative path.
@type destination: L{twisted.python.filepath.FilePath}
@param destination: The destination of the relative path.
"""
commonItems = 0
path1 = origin.path.split(os.sep)
path2 = destination.path.split(os.sep)
for elem1, elem2 in zip(path1, path2):
if elem1 == elem2:
commonItems += 1
else:
break
path = [".."] * (len(path1) - commonItems)
return path + path2[commonItems:]
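# Worked example (illustration only): filePathDelta(FilePath("/foo/bar"),
# FilePath("/foo/baz")) splits both paths into ['', 'foo', 'bar'] and
# ['', 'foo', 'baz'], finds two common leading elements, and returns
# ['..', 'baz'] -- the "../baz" relative path mentioned in the docstring.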
class DistributionBuilder(object):
"""
A builder of Twisted distributions.
This knows how to build tarballs for Twisted and all of its subprojects.
@type blacklist: C{list} of C{str}
@cvar blacklist: The list of subproject names to exclude from the main
Twisted tarball and for which no individual project tarballs will be
built. The same list as C{PROJECT_BLACKLIST}.
"""
from twisted.python.dist import twisted_subprojects as subprojects
blacklist = PROJECT_BLACKLIST
def __init__(self, rootDirectory, outputDirectory, apiBaseURL=None):
"""
Create a distribution builder.
@param rootDirectory: root of a Twisted export which will populate
subsequent tarballs.
@type rootDirectory: L{FilePath}.
@param outputDirectory: The directory in which to create the tarballs.
@type outputDirectory: L{FilePath}
@type apiBaseURL: C{str} or C{NoneType}
@param apiBaseURL: A format string which will be interpolated with the
fully-qualified Python name for each API link. For example, to
generate the Twisted 8.0.0 documentation, pass
C{"http://twistedmatrix.com/documents/8.0.0/api/%s.html"}.
"""
self.rootDirectory = rootDirectory
self.outputDirectory = outputDirectory
self.apiBaseURL = apiBaseURL
self.manBuilder = ManBuilder()
self.docBuilder = DocBuilder()
def _buildDocInDir(self, path, version, howtoPath):
"""
Generate documentation in the given path, building man pages first if
necessary and swallowing errors (so that directories without lore
documentation in them are ignored).
@param path: The path containing documentation to build.
@type path: L{FilePath}
@param version: The version of the project to include in all generated
pages.
@type version: C{str}
@param howtoPath: The "resource path" as L{DocBuilder} describes it.
@type howtoPath: L{FilePath}
"""
templatePath = self.rootDirectory.child("doc").child("core"
).child("howto").child("template.tpl")
if path.basename() == "man":
self.manBuilder.build(path)
if path.isdir():
try:
self.docBuilder.build(version, howtoPath, path,
templatePath, self.apiBaseURL, True)
except NoDocumentsFound:
pass
def buildTwisted(self, version):
"""
Build the main Twisted distribution in C{Twisted-<version>.tar.bz2}.
        Projects listed in L{blacklist} will not have their plugins, code,
documentation, or bin directories included.
bin/admin is also excluded.
@type version: C{str}
@param version: The version of Twisted to build.
@return: The tarball file.
@rtype: L{FilePath}.
"""
releaseName = "Twisted-%s" % (version,)
buildPath = lambda *args: '/'.join((releaseName,) + args)
outputFile = self.outputDirectory.child(releaseName + ".tar.bz2")
tarball = tarfile.TarFile.open(outputFile.path, 'w:bz2')
docPath = self.rootDirectory.child("doc")
# Generate docs!
if docPath.isdir():
for subProjectDir in docPath.children():
if (subProjectDir.isdir()
and subProjectDir.basename() not in self.blacklist):
for child in subProjectDir.walk():
self._buildDocInDir(child, version,
subProjectDir.child("howto"))
# Now, this part is nasty. We need to exclude blacklisted subprojects
# from the main Twisted distribution. This means we need to exclude
# their bin directories, their documentation directories, their
# plugins, and their python packages. Given that there's no "add all
# but exclude these particular paths" functionality in tarfile, we have
# to walk through all these directories and add things that *aren't*
# part of the blacklisted projects.
for binthing in self.rootDirectory.child("bin").children():
# bin/admin should also not be included.
if binthing.basename() not in self.blacklist + ["admin"]:
tarball.add(binthing.path,
buildPath("bin", binthing.basename()))
bad_plugins = ["twisted_%s.py" % (blacklisted,)
for blacklisted in self.blacklist]
for submodule in self.rootDirectory.child("twisted").children():
if submodule.basename() == "plugins":
for plugin in submodule.children():
if plugin.basename() not in bad_plugins:
tarball.add(plugin.path, buildPath("twisted", "plugins",
plugin.basename()))
elif submodule.basename() not in self.blacklist:
tarball.add(submodule.path, buildPath("twisted",
submodule.basename()))
for docDir in self.rootDirectory.child("doc").children():
if docDir.basename() not in self.blacklist:
tarball.add(docDir.path, buildPath("doc", docDir.basename()))
for toplevel in self.rootDirectory.children():
if not toplevel.isdir():
tarball.add(toplevel.path, buildPath(toplevel.basename()))
tarball.close()
return outputFile
def buildCore(self, version):
"""
Build a core distribution in C{TwistedCore-<version>.tar.bz2}.
This is very similar to L{buildSubProject}, but core tarballs and the
input are laid out slightly differently.
- scripts are in the top level of the C{bin} directory.
- code is included directly from the C{twisted} directory, excluding
subprojects.
- all plugins except the subproject plugins are included.
@type version: C{str}
@param version: The version of Twisted to build.
@return: The tarball file.
@rtype: L{FilePath}.
"""
releaseName = "TwistedCore-%s" % (version,)
outputFile = self.outputDirectory.child(releaseName + ".tar.bz2")
buildPath = lambda *args: '/'.join((releaseName,) + args)
tarball = self._createBasicSubprojectTarball(
"core", version, outputFile)
# Include the bin directory for the subproject.
for path in self.rootDirectory.child("bin").children():
if not path.isdir():
tarball.add(path.path, buildPath("bin", path.basename()))
# Include all files within twisted/ that aren't part of a subproject.
for path in self.rootDirectory.child("twisted").children():
if path.basename() == "plugins":
for plugin in path.children():
for subproject in self.subprojects:
if plugin.basename() == "twisted_%s.py" % (subproject,):
break
else:
tarball.add(plugin.path,
buildPath("twisted", "plugins",
plugin.basename()))
            elif path.basename() not in self.subprojects + ["topfiles"]:
tarball.add(path.path, buildPath("twisted", path.basename()))
tarball.add(self.rootDirectory.child("twisted").child("topfiles").path,
releaseName)
tarball.close()
return outputFile
def buildSubProject(self, projectName, version):
"""
Build a subproject distribution in
C{Twisted<Projectname>-<version>.tar.bz2}.
@type projectName: C{str}
@param projectName: The lowercase name of the subproject to build.
@type version: C{str}
@param version: The version of Twisted to build.
@return: The tarball file.
@rtype: L{FilePath}.
"""
releaseName = "Twisted%s-%s" % (projectName.capitalize(), version)
outputFile = self.outputDirectory.child(releaseName + ".tar.bz2")
buildPath = lambda *args: '/'.join((releaseName,) + args)
subProjectDir = self.rootDirectory.child("twisted").child(projectName)
tarball = self._createBasicSubprojectTarball(projectName, version,
outputFile)
tarball.add(subProjectDir.child("topfiles").path, releaseName)
# Include all files in the subproject package except for topfiles.
for child in subProjectDir.children():
name = child.basename()
if name != "topfiles":
tarball.add(
child.path,
buildPath("twisted", projectName, name))
pluginsDir = self.rootDirectory.child("twisted").child("plugins")
# Include the plugin for the subproject.
pluginFileName = "twisted_%s.py" % (projectName,)
pluginFile = pluginsDir.child(pluginFileName)
if pluginFile.exists():
tarball.add(pluginFile.path,
buildPath("twisted", "plugins", pluginFileName))
# Include the bin directory for the subproject.
binPath = self.rootDirectory.child("bin").child(projectName)
if binPath.isdir():
tarball.add(binPath.path, buildPath("bin"))
tarball.close()
return outputFile
def _createBasicSubprojectTarball(self, projectName, version, outputFile):
"""
Helper method to create and fill a tarball with things common between
subprojects and core.
@param projectName: The subproject's name.
@type projectName: C{str}
@param version: The version of the release.
@type version: C{str}
@param outputFile: The location of the tar file to create.
@type outputFile: L{FilePath}
"""
releaseName = "Twisted%s-%s" % (projectName.capitalize(), version)
buildPath = lambda *args: '/'.join((releaseName,) + args)
tarball = tarfile.TarFile.open(outputFile.path, 'w:bz2')
tarball.add(self.rootDirectory.child("LICENSE").path,
buildPath("LICENSE"))
docPath = self.rootDirectory.child("doc").child(projectName)
if docPath.isdir():
for child in docPath.walk():
self._buildDocInDir(child, version, docPath.child("howto"))
tarball.add(docPath.path, buildPath("doc"))
return tarball
class UncleanWorkingDirectory(Exception):
"""
Raised when the working directory of an SVN checkout is unclean.
"""
class NotWorkingDirectory(Exception):
"""
Raised when a directory does not appear to be an SVN working directory.
"""
def buildAllTarballs(checkout, destination):
"""
Build complete tarballs (including documentation) for Twisted and all
subprojects.
This should be called after the version numbers have been updated and
NEWS files created.
@type checkout: L{FilePath}
@param checkout: The SVN working copy from which a pristine source tree
will be exported.
@type destination: L{FilePath}
@param destination: The directory in which tarballs will be placed.
@raise UncleanWorkingDirectory: if there are modifications to the
working directory of C{checkout}.
@raise NotWorkingDirectory: if the checkout path is not an SVN checkout.
"""
if not checkout.child(".svn").exists():
raise NotWorkingDirectory(
"%s does not appear to be an SVN working directory."
% (checkout.path,))
if runCommand(["svn", "st", checkout.path]).strip():
raise UncleanWorkingDirectory(
"There are local modifications to the SVN checkout in %s."
% (checkout.path,))
workPath = FilePath(mkdtemp())
export = workPath.child("export")
runCommand(["svn", "export", checkout.path, export.path])
twistedPath = export.child("twisted")
version = Project(twistedPath).getVersion()
versionString = version.base()
apiBaseURL = "http://twistedmatrix.com/documents/%s/api/%%s.html" % (
versionString)
if not destination.exists():
destination.createDirectory()
db = DistributionBuilder(export, destination, apiBaseURL=apiBaseURL)
db.buildCore(versionString)
for subproject in twisted_subprojects:
if (subproject not in db.blacklist
and twistedPath.child(subproject).exists()):
db.buildSubProject(subproject, versionString)
db.buildTwisted(versionString)
workPath.remove()
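# A minimal usage sketch (not from the original module): drives buildAllTarballs
# directly with hypothetical paths. It assumes a pristine SVN checkout of
# Twisted with no local modifications, exactly as the docstring above requires.
def _exampleBuildAllTarballs():
    checkout = FilePath("/path/to/Twisted-checkout")    # hypothetical path
    destination = FilePath("/path/to/tarball-output")   # hypothetical path
    buildAllTarballs(checkout, destination)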
class ChangeVersionsScript(object):
"""
A thing for changing version numbers. See L{main}.
"""
changeAllProjectVersions = staticmethod(changeAllProjectVersions)
def main(self, args):
"""
Given a list of command-line arguments, change all the Twisted versions
in the current directory.
@type args: list of str
@param args: List of command line arguments. This should only
contain the version number.
"""
version_format = (
"Version should be in a form kind of like '1.2.3[pre4]'")
if len(args) != 1:
sys.exit("Must specify exactly one argument to change-versions")
version = args[0]
try:
major, minor, micro_and_pre = version.split(".")
except ValueError:
raise SystemExit(version_format)
if "pre" in micro_and_pre:
micro, pre = micro_and_pre.split("pre")
else:
micro = micro_and_pre
pre = None
try:
major = int(major)
minor = int(minor)
micro = int(micro)
if pre is not None:
pre = int(pre)
except ValueError:
raise SystemExit(version_format)
version_template = Version("Whatever",
major, minor, micro, prerelease=pre)
self.changeAllProjectVersions(FilePath("."), version_template)
class BuildTarballsScript(object):
"""
A thing for building release tarballs. See L{main}.
"""
buildAllTarballs = staticmethod(buildAllTarballs)
def main(self, args):
"""
Build all release tarballs.
@type args: list of str
@param args: The command line arguments to process. This must contain
two strings: the checkout directory and the destination directory.
"""
if len(args) != 2:
sys.exit("Must specify two arguments: "
"Twisted checkout and destination path")
self.buildAllTarballs(FilePath(args[0]), FilePath(args[1]))
class BuildAPIDocsScript(object):
"""
A thing for building API documentation. See L{main}.
"""
def buildAPIDocs(self, projectRoot, output):
"""
Build the API documentation of Twisted, with our project policy.
@param projectRoot: A L{FilePath} representing the root of the Twisted
checkout.
@param output: A L{FilePath} pointing to the desired output directory.
"""
version = Project(projectRoot.child("twisted")).getVersion()
versionString = version.base()
sourceURL = ("http://twistedmatrix.com/trac/browser/tags/releases/"
"twisted-%s" % (versionString,))
apiBuilder = APIBuilder()
apiBuilder.build(
"Twisted",
"http://twistedmatrix.com/",
sourceURL,
projectRoot.child("twisted"),
output)
def main(self, args):
"""
Build API documentation.
@type args: list of str
@param args: The command line arguments to process. This must contain
two strings: the path to the root of the Twisted checkout, and a
path to an output directory.
"""
if len(args) != 2:
sys.exit("Must specify two arguments: "
"Twisted checkout and destination path")
self.buildAPIDocs(FilePath(args[0]), FilePath(args[1]))
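# A minimal sketch (not part of the original module) showing how the three
# script classes above would be driven; the paths and version string are
# hypothetical.
def _exampleReleaseScripts():
    # Rewrites version numbers in the current working directory.
    ChangeVersionsScript().main(["11.1.0pre1"])
    BuildTarballsScript().main(["/path/to/Twisted-checkout", "/path/to/releases"])
    BuildAPIDocsScript().main(["/path/to/Twisted-checkout", "/path/to/apidocs"])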
|
|
"""Compatibility
This module ensures that compatibility between Maya, Avalon and Pyblish
is maintained.
"""
import maya.cmds as cmds
import os
import logging
import avalon.pipeline
log = logging.getLogger(__name__)
create = avalon.pipeline.create
def remove_googleapiclient():
"""Check if the compatibility must be maintained
The Maya 2018 version tries to import the `http` module from
Maya2018\plug-ins\MASH\scripts\googleapiclient\http.py instead of the
module from six.py. This import conflict causes a crash in Avalon's publisher.
This is due to Autodesk adding paths to the PYTHONPATH environment variable
which contain modules instead of only packages.
"""
keyword = "googleapiclient"
# reconstruct python paths
python_paths = os.environ["PYTHONPATH"].split(os.pathsep)
paths = [path for path in python_paths if keyword not in path]
os.environ["PYTHONPATH"] = os.pathsep.join(paths)
def install():
"""Run all compatibility functions"""
if cmds.about(version=True) == "2018":
remove_googleapiclient()
def load(Loader,
representation,
name=None,
namespace=None,
data=None):
"""Load asset via database
Deprecated; this functionality is replaced by `api.load()`
Arguments:
Loader (api.Loader): The loader to process in host Maya.
representation (dict, io.ObjectId or str): Address to representation
name (str, optional): Use pre-defined name
namespace (str, optional): Use pre-defined namespace
data (dict, optional): Additional settings dictionary
"""
from avalon.vendor import six
from avalon import io
from avalon.maya import lib
from avalon.maya.pipeline import containerise
assert representation is not None, "This is a bug"
if isinstance(representation, (six.string_types, io.ObjectId)):
representation = io.find_one({"_id": io.ObjectId(str(representation))})
version, subset, asset, project = io.parenthood(representation)
assert all([representation, version, subset, asset, project]), (
"This is a bug"
)
context = {
"project": project,
"asset": asset,
"subset": subset,
"version": version,
"representation": representation,
}
# Ensure data is a dictionary when no explicit data provided
if data is None:
data = dict()
assert isinstance(data, dict), "Data must be a dictionary"
name = name or subset["name"]
namespace = namespace or lib.unique_namespace(
asset["name"] + "_",
prefix="_" if asset["name"][0].isdigit() else "",
suffix="_",
)
# TODO(roy): add compatibility check, see `tools.cbloader.lib`
Loader.log.info(
"Running '%s' on '%s'" % (Loader.__name__, asset["name"])
)
try:
loader = Loader(context)
with lib.maintained_selection():
loader.process(name, namespace, context, data)
except OSError as e:
log.info("WARNING: %s" % e)
return list()
# Only containerize if any nodes were loaded by the Loader
nodes = loader[:]
if not nodes:
return
return containerise(
name=name,
namespace=namespace,
nodes=loader[:],
context=context,
loader=Loader.__name__)
def update(container, version=-1):
"""Update `container` to `version`
Deprecated; this functionality is replaced by `api.update()`
This function relies on a container being referenced. At the time of this
writing, all assets - models, rigs, animations, shaders - are referenced
and should pose no problem. Should an asset ever not be referenced,
this function will need to be updated.
Arguments:
container (avalon-core:container-1.0): Container to update,
from `host.ls()`.
version (int, optional): Update the container to this version.
If no version is passed, the latest is assumed.
"""
from avalon import io
from avalon import api
node = container["objectName"]
# Assume asset has been referenced
reference_node = next((node for node in cmds.sets(node, query=True)
if cmds.nodeType(node) == "reference"), None)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")
current_representation = io.find_one({
"_id": io.ObjectId(container["representation"])
})
assert current_representation is not None, "This is a bug"
version_, subset, asset, project = io.parenthood(current_representation)
if version == -1:
new_version = io.find_one({
"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)])
else:
new_version = io.find_one({
"type": "version",
"parent": subset["_id"],
"name": version,
})
new_representation = io.find_one({
"type": "representation",
"parent": new_version["_id"],
"name": current_representation["name"]
})
assert new_version is not None, "This is a bug"
template_publish = project["config"]["template"]["publish"]
fname = template_publish.format(**{
"root": api.registered_root(),
"project": project["name"],
"asset": asset["name"],
"silo": asset["silo"],
"subset": subset["name"],
"version": new_version["name"],
"representation": current_representation["name"],
})
file_type = {
"ma": "mayaAscii",
"mb": "mayaBinary",
"abc": "Alembic"
}.get(new_representation["name"])
assert file_type, ("Unsupported representation: %s" % new_representation)
assert os.path.exists(fname), "%s does not exist." % fname
cmds.file(fname, loadReference=reference_node, type=file_type)
# Update metadata
cmds.setAttr(container["objectName"] + ".representation",
str(new_representation["_id"]),
type="string")
def remove(container):
"""Remove an existing `container` from Maya scene
Deprecated; this functionality is replaced by `api.remove()`
Arguments:
container (avalon-core:container-1.0): Which container
to remove from scene.
"""
node = container["objectName"]
# Assume asset has been referenced
reference_node = next((node for node in cmds.sets(node, query=True)
if cmds.nodeType(node) == "reference"), None)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")
log.info("Removing '%s' from Maya.." % container["name"])
namespace = cmds.referenceQuery(reference_node, namespace=True)
fname = cmds.referenceQuery(reference_node, filename=True)
cmds.file(fname, removeReference=True)
try:
cmds.delete(node)
except ValueError:
# Already implicitly deleted by Maya upon removing reference
pass
try:
# If container is not automatically cleaned up by Maya (issue #118)
cmds.namespace(removeNamespace=namespace, deleteNamespaceContent=True)
except RuntimeError:
pass
class BackwardsCompatibleLoader(avalon.pipeline.Loader):
"""A backwards compatible loader.
This triggers the old-style `process` through the old Maya's host `load`,
`update` and `remove` methods and exposes it through the new-style Loader
api.
Note: This inherits from `avalon.pipeline.Loader` and *not* from
`avalon.maya.pipeline.Loader`
"""
def load(self,
context,
name=None,
namespace=None,
data=None):
return load(Loader=self.__class__,
representation=context['representation'],
name=name,
namespace=namespace,
data=data)
def remove(self, container):
return remove(container)
def update(self, container, representation):
version = representation['context']['version']
return update(container, version=version)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import traceback
import xml.dom.minidom as xml
from xml.parsers.expat import ExpatError
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import IsDownloadError
from docs_server_utils import StringIdentity
from file_system import (
FileNotFoundError, FileSystem, FileSystemError, StatInfo)
from future import Future
import url_constants
def _ParseHTML(html):
'''Unfortunately, the viewvc page has a stray </div> tag, so this takes care
of all mismatched tags.
'''
try:
return xml.parseString(html)
except ExpatError as e:
return _ParseHTML('\n'.join(
line for (i, line) in enumerate(html.split('\n'))
if e.lineno != i + 1))
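# A minimal sketch (not part of the original module) of the retry behaviour
# above: the line that triggers the ExpatError is dropped and parsing is
# retried, which handles the stray </div> emitted by viewvc.
def _ExampleParseHTML():
  busted = '<div>\n<p>ok</p>\n</div>\n</div>'  # last line is the stray tag
  return _ParseHTML(busted)  # parses successfully after dropping line 4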
def _InnerText(node):
'''Like node.innerText in JS DOM, but strips surrounding whitespace.
'''
text = []
if node.nodeValue:
text.append(node.nodeValue)
if hasattr(node, 'childNodes'):
for child_node in node.childNodes:
text.append(_InnerText(child_node))
return ''.join(text).strip()
def _CreateStatInfo(html):
parent_version = None
child_versions = {}
# Try all of the tables until we find the ones that contain the data (the
# directory and file versions are in different tables).
for table in _ParseHTML(html).getElementsByTagName('table'):
# Within the table there is a list of files. However, there may be some
# things beforehand; a header, "parent directory" list, etc. We will deal
# with that below by being generous and just ignoring such rows.
rows = table.getElementsByTagName('tr')
for row in rows:
cells = row.getElementsByTagName('td')
# The version of the directory will eventually appear in the soup of
# table rows, like this:
#
# <tr>
# <td>Directory revision:</td>
# <td><a href=... title="Revision 214692">214692</a> (of...)</td>
# </tr>
#
# So look out for that.
if len(cells) == 2 and _InnerText(cells[0]) == 'Directory revision:':
links = cells[1].getElementsByTagName('a')
if len(links) != 2:
raise FileSystemError('ViewVC assumption invalid: directory '
'revision content did not have 2 <a> '
'elements, instead %s' % _InnerText(cells[1]))
this_parent_version = _InnerText(links[0])
int(this_parent_version) # sanity check
if parent_version is not None:
raise FileSystemError('There was already a parent version %s, and '
'we just found a second at %s' %
(parent_version, this_parent_version))
parent_version = this_parent_version
# The version of each file is a list of rows with 5 cells: name, version,
# age, author, and last log entry. Maybe the columns will change; we're
# at the mercy of viewvc, but this constant can be easily updated.
if len(cells) != 5:
continue
name_element, version_element, _, __, ___ = cells
name = _InnerText(name_element) # note: will end in / for directories
try:
version = int(_InnerText(version_element))
except StandardError:
continue
child_versions[name] = str(version)
if parent_version and child_versions:
break
return StatInfo(parent_version, child_versions)
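# A minimal sketch (not part of the original module) of the markup shape that
# _CreateStatInfo understands; the HTML below is a hypothetical, stripped-down
# viewvc listing. The resulting StatInfo has version '214692' and
# child_versions {'foo.json': '214690'}.
def _ExampleCreateStatInfo():
  html = ('<table>'
          '<tr><td>Directory revision:</td>'
          '<td><a href="#" title="Revision 214692">214692</a>'
          ' (of <a href="#">214700</a>)</td></tr>'
          '<tr><td><a href="#">foo.json</a></td>'
          '<td><a href="#">214690</a></td>'
          '<td>3 days</td><td>someone</td><td>add foo</td></tr>'
          '</table>')
  return _CreateStatInfo(html)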
class SubversionFileSystem(FileSystem):
'''Class to fetch resources from src.chromium.org.
'''
@staticmethod
def Create(branch='trunk', revision=None):
if branch == 'trunk':
svn_path = 'trunk/src'
else:
svn_path = 'branches/%s/src' % branch
return SubversionFileSystem(
AppEngineUrlFetcher('%s/%s' % (url_constants.SVN_URL, svn_path)),
AppEngineUrlFetcher('%s/%s' % (url_constants.VIEWVC_URL, svn_path)),
svn_path,
revision=revision)
def __init__(self, file_fetcher, stat_fetcher, svn_path, revision=None):
self._file_fetcher = file_fetcher
self._stat_fetcher = stat_fetcher
self._svn_path = svn_path
self._revision = revision
def Read(self, paths, skip_not_found=False):
args = None
if self._revision is not None:
# |fetcher| gets from svn.chromium.org which uses p= for version.
args = 'p=%s' % self._revision
def apply_args(path):
return path if args is None else '%s?%s' % (path, args)
def list_dir(directory):
dom = xml.parseString(directory)
files = [elem.childNodes[0].data
for elem in dom.getElementsByTagName('a')]
if '..' in files:
files.remove('..')
return files
# A list of tuples of the form (path, Future).
fetches = [(path, self._file_fetcher.FetchAsync(apply_args(path)))
for path in paths]
def resolve():
value = {}
for path, future in fetches:
try:
result = future.Get()
except Exception as e:
if skip_not_found and IsDownloadError(e): continue
exc_type = (FileNotFoundError if IsDownloadError(e)
else FileSystemError)
raise exc_type('%s fetching %s for Get: %s' %
(type(e).__name__, path, traceback.format_exc()))
if result.status_code == 404:
if skip_not_found: continue
raise FileNotFoundError(
'Got 404 when fetching %s for Get, content %s' %
(path, result.content))
if result.status_code != 200:
raise FileSystemError('Got %s when fetching %s for Get, content %s' %
(result.status_code, path, result.content))
if path.endswith('/'):
value[path] = list_dir(result.content)
else:
value[path] = result.content
return value
return Future(callback=resolve)
def Refresh(self):
return Future(value=())
def Stat(self, path):
return self.StatAsync(path).Get()
def StatAsync(self, path):
directory, filename = posixpath.split(path)
if self._revision is not None:
# |stat_fetch| uses viewvc which uses pathrev= for version.
directory += '?pathrev=%s' % self._revision
result_future = self._stat_fetcher.FetchAsync(directory)
def resolve():
try:
result = result_future.Get()
except Exception as e:
exc_type = FileNotFoundError if IsDownloadError(e) else FileSystemError
raise exc_type('%s fetching %s for Stat: %s' %
(type(e).__name__, path, traceback.format_exc()))
if result.status_code == 404:
raise FileNotFoundError('Got 404 when fetching %s for Stat, '
'content %s' % (path, result.content))
if result.status_code != 200:
raise FileNotFoundError('Got %s when fetching %s for Stat, content %s' %
(result.status_code, path, result.content))
stat_info = _CreateStatInfo(result.content)
if stat_info.version is None:
raise FileSystemError('Failed to find version of dir %s' % directory)
if path == '' or path.endswith('/'):
return stat_info
if filename not in stat_info.child_versions:
raise FileNotFoundError(
'%s from %s was not in child versions for Stat' % (filename, path))
return StatInfo(stat_info.child_versions[filename])
return Future(callback=resolve)
def GetIdentity(self):
# NOTE: no revision here, since it would mess up the caching of reads. It
# probably doesn't matter since all the caching classes will use the result
# of Stat to decide whether to re-read - and Stat has a ceiling of the
# revision - so when the revision changes, so might Stat. That is enough.
return '@'.join((self.__class__.__name__, StringIdentity(self._svn_path)))
|
|
#!/usr/bin/env python
import sys
import os
import re
import string
import glob
import argparse
import subprocess
import tempfile
import shutil
import urllib
PLIST_BUDDY = "/usr/libexec/PlistBuddy"
MOBILE_PROVISIONS = "~/Library/MobileDevice/Provisioning Profiles/*.mobileprovision"
OUTPUT_IPA = "Output.ipa"
ICON_PNG = "Icon.png"
MANIFEST_PLIST = "manifest.plist"
INDEX_HTML = "index.html"
DEFAULT_DROPBOX_ROOT = "/AdHocBuilds"
tmpDir = None
log = None
class Logger:
def __init__(self, quiet):
self.quiet = quiet
def e(self, *args):
self._write(sys.stderr, args)
def v(self, *args):
if not self.quiet:
self._write(sys.stdout, args)
def o(self, *args):
self._write(sys.stdout, args)
def _write(self, stream, args):
for a in args:
stream.write(str(a))
stream.write("\n")
stream.flush()
def requireFile(path, errordesc, extraError = None):
if not os.path.isfile(path):
log.e("Error: ", errordesc, " not a file.")
log.e(" path = ", path)
if extraError is not None:
log.e(" ", extraError)
sys.exit(1)
def requireDir(path, errordesc, extraError = None):
if not os.path.isdir(path):
log.e("Error: ", errordesc, " not a directory.")
log.e(" path = ", path)
if extraError is not None:
log.e(" ", extraError)
sys.exit(1)
def requireMatch(pattern, string, errordesc):
m = re.match(pattern, string)
if m is None:
log.e("Error: ", errordesc, " does not match expected pattern.")
log.e(" value = ", string)
log.e(" pattern = ", pattern)
sys.exit(1)
def getPlistValue(path, key):
try:
with open(os.devnull, 'w') as devnull:
return subprocess.check_output([PLIST_BUDDY, "-c", "Print " + key, path], stderr = devnull).strip()
except:
return ""
def writeMobileProvisionPList(mobileprovision, plistFile):
with open(plistFile, "w") as f:
r = subprocess.call(["security", "cms", "-D", "-u0", "-i", mobileprovision], stdout = f)
if r != 0:
return False
return True
def getMobileProvisionPlistValue(mobileprovision, key):
tmpFile = os.path.join(tmpDir, "tmp.plist")
if not writeMobileProvisionPList(mobileprovision, tmpFile):
return None
return getPlistValue(tmpFile, key)
def findBestIcon(bundlePath, bundleInfoPlist):
bestIcon = None
bestSize = 0.0
for key in [":CFBundleIcons:CFBundlePrimaryIcon:CFBundleIconFiles", ":CFBundleIcons~ipad:CFBundlePrimaryIcon:CFBundleIconFiles"]:
for m in re.finditer(r"\w+(\d+(?:\.\d+)?)x\1", getPlistValue(bundleInfoPlist, key)):
size = float(m.group(1))
for scale, scaleSuffix in [(1, ""), (2, "@2x"), (3, "@3x")]:
iconSize = size * scale
if bestIcon is None or iconSize > bestSize:
for deviceSuffix in ["", "~iphone", "~ipad"]:
icon = os.path.join(bundlePath, m.group() + scaleSuffix + deviceSuffix + ".png")
if os.path.isfile(icon):
bestIcon = icon
bestSize = iconSize
return bestIcon
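# A minimal sketch (not part of the original script) of the icon-name pattern
# used in findBestIcon: it matches entries such as "AppIcon60x60" and captures
# the point size. The sample value below is hypothetical.
def _exampleIconNamePattern():
    sample = "AppIcon29x29 AppIcon40x40 AppIcon60x60"
    # -> [("AppIcon29x29", 29.0), ("AppIcon40x40", 40.0), ("AppIcon60x60", 60.0)]
    return [(m.group(), float(m.group(1)))
            for m in re.finditer(r"\w+(\d+(?:\.\d+)?)x\1", sample)]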
def findSigningIdentity(teamIdentifier):
output = subprocess.check_output(["security", "find-identity", "-v", "-p", "codesigning"])
match = re.search(r"iPhone Distribution: .* \(" + teamIdentifier + "\)", output)
if match is None:
log.e("Error: Failed to automatically find signing identity.")
sys.exit(1)
return match.group(0)
def findMobileProvisionAndSigningIdentity(bundleIdentifier):
for mobileprovision in glob.iglob(os.path.expanduser(MOBILE_PROVISIONS)):
tmpFile = os.path.join(tmpDir, "tmp.plist")
if not writeMobileProvisionPList(mobileprovision, tmpFile):
continue
mpBundleId = getPlistValue(tmpFile, ":Entitlements:application-identifier")
mpTeamId = getPlistValue(tmpFile, ":TeamIdentifier:0")
if mpTeamId + "." + bundleIdentifier != mpBundleId:
continue
if getPlistValue(tmpFile, ":Platform:0") != "iOS":
continue
if getPlistValue(tmpFile, ":Entitlements:get-task-allow") == "true":
continue
if getPlistValue(tmpFile, ":ProvisionedDevices") == "":
continue
signingIdentity = findSigningIdentity(mpTeamId)
return (mobileprovision, signingIdentity)
return (None, None)
class DropboxUploader:
def __init__(self, uploaderDir):
self.script = os.path.join(uploaderDir, "dropbox_uploader.sh")
requireFile(self.script, "Dropbox uploader script")
requireFile(os.path.expanduser("~/.dropbox_uploader"), "Dropbox uploader config file", "Please run " + self.script + " to set up dropbox_uploader. The 'App permission' mode is recommended.")
def upload(self, source, dest):
subprocess.check_call([self.script, "-q", "upload", source, dest])
def share(self, path):
return subprocess.check_output([self.script, "-q", "share", path]).strip().replace("?dl=0", "").replace("www.dropbox.com", "dl.dropboxusercontent.com")
def run(args):
scriptDir = os.path.dirname(sys.argv[0])
templateDir = os.path.join(scriptDir, "templates")
binDir = os.path.join(scriptDir, "bin")
manifestTemplate = os.path.join(templateDir, MANIFEST_PLIST)
manifestTarget = os.path.join(tmpDir, MANIFEST_PLIST)
indexTemplate = os.path.join(templateDir, INDEX_HTML)
indexTarget = os.path.join(tmpDir, INDEX_HTML)
dropboxUploader = DropboxUploader(os.path.join(scriptDir, "externals", "Dropbox-Uploader"))
bundlePath = args.bundle
bundleInfoPlist = os.path.join(bundlePath, "Info.plist")
bundleEmbeddedMobileProvision = os.path.join(bundlePath, "embedded.mobileprovision")
packageApplication = os.path.join(tmpDir, "PackageApplication")
packageApplicationOrig = os.path.join(scriptDir, "externals", "PackageApplication", "PackageApplication")
packageApplicationPatch = os.path.join(scriptDir, "PackageApplication.patch")
# package application needs absolute path:
ipaTarget = os.path.realpath(os.path.join(tmpDir, OUTPUT_IPA))
requireFile(manifestTemplate, "Manifest template")
requireFile(indexTemplate, "Index template")
requireDir(bundlePath, "Bundle")
requireFile(bundleInfoPlist, "Bundle Info.plist")
requireFile(bundleEmbeddedMobileProvision, "Bundle embedded.mobileprovision")
log.v("Gathering Info...")
bundleIdentifier = getPlistValue(bundleInfoPlist, ":CFBundleIdentifier")
requireMatch(r"^\w+(\.\w+)*$", bundleIdentifier, "Bundle Identifier")
bundleVersion = getPlistValue(bundleInfoPlist, ":CFBundleVersion")
requireMatch(r"^\d+(\.\d+)*$", bundleVersion, "Bundle Version")
bundleDisplayName = getPlistValue(bundleInfoPlist, ":CFBundleDisplayName")
requireMatch(r"^.+$", bundleDisplayName, "Bundle Name")
iconTarget = findBestIcon(bundlePath, bundleInfoPlist)
dropboxRoot = os.path.join(args.dropbox_root, bundleIdentifier)
ipaDropboxTarget = os.path.join(dropboxRoot, OUTPUT_IPA)
iconDropboxTarget = os.path.join(dropboxRoot, ICON_PNG)
manifestDropboxTarget = os.path.join(dropboxRoot, MANIFEST_PLIST)
indexDropboxTarget = os.path.join(dropboxRoot, INDEX_HTML)
log.v(" Bundle Identifier = ", bundleIdentifier)
log.v(" Bundle Version = ", bundleVersion)
log.v(" Bundle Name = ", bundleDisplayName)
log.v(" Best Icon = ", os.path.basename(iconTarget))
log.v(" Dropbox Target = ", dropboxRoot)
log.v(" done")
log.v("Determining (re)signing info...")
(mobileprovision, signingIdentity) = findMobileProvisionAndSigningIdentity(bundleIdentifier)
if args.signing_identity is not None:
signingIdentity = args.signing_identity
log.v(" Signing Identity = ", signingIdentity)
if args.mobile_provision is not None:
mobileprovision = args.mobile_provision
log.v(" Mobile Provision = ", mobileprovision)
if args.check_only:
return
log.v("Packaging application...")
shutil.copy(packageApplicationOrig, packageApplication)
subprocess.check_output(["patch", packageApplication, packageApplicationPatch])
subprocess.check_output(["chmod", "+x", packageApplication])
subprocess.check_call([packageApplication, bundlePath, "-s", signingIdentity, "-o", ipaTarget, "--embed", mobileprovision])
log.v(" done")
log.v("Uploading IPA to Dropbox...")
dropboxUploader.upload(ipaTarget, ipaDropboxTarget)
ipaDropboxUrl = dropboxUploader.share(ipaDropboxTarget)
dropboxUploader.upload(iconTarget, iconDropboxTarget)
iconDropboxUrl = dropboxUploader.share(iconDropboxTarget)
log.v(" done")
log.v("Creating manifest...")
with open(manifestTemplate, "r") as fIn:
with open(manifestTarget, "w") as fOut:
fOut.write(string.Template(fIn.read()).safe_substitute(
IpaUrl = ipaDropboxUrl,
BundleIdentifier = bundleIdentifier,
BundleVersion = bundleVersion,
Title = bundleDisplayName,
IconUrl = iconDropboxUrl
))
dropboxUploader.upload(manifestTarget, manifestDropboxTarget)
manifestDropboxUrl = dropboxUploader.share(manifestDropboxTarget)
log.v(" done")
log.v("Creating index...")
with open(indexTemplate, "r") as fIn:
with open(indexTarget, "w") as fOut:
fOut.write(string.Template(fIn.read()).safe_substitute(
Title = bundleDisplayName,
About = "",
IconUrl = iconDropboxUrl,
BundleVersion = bundleVersion,
IpaSize = "%.1f MiB" % (os.path.getsize(ipaTarget) / 1048576.0),
EscapedManifestUrl = urllib.quote(manifestDropboxUrl, safe = '')
))
dropboxUploader.upload(indexTarget, indexDropboxTarget)
indexDropboxUrl = dropboxUploader.share(indexDropboxTarget)
log.v(" done")
log.v("")
log.v("Link to OTA install page:")
log.o(indexDropboxUrl)
if __name__ == "__main__":
try:
tmpDir = tempfile.mkdtemp()
parser = argparse.ArgumentParser(
description = "Upload AdHoc iPhone builds to Dropbox, for OTA installation on devices."
)
parser.add_argument(
"--check-only",
action = "store_const",
const = True,
default = False,
help = "Only perform checks, don't upload anything.")
parser.add_argument(
"--dropbox-root",
default = DEFAULT_DROPBOX_ROOT,
help = "Path in DropBox to put builds. This path is either relative to your Dropbox root or the uploader's folder in Apps depending on how you have set up dropbox_uploader. (Default: %(default)s)")
parser.add_argument(
"-s", "--signing-identity",
help = "Signing identify to use when signing the IPA file. If not supplied the program will try to automatically find one.")
parser.add_argument(
"--mobile-provision",
help = "Path to mobile provision to embed within the IPA file. If not supplied the problem will try to automatically find one.")
parser.add_argument(
"-q", "--quiet",
action = "store_const",
const = True,
default = False,
help = "Supress all output except the final HTML URL.")
parser.add_argument(
"bundle",
help = "Path to built .app bundle.")
args = parser.parse_args()
log = Logger(args.quiet)
run(args)
finally:
shutil.rmtree(tmpDir)
|
|
''' Tests for fortran sequential files '''
import tempfile
import shutil
from os import path
from glob import iglob
import re
from numpy.testing import assert_equal, assert_allclose
import numpy as np
import pytest
from scipy.io import (FortranFile,
_test_fortran,
FortranEOFError,
FortranFormattingError)
DATA_PATH = path.join(path.dirname(__file__), 'data')
def test_fortranfiles_read():
for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
if not m:
raise RuntimeError("Couldn't match %s filename to regex" % filename)
dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
dtype = m.group(1).replace('s', '<')
f = FortranFile(filename, 'r', '<u4')
data = f.read_record(dtype=dtype).reshape(dims, order='F')
f.close()
expected = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
assert_equal(data, expected)
def test_fortranfiles_mixed_record():
filename = path.join(DATA_PATH, "fortran-mixed.dat")
with FortranFile(filename, 'r', '<u4') as f:
record = f.read_record('<i4,<f4,<i8,(2)<f8')
assert_equal(record['f0'][0], 1)
assert_allclose(record['f1'][0], 2.3)
assert_equal(record['f2'][0], 4)
assert_allclose(record['f3'][0], [5.6, 7.8])
def test_fortranfiles_write():
for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
if not m:
raise RuntimeError("Couldn't match %s filename to regex" % filename)
dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
dtype = m.group(1).replace('s', '<')
data = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
tmpdir = tempfile.mkdtemp()
try:
testFile = path.join(tmpdir,path.basename(filename))
f = FortranFile(testFile, 'w','<u4')
f.write_record(data.T)
f.close()
originalfile = open(filename, 'rb')
newfile = open(testFile, 'rb')
assert_equal(originalfile.read(), newfile.read(),
err_msg=filename)
originalfile.close()
newfile.close()
finally:
shutil.rmtree(tmpdir)
def test_fortranfile_read_mixed_record():
# The data file fortran-3x3d-2i.dat contains the program that
# produced it at the end.
#
# double precision :: a(3,3)
# integer :: b(2)
# ...
# open(1, file='fortran-3x3d-2i.dat', form='unformatted')
# write(1) a, b
# close(1)
#
filename = path.join(DATA_PATH, "fortran-3x3d-2i.dat")
with FortranFile(filename, 'r', '<u4') as f:
record = f.read_record('(3,3)f8', '2i4')
ax = np.arange(3*3).reshape(3, 3).astype(np.double)
bx = np.array([-1, -2], dtype=np.int32)
assert_equal(record[0], ax.T)
assert_equal(record[1], bx.T)
def test_fortranfile_write_mixed_record(tmpdir):
tf = path.join(str(tmpdir), 'test.dat')
records = [
(('f4', 'f4', 'i4'), (np.float32(2), np.float32(3), np.int32(100))),
(('4f4', '(3,3)f4', '8i4'), (np.random.randint(255, size=[4]).astype(np.float32),
np.random.randint(255, size=[3, 3]).astype(np.float32),
np.random.randint(255, size=[8]).astype(np.int32)))
]
for dtype, a in records:
with FortranFile(tf, 'w') as f:
f.write_record(*a)
with FortranFile(tf, 'r') as f:
b = f.read_record(*dtype)
assert_equal(len(a), len(b))
for aa, bb in zip(a, b):
assert_equal(bb, aa)
def test_fortran_roundtrip(tmpdir):
filename = path.join(str(tmpdir), 'test.dat')
np.random.seed(1)
# double precision
m, n, k = 5, 3, 2
a = np.random.randn(m, n, k)
with FortranFile(filename, 'w') as f:
f.write_record(a.T)
a2 = _test_fortran.read_unformatted_double(m, n, k, filename)
with FortranFile(filename, 'r') as f:
a3 = f.read_record('(2,3,5)f8').T
assert_equal(a2, a)
assert_equal(a3, a)
# integer
m, n, k = 5, 3, 2
a = np.random.randn(m, n, k).astype(np.int32)
with FortranFile(filename, 'w') as f:
f.write_record(a.T)
a2 = _test_fortran.read_unformatted_int(m, n, k, filename)
with FortranFile(filename, 'r') as f:
a3 = f.read_record('(2,3,5)i4').T
assert_equal(a2, a)
assert_equal(a3, a)
# mixed
m, n, k = 5, 3, 2
a = np.random.randn(m, n)
b = np.random.randn(k).astype(np.intc)
with FortranFile(filename, 'w') as f:
f.write_record(a.T, b.T)
a2, b2 = _test_fortran.read_unformatted_mixed(m, n, k, filename)
with FortranFile(filename, 'r') as f:
a3, b3 = f.read_record('(3,5)f8', '2i4')
a3 = a3.T
assert_equal(a2, a)
assert_equal(a3, a)
assert_equal(b2, b)
assert_equal(b3, b)
def test_fortran_eof_ok(tmpdir):
filename = path.join(str(tmpdir), "scratch")
np.random.seed(1)
with FortranFile(filename, 'w') as f:
f.write_record(np.random.randn(5))
f.write_record(np.random.randn(3))
with FortranFile(filename, 'r') as f:
assert len(f.read_reals()) == 5
assert len(f.read_reals()) == 3
with pytest.raises(FortranEOFError):
f.read_reals()
def test_fortran_eof_broken_size(tmpdir):
filename = path.join(str(tmpdir), "scratch")
np.random.seed(1)
with FortranFile(filename, 'w') as f:
f.write_record(np.random.randn(5))
f.write_record(np.random.randn(3))
with open(filename, "ab") as f:
f.write(b"\xff")
with FortranFile(filename, 'r') as f:
assert len(f.read_reals()) == 5
assert len(f.read_reals()) == 3
with pytest.raises(FortranFormattingError):
f.read_reals()
def test_fortran_bogus_size(tmpdir):
filename = path.join(str(tmpdir), "scratch")
np.random.seed(1)
with FortranFile(filename, 'w') as f:
f.write_record(np.random.randn(5))
f.write_record(np.random.randn(3))
with open(filename, "w+b") as f:
f.write(b"\xff\xff")
with FortranFile(filename, 'r') as f:
with pytest.raises(FortranFormattingError):
f.read_reals()
def test_fortran_eof_broken_record(tmpdir):
filename = path.join(str(tmpdir), "scratch")
np.random.seed(1)
with FortranFile(filename, 'w') as f:
f.write_record(np.random.randn(5))
f.write_record(np.random.randn(3))
with open(filename, "ab") as f:
f.truncate(path.getsize(filename)-20)
with FortranFile(filename, 'r') as f:
assert len(f.read_reals()) == 5
with pytest.raises(FortranFormattingError):
f.read_reals()
def test_fortran_eof_multidimensional(tmpdir):
filename = path.join(str(tmpdir), "scratch")
n, m, q = 3, 5, 7
dt = np.dtype([("field", np.float64, (n, m))])
a = np.zeros(q, dtype=dt)
with FortranFile(filename, 'w') as f:
f.write_record(a[0])
f.write_record(a)
f.write_record(a)
with open(filename, "ab") as f:
f.truncate(path.getsize(filename)-20)
with FortranFile(filename, 'r') as f:
assert len(f.read_record(dtype=dt)) == 1
assert len(f.read_record(dtype=dt)) == q
with pytest.raises(FortranFormattingError):
f.read_record(dtype=dt)
|
|
"""
from search_command import SearchCommand
import sys
class Echo(SearchCommand):
def __init__(self, what_to_echo="Hello World"):
# Save the parameters
self.what_to_echo = what_to_echo
# Initialize the class
SearchCommand.__init__( self, run_in_preview=True, logger_name='echo_search_command')
def handle_results(self, results, in_preview, session_key):
self.output_results({'echo' : self.what_to_echo})
if __name__ == '__main__':
try:
Echo.execute()
sys.exit(0)
except Exception as e:
print e
"""
import splunk.Intersplunk
import sys
import os
import logging
from logging import handlers
from splunk import SplunkdConnectionException
from splunk.appserver.mrsparkle.lib.util import make_splunkhome_path
class SearchCommand(object):
# List of valid parameters
PARAM_RUN_IN_PREVIEW = "run_in_preview"
PARAM_DEBUG = "debug"
VALID_PARAMS = [ PARAM_RUN_IN_PREVIEW, PARAM_DEBUG ]
def __init__(self, run_in_preview=False, logger_name='python_search_command', log_level=logging.INFO ):
"""
Constructs an instance of the search command.
Arguments:
run_in_preview -- Indicates whether the search command should run in preview mode
logger_name -- The name to use for the logger
"""
self.run_in_preview = run_in_preview
# Check and save the logger name
self._logger = None
if logger_name is None or len(logger_name) == 0:
raise Exception("Logger name cannot be empty")
self.logger_name = logger_name
self.log_level = log_level
# self.logger.info("args" + str(args))
@property
def logger(self):
# Make a logger unless it already exists
if self._logger is not None:
return self._logger
logger = logging.getLogger(self.logger_name)
logger.propagate = False # Prevent the log messages from being duplicated in the python.log file
logger.setLevel(self.log_level)
file_handler = handlers.RotatingFileHandler(make_splunkhome_path(['var', 'log', 'splunk', self.logger_name + '.log']), maxBytes=25000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
self._logger = logger
return self._logger
@logger.setter
def logger(self, logger):
self._logger = logger
@classmethod
def parse_argument(cls, argument):
"""
Parses an argument in the form of name=value and returns the name and value as two arguments
Arguments:
argument -- The argument that should be split into a name/value pair (i.e. name=value)
"""
# Find the character that splits the name from the value (returns -1 if it cannot be found)
splitter = argument.find('=')
# If no equal-sign was found then initialize the value to None
if splitter < 0:
name = argument
value = None
# If a splitter was found, then parse the value
else:
name = argument[0:splitter]
value = argument[splitter+1:len(argument)]
# Return the results
return name, value
@classmethod
def get_arguments(cls):
"""
Get the arguments as args and kwargs so that they can be processed into a constructor call to a search command.
"""
kwargs = {}
args = []
# Iterate through the arguments and initialize the corresponding argument
if len(sys.argv) > 1:
# Iterate through each argument
for a in sys.argv[1:]:
# Parse the argument
name, value = cls.parse_argument( a )
# If the argument has no value then it was an unnamed argument. Put it in the arguments array
if value is None:
args.append(name)
else:
# Put the argument in a dictionary
kwargs[name] = value
return args, kwargs
@classmethod
def make_instance(cls):
"""
Produce an instance of the search command with arguments from the command-line.
"""
args, kwargs = cls.get_arguments()
return cls(*args, **kwargs)
@classmethod
def execute(cls):
"""
Initialize an instance and run it.
"""
try:
instance = cls.make_instance()
instance.run()
except Exception as e:
splunk.Intersplunk.parseError( str(e) )
# self.logger.exception("Search command threw an exception")
def run(self, results=None):
try:
# Get the results from Splunk (unless results were provided)
if results is None:
results, dummyresults, settings = splunk.Intersplunk.getOrganizedResults()
session_key = settings.get('sessionKey', None)
# Don't write out the events in preview mode
in_preview = settings.get('preview', '0')
# If run_in_preview is undefined, then just continue
if self.run_in_preview is None:
pass
# Don't do anything if the command is supposed to run in preview but the results are not preview results
elif self.run_in_preview and in_preview == "0":
# Don't run in non-preview mode since we already processed the events in preview mode
if len(results) > 0:
self.logger.info( "Search command is set to run in preview, ignoring %d results provided in non-preview mode" % ( len(results) ) )
return None
# Don't do anything if the command is NOT supposed to run in preview but the results are previewed results
elif not self.run_in_preview and not in_preview == "0":
return None
else:
settings = None
session_key = None
in_preview = '0'
# Execute the search command
self.handle_results(results, in_preview, session_key)
except Exception as e:
splunk.Intersplunk.parseError( str(e) )
self.logger.exception("Search command threw an exception")
def output_results(self, results):
"""
Output results to Splunk.
Arguments:
results -- An array of dictionaries of fields/values to send to Splunk.
"""
splunk.Intersplunk.outputResults(results)
def handle_results(self, results, in_preview, session_key):
"""
Arguments:
results -- The results from Splunk to process
in_preview -- Whether the search is running in preview
session_key -- The session key to use for connecting to Splunk
"""
raise Exception("handle_results needs to be implemented")
|
|
import pytest
from Analyst1 import *
MOCK_SERVER: str = 'mock.com'
MOCK_USER: str = 'mock'
MOCK_PASS: str = 'mock'
MOCK_INDICATOR: str = 'mock-indicator'
BASE_MOCK_JSON: dict = {
'type': 'domain',
'value': {
'name': f'{MOCK_INDICATOR}',
'classification': 'U'
},
'description': None,
'activityDates': [
{
'date': '2020-01-20',
'classification': 'U'
}
],
'reportedDates': [
{
'date': '2020-01-31',
'classification': 'U'
}
],
'targets': [
{
'name': 'Mock Target',
'id': 1,
'classification': 'U'
}
],
'attackPatterns': [
{
'name': 'Mock Attack Pattern',
'id': 1,
'classification': 'U'
}
],
'actors': [
{
'name': 'Mock Actor',
'id': 1,
'classification': 'U'
}
],
'malwares': [],
'status': 'aw',
'hashes': None,
'fileNames': None,
'fileSize': None,
'path': None,
'ports': [],
'ipRegistration': None,
'domainRegistration': None,
'ipResolution': None,
'originatingIps': None,
'subjects': None,
'requestMethods': None,
'tlp': 'mocktlp',
'tlpJustification': None,
'tlpCaveats': None,
'tlpResolution': 'resolved',
'tlpHighestAssociated': 'mocktlp',
'tlpLowestAssociated': 'mocktlp',
'active': True,
'benign': {
'value': False,
'classification': 'U'
},
'confidenceLevel': None,
'exploitStage': None,
'lastHit': None,
'firstHit': None,
'hitCount': None,
'reportCount': 1,
'verified': False,
'tasked': False,
'links': [
{
'rel': 'self',
'href': f'https://{MOCK_SERVER}.com/api/1_0/indicator/1',
'hreflang': None,
'media': None,
'title': None,
'type': None,
'deprecation': None
},
{
'rel': 'evidence',
'href': f'https://{MOCK_SERVER}.com/api/1_0/indicator/1/evidence',
'hreflang': None,
'media': None,
'title': None,
'type': None,
'deprecation': None
},
{
'rel': 'stix',
'href': f'https://{MOCK_SERVER}.com/api/1_0/indicator/1/stix',
'hreflang': None,
'media': None,
'title': None,
'type': None,
'deprecation': None
}
],
'id': 1
}
MOCK_CLIENT_PARAMS = {
'server': MOCK_SERVER,
'proxy': 'false',
'insecure': 'true',
'credentials': {
'identifier': MOCK_USER,
'password': MOCK_PASS
}
}
@pytest.fixture
def mock_client():
return build_client(MOCK_CLIENT_PARAMS)
def mock_indicator_search(indicator_type: str, requests_mock):
requests_mock.get(
f'https://{MOCK_SERVER}/api/1_0/indicator/match?type={indicator_type}&value={MOCK_INDICATOR}',
json=BASE_MOCK_JSON
)
def test_domain_command(requests_mock, mock_client):
mock_indicator_search('domain', requests_mock)
args: dict = {'domain': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = domain_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_email_command(requests_mock, mock_client):
mock_indicator_search('email', requests_mock)
args: dict = {'email': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = email_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_ip_command(requests_mock, mock_client):
mock_indicator_search('ip', requests_mock)
args: dict = {'ip': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = ip_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_file_command(requests_mock, mock_client):
mock_indicator_search('file', requests_mock)
args: dict = {'file': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = file_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_url_command(requests_mock, mock_client):
mock_indicator_search('url', requests_mock)
args: dict = {'url': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = url_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_analyst1_enrich_string_command(requests_mock, mock_client):
mock_indicator_search('string', requests_mock)
args: dict = {'string': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = analyst1_enrich_string_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_analyst1_enrich_ipv6_command(requests_mock, mock_client):
mock_indicator_search('ipv6', requests_mock)
args: dict = {'ip': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = analyst1_enrich_ipv6_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_analyst1_enrich_mutex_command(requests_mock, mock_client):
mock_indicator_search('mutex', requests_mock)
args: dict = {'mutex': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = analyst1_enrich_mutex_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_analyst1_enrich_http_request_command(requests_mock, mock_client):
mock_indicator_search('httpRequest', requests_mock)
args: dict = {'http-request': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = analyst1_enrich_http_request_command(mock_client, args)[0]
assert enrichment_output.analyst1_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_malicious_indicator_check_empty(mock_client):
data = {}
assert mock_client.is_indicator_malicious(data) is False
def test_malicious_indicator_check_benign_false(mock_client):
data = {
"benign": {
"value": False
}
}
assert mock_client.is_indicator_malicious(data) is True
def test_malicious_indicator_check_benign_true(mock_client):
data = {
"benign": {
"value": True
}
}
assert mock_client.is_indicator_malicious(data) is False
|
|
from __future__ import absolute_import
import sys
from six import StringIO
from mock import patch
from .testcases import DockerClientTestCase
from fig.cli.main import TopLevelCommand
class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.old_sys_exit = sys.exit
sys.exit = lambda code=0: None
self.command = TopLevelCommand()
self.command.base_dir = 'tests/fixtures/simple-figfile'
def tearDown(self):
sys.exit = self.old_sys_exit
self.project.kill()
self.project.remove_stopped()
@property
def project(self):
return self.command.get_project(self.command.get_config_path())
def test_help(self):
old_base_dir = self.command.base_dir
self.command.base_dir = 'tests/fixtures/no-figfile'
with self.assertRaises(SystemExit) as exc_context:
self.command.dispatch(['help', 'up'], None)
self.assertIn('Usage: up [options] [SERVICE...]', str(exc_context.exception))
# self.project.kill() fails during teardown
# unless there is a figfile.
self.command.base_dir = old_base_dir
@patch('sys.stdout', new_callable=StringIO)
def test_ps(self, mock_stdout):
self.project.get_service('simple').create_container()
self.command.dispatch(['ps'], None)
self.assertIn('simplefigfile_simple_1', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_ps_default_figfile(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
self.command.dispatch(['up', '-d'], None)
self.command.dispatch(['ps'], None)
output = mock_stdout.getvalue()
self.assertIn('multiplefigfiles_simple_1', output)
self.assertIn('multiplefigfiles_another_1', output)
self.assertNotIn('multiplefigfiles_yetanother_1', output)
@patch('sys.stdout', new_callable=StringIO)
def test_ps_alternate_figfile(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
self.command.dispatch(['-f', 'fig2.yml', 'up', '-d'], None)
self.command.dispatch(['-f', 'fig2.yml', 'ps'], None)
output = mock_stdout.getvalue()
self.assertNotIn('multiplefigfiles_simple_1', output)
self.assertNotIn('multiplefigfiles_another_1', output)
self.assertIn('multiplefigfiles_yetanother_1', output)
@patch('fig.service.log')
def test_pull(self, mock_logging):
self.command.dispatch(['pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (busybox:latest)...')
@patch('sys.stdout', new_callable=StringIO)
def test_build_no_cache(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
self.command.dispatch(['build', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
mock_stdout.truncate(0)
self.command.dispatch(['build', '--no-cache', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
def test_up(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
def test_up_with_links(self):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['up', '-d', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
def test_up_with_no_deps(self):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 0)
self.assertEqual(len(console.containers()), 0)
def test_up_with_recreate(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertNotEqual(old_ids, new_ids)
def test_up_with_keep_old(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--no-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertEqual(old_ids, new_ids)
@patch('dockerpty.start')
def test_run_service_without_links(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['run', 'console', '/bin/true'], None)
self.assertEqual(len(self.project.containers()), 0)
@patch('dockerpty.start')
def test_run_service_with_links(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
@patch('dockerpty.start')
def test_run_with_no_deps(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 0)
@patch('dockerpty.start')
def test_run_does_not_recreate_linked_containers(self, __):
self.command.base_dir = 'tests/fixtures/links-figfile'
self.command.dispatch(['up', '-d', 'db'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 1)
old_ids = [c.id for c in db.containers()]
self.command.dispatch(['run', 'web', '/bin/true'], None)
self.assertEqual(len(db.containers()), 1)
new_ids = [c.id for c in db.containers()]
self.assertEqual(old_ids, new_ids)
@patch('dockerpty.start')
def test_run_without_command(self, __):
self.command.base_dir = 'tests/fixtures/commands-figfile'
self.check_build('tests/fixtures/simple-dockerfile', tag='figtest_test')
for c in self.project.containers(stopped=True, one_off=True):
c.remove()
self.command.dispatch(['run', 'implicit'], None)
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/sh -c echo "success"'],
)
self.command.dispatch(['run', 'explicit'], None)
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/true'],
)
@patch('dockerpty.start')
def test_run_service_with_entrypoint_overridden(self, _):
self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
name = 'service'
self.command.dispatch(
['run', '--entrypoint', '/bin/echo', name, 'helloworld'],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(
container.human_readable_command,
u'/bin/echo helloworld'
)
@patch('dockerpty.start')
def test_run_service_with_environment_overridden(self, _):
name = 'service'
self.command.base_dir = 'tests/fixtures/environment-figfile'
self.command.dispatch(
['run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo',
'-e', 'alpha=beta', name],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
# env overridden
self.assertEqual('notbar', container.environment['foo'])
# keep environment from yaml
self.assertEqual('world', container.environment['hello'])
# added option from command line
self.assertEqual('beta', container.environment['alpha'])
# make sure a value containing '=' doesn't crash
self.assertEqual('moto=bobo', container.environment['allo'])
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_kill(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigint(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGINT'], None)
self.assertEqual(len(service.containers()), 1)
# The container is still running. It has only been interrupted.
self.assertTrue(service.containers()[0].is_running)
def test_kill_interrupted_service(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.command.dispatch(['kill', '-s', 'SIGINT'], None)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_restart(self):
service = self.project.get_service('simple')
container = service.create_container()
service.start_container(container)
started_at = container.dictionary['State']['StartedAt']
self.command.dispatch(['restart'], None)
container.inspect()
self.assertNotEqual(
container.dictionary['State']['FinishedAt'],
'0001-01-01T00:00:00Z',
)
self.assertNotEqual(
container.dictionary['State']['StartedAt'],
started_at,
)
def test_scale(self):
project = self.project
self.command.scale(project, {'SERVICE=NUM': ['simple=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=3', 'another=2']})
self.assertEqual(len(project.get_service('simple').containers()), 3)
self.assertEqual(len(project.get_service('another').containers()), 2)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=0', 'another=0']})
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_port(self):
self.command.base_dir = 'tests/fixtures/ports-figfile'
self.command.dispatch(['up', '-d'], None)
container = self.project.get_service('simple').get_container()
@patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout):
self.command.dispatch(['port', 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
self.assertEqual(get_port(3000), container.get_local_port(3000))
self.assertEqual(get_port(3001), "0.0.0.0:9999")
self.assertEqual(get_port(3002), "")
|
|
from __future__ import unicode_literals
from collections import OrderedDict
import hashlib
import os
import posixpath
import re
import json
from django.conf import settings
from django.core.cache import (caches, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.six.moves.urllib.parse import unquote, urlsplit, urlunsplit, urldefrag
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
        # FileSystemStorage falls back to MEDIA_ROOT when location
        # is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
default_template = """url("%s")"""
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(HashedFilesMixin, self).__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Returns a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
opened = False
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
hashed_name = self.stored_name(clean_name)
final_url = super(HashedFilesMixin, self).url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name, template=None):
"""
Returns the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return template % unquote(relative_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read().decode(settings.FILE_CHARSET)
for patterns in self._patterns.values():
for pattern, template in patterns:
converter = self.url_converter(name, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_text(self.clean_name(saved_name))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_text(self.clean_name(saved_name))
# and then set the cache accordingly
hashed_files[self.hash_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally store the processed paths
self.hashed_files.update(hashed_files)
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def stored_name(self, name):
hash_key = self.hash_key(name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
# store the hashed name if there was a miss, e.g.
# when the files are still processed
self.hashed_files[hash_key] = cache_name
return cache_name
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
def __init__(self, *args, **kwargs):
super(ManifestFilesMixin, self).__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode('utf-8')
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version', None)
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
all_post_processed = super(ManifestFilesMixin,
self).post_process(*args, **kwargs)
for post_processed in all_post_processed:
yield post_processed
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode('utf-8')
self._save(self.manifest_name, ContentFile(contents))
class _MappingCache(object):
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key, None)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
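# Illustrative sketch (added for this write-up; not part of the storage module
# above): HashedFilesMixin.file_hash() keeps the first 12 hex digits of the MD5
# of a file's content, and hashed_name() splices that digest in before the
# extension. The helper below mirrors that naming scheme for an in-memory blob;
# the path and content are made up for the example.
def _example_hashed_name():
    content = b'body { color: red; }'
    digest = hashlib.md5(content).hexdigest()[:12]
    root, ext = os.path.splitext('css/styles.css')
    return '%s.%s%s' % (root, digest, ext)   # -> 'css/styles.<digest>.css'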
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
from __future__ import print_function
from __future__ import unicode_literals
import multiprocessing.pool
from multiprocessing.pool import IMapIterator
def wrapper(func):
def wrap(self, timeout=None):
return func(self, timeout=timeout or 1 << 31)
return wrap
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next
# TODO(iannucci): Monkeypatch all other 'wait' methods too.
import binascii
import collections
import contextlib
import functools
import logging
import os
import re
import setup_color
import shutil
import signal
import sys
import tempfile
import textwrap
import threading
import subprocess2
from io import BytesIO
ROOT = os.path.abspath(os.path.dirname(__file__))
IS_WIN = sys.platform == 'win32'
TEST_MODE = False
def win_find_git():
for elem in os.environ.get('PATH', '').split(os.pathsep):
for candidate in ('git.exe', 'git.bat'):
path = os.path.join(elem, candidate)
if os.path.isfile(path):
return path
raise ValueError('Could not find Git on PATH.')
GIT_EXE = 'git' if not IS_WIN else win_find_git()
FREEZE = 'FREEZE'
FREEZE_SECTIONS = {
'indexed': 'soft',
'unindexed': 'mixed'
}
FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS)))
# NOTE: This list is DEPRECATED in favor of the Infra Git wrapper:
# https://chromium.googlesource.com/infra/infra/+/master/go/src/infra/tools/git
#
# New entries should be added to the Git wrapper, NOT to this list. "git_retry"
# is, similarly, being deprecated in favor of the Git wrapper.
#
# ---
#
# Retry a git operation if git returns an error response with any of these
# messages. It's all observed 'bad' GoB responses so far.
#
# This list is inspired/derived from the one in ChromiumOS's Chromite:
# <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS
#
# It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'.
GIT_TRANSIENT_ERRORS = (
# crbug.com/285832
r'!.*\[remote rejected\].*\(error in hook\)',
# crbug.com/289932
r'!.*\[remote rejected\].*\(failed to lock\)',
# crbug.com/307156
r'!.*\[remote rejected\].*\(error in Gerrit backend\)',
# crbug.com/285832
r'remote error: Internal Server Error',
# crbug.com/294449
r'fatal: Couldn\'t find remote ref ',
# crbug.com/220543
r'git fetch_pack: expected ACK/NAK, got',
# crbug.com/189455
r'protocol error: bad pack header',
# crbug.com/202807
r'The remote end hung up unexpectedly',
# crbug.com/298189
r'TLS packet with unexpected length was received',
# crbug.com/187444
r'RPC failed; result=\d+, HTTP code = \d+',
# crbug.com/388876
r'Connection timed out',
# crbug.com/430343
# TODO(dnj): Resync with Chromite.
r'The requested URL returned error: 5\d+',
r'Connection reset by peer',
r'Unable to look up',
r'Couldn\'t resolve host',
)
GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS),
re.IGNORECASE)
# git's for-each-ref command first supported the upstream:track token in its
# format string in version 1.9.0, but some usages were broken until 2.3.0.
# See git commit b6160d95 for more information.
MIN_UPSTREAM_TRACK_GIT_VERSION = (2, 3)
class BadCommitRefException(Exception):
def __init__(self, refs):
msg = ('one of %s does not seem to be a valid commitref.' %
str(refs))
super(BadCommitRefException, self).__init__(msg)
def memoize_one(**kwargs):
"""Memoizes a single-argument pure function.
Values of None are not cached.
Kwargs:
threadsafe (bool) - REQUIRED. Specifies whether to use locking around
cache manipulation functions. This is a kwarg so that users of memoize_one
are forced to explicitly and verbosely pick True or False.
  Adds four methods to the decorated function:
    * get(key, default=None) - Gets the value for this key from the cache.
    * set(key, value) - Sets the value for this key in the cache.
* clear() - Drops the entire contents of the cache. Useful for unittests.
* update(other) - Updates the contents of the cache from another dict.
"""
assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}'
threadsafe = kwargs['threadsafe']
if threadsafe:
def withlock(lock, f):
def inner(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return inner
else:
def withlock(_lock, f):
return f
def decorator(f):
# Instantiate the lock in decorator, in case users of memoize_one do:
#
# memoizer = memoize_one(threadsafe=True)
#
# @memoizer
# def fn1(val): ...
#
# @memoizer
# def fn2(val): ...
lock = threading.Lock() if threadsafe else None
cache = {}
_get = withlock(lock, cache.get)
_set = withlock(lock, cache.__setitem__)
@functools.wraps(f)
def inner(arg):
ret = _get(arg)
if ret is None:
ret = f(arg)
if ret is not None:
_set(arg, ret)
return ret
inner.get = _get
inner.set = _set
inner.clear = withlock(lock, cache.clear)
inner.update = withlock(lock, cache.update)
return inner
return decorator
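# Illustrative sketch (added for this write-up; not part of the original file):
# memoizing a single-argument function. The function and its argument values
# are made up for the example.
@memoize_one(threadsafe=False)
def _example_double(value):
  return value * 2


def _example_memoize_usage():  # pragma: no cover
  assert _example_double(2) == 4      # computed on the first call, then cached
  assert _example_double.get(2) == 4  # read straight from the cache
  _example_double.clear()             # drop all cached values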
def _ScopedPool_initer(orig, orig_args): # pragma: no cover
"""Initializer method for ScopedPool's subprocesses.
This helps ScopedPool handle Ctrl-C's correctly.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if orig:
orig(*orig_args)
@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
"""Context Manager which returns a multiprocessing.pool instance which
correctly deals with thrown exceptions.
*args - Arguments to multiprocessing.pool
Kwargs:
kind ('threads', 'procs') - The type of underlying coprocess to use.
**etc - Arguments to multiprocessing.pool
"""
if kwargs.pop('kind', None) == 'threads':
pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
else:
orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
kwargs['initializer'] = _ScopedPool_initer
kwargs['initargs'] = orig, orig_args
pool = multiprocessing.pool.Pool(*args, **kwargs)
try:
yield pool
pool.close()
except:
pool.terminate()
raise
finally:
pool.join()
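# Illustrative sketch (added for this write-up; not part of the original file):
# ScopedPool with the 'threads' backend, so plain lambdas can be mapped without
# pickling concerns.
def _example_scoped_pool():  # pragma: no cover
  with ScopedPool(kind='threads') as pool:
    squares = pool.map(lambda x: x * x, range(10))
  return squares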
class ProgressPrinter(object):
"""Threaded single-stat status message printer."""
def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5):
"""Create a ProgressPrinter.
Use it as a context manager which produces a simple 'increment' method:
with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
for i in xrange(1000):
# do stuff
if i % 10 == 0:
inc(10)
Args:
fmt - String format with a single '%(count)d' where the counter value
should go.
enabled (bool) - If this is None, will default to True if
logging.getLogger() is set to INFO or more verbose.
fout (file-like) - The stream to print status messages to.
period (float) - The time in seconds for the printer thread to wait
between printing.
"""
self.fmt = fmt
if enabled is None: # pragma: no cover
self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
else:
self.enabled = enabled
self._count = 0
self._dead = False
self._dead_cond = threading.Condition()
self._stream = fout
self._thread = threading.Thread(target=self._run)
self._period = period
def _emit(self, s):
if self.enabled:
self._stream.write('\r' + s)
self._stream.flush()
def _run(self):
with self._dead_cond:
while not self._dead:
self._emit(self.fmt % {'count': self._count})
self._dead_cond.wait(self._period)
self._emit((self.fmt + '\n') % {'count': self._count})
def inc(self, amount=1):
self._count += amount
def __enter__(self):
self._thread.start()
return self.inc
def __exit__(self, _exc_type, _exc_value, _traceback):
self._dead = True
with self._dead_cond:
self._dead_cond.notifyAll()
self._thread.join()
del self._thread
def once(function):
"""@Decorates |function| so that it only performs its action once, no matter
how many times the decorated |function| is called."""
has_run = [False]
def _wrapper(*args, **kwargs):
if not has_run[0]:
has_run[0] = True
function(*args, **kwargs)
return _wrapper
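# Illustrative sketch (added for this write-up; not part of the original file):
# a function decorated with @once runs its body only on the first call.
@once
def _example_print_banner():  # pragma: no cover
  print('printed at most once, no matter how often this is called')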
def unicode_repr(s):
result = repr(s)
return result[1:] if result.startswith('u') else result
## Git functions
def die(message, *args):
print(textwrap.dedent(message % args), file=sys.stderr)
sys.exit(1)
def blame(filename, revision=None, porcelain=False, abbrev=None, *_args):
command = ['blame']
if porcelain:
command.append('-p')
if revision is not None:
command.append(revision)
if abbrev is not None:
command.append('--abbrev=%d' % abbrev)
command.extend(['--', filename])
return run(*command)
def branch_config(branch, option, default=None):
return get_config('branch.%s.%s' % (branch, option), default=default)
def branch_config_map(option):
"""Return {branch: <|option| value>} for all branches."""
try:
reg = re.compile(r'^branch\.(.*)\.%s$' % option)
lines = get_config_regexp(reg.pattern)
return {reg.match(k).group(1): v for k, v in (l.split() for l in lines)}
except subprocess2.CalledProcessError:
return {}
def branches(use_limit=True, *args):
NO_BRANCH = ('* (no branch', '* (detached', '* (HEAD detached')
key = 'depot-tools.branch-limit'
limit = get_config_int(key, 20)
raw_branches = run('branch', *args).splitlines()
num = len(raw_branches)
if use_limit and num > limit:
die("""\
Your git repo has too many branches (%d/%d) for this tool to work well.
You may adjust this limit by running:
git config %s <new_limit>
You may also try cleaning up your old branches by running:
git cl archive
""", num, limit, key)
for line in raw_branches:
if line.startswith(NO_BRANCH):
continue
yield line.split()[-1]
def get_config(option, default=None):
try:
return run('config', '--get', option) or default
except subprocess2.CalledProcessError:
return default
def get_config_int(option, default=0):
assert isinstance(default, int)
try:
return int(get_config(option, default))
except ValueError:
return default
def get_config_list(option):
try:
return run('config', '--get-all', option).split()
except subprocess2.CalledProcessError:
return []
def get_config_regexp(pattern):
if IS_WIN: # pragma: no cover
# this madness is because we call git.bat which calls git.exe which calls
# bash.exe (or something to that effect). Each layer divides the number of
# ^'s by 2.
pattern = pattern.replace('^', '^' * 8)
return run('config', '--get-regexp', pattern).splitlines()
def current_branch():
try:
return run('rev-parse', '--abbrev-ref', 'HEAD')
except subprocess2.CalledProcessError:
return None
def del_branch_config(branch, option, scope='local'):
del_config('branch.%s.%s' % (branch, option), scope=scope)
def del_config(option, scope='local'):
try:
run('config', '--' + scope, '--unset', option)
except subprocess2.CalledProcessError:
pass
def diff(oldrev, newrev, *args):
return run('diff', oldrev, newrev, *args)
def freeze():
took_action = False
key = 'depot-tools.freeze-size-limit'
MB = 2**20
limit_mb = get_config_int(key, 100)
untracked_bytes = 0
root_path = repo_root()
for f, s in status():
if is_unmerged(s):
die("Cannot freeze unmerged changes!")
if limit_mb > 0:
if s.lstat == '?':
untracked_bytes += os.stat(os.path.join(root_path, f)).st_size
if limit_mb > 0 and untracked_bytes > limit_mb * MB:
die("""\
You appear to have too much untracked+unignored data in your git
checkout: %.1f / %d MB.
Run `git status` to see what it is.
In addition to making many git commands slower, this will prevent
depot_tools from freezing your in-progress changes.
You should add untracked data that you want to ignore to your repo's
.git/info/exclude
file. See `git help ignore` for the format of this file.
    If this data is intended as part of your commit, you may adjust the
freeze limit by running:
git config %s <new_limit>
Where <new_limit> is an integer threshold in megabytes.""",
untracked_bytes / (MB * 1.0), limit_mb, key)
try:
run('commit', '--no-verify', '-m', FREEZE + '.indexed')
took_action = True
except subprocess2.CalledProcessError:
pass
add_errors = False
try:
run('add', '-A', '--ignore-errors')
except subprocess2.CalledProcessError:
add_errors = True
try:
run('commit', '--no-verify', '-m', FREEZE + '.unindexed')
took_action = True
except subprocess2.CalledProcessError:
pass
ret = []
if add_errors:
ret.append('Failed to index some unindexed files.')
if not took_action:
ret.append('Nothing to freeze.')
return ' '.join(ret) or None
def get_branch_tree():
"""Get the dictionary of {branch: parent}, compatible with topo_iter.
Returns a tuple of (skipped, <branch_tree dict>) where skipped is a set of
branches without upstream branches defined.
"""
skipped = set()
branch_tree = {}
for branch in branches():
parent = upstream(branch)
if not parent:
skipped.add(branch)
continue
branch_tree[branch] = parent
return skipped, branch_tree
def get_or_create_merge_base(branch, parent=None):
"""Finds the configured merge base for branch.
If parent is supplied, it's used instead of calling upstream(branch).
"""
base = branch_config(branch, 'base')
base_upstream = branch_config(branch, 'base-upstream')
parent = parent or upstream(branch)
if parent is None or branch is None:
return None
actual_merge_base = run('merge-base', parent, branch)
if base_upstream != parent:
base = None
base_upstream = None
def is_ancestor(a, b):
return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0
if base and base != actual_merge_base:
if not is_ancestor(base, branch):
logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base)
base = None
elif is_ancestor(base, actual_merge_base):
logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base)
base = None
else:
logging.debug('Found pre-set merge-base for %s: %s', branch, base)
if not base:
base = actual_merge_base
manual_merge_base(branch, base, parent)
return base
def hash_multi(*reflike):
return run('rev-parse', *reflike).splitlines()
def hash_one(reflike, short=False):
args = ['rev-parse', reflike]
if short:
args.insert(1, '--short')
return run(*args)
def in_rebase():
git_dir = run('rev-parse', '--git-dir')
return (
os.path.exists(os.path.join(git_dir, 'rebase-merge')) or
os.path.exists(os.path.join(git_dir, 'rebase-apply')))
def intern_f(f, kind='blob'):
"""Interns a file object into the git object store.
Args:
f (file-like object) - The file-like object to intern
kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.
Returns the git hash of the interned object (hex encoded).
"""
ret = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
f.close()
return ret
def is_dormant(branch):
# TODO(iannucci): Do an oldness check?
return branch_config(branch, 'dormant', 'false') != 'false'
def is_unmerged(stat_value):
return (
'U' in (stat_value.lstat, stat_value.rstat) or
((stat_value.lstat == stat_value.rstat) and stat_value.lstat in 'AD')
)
def manual_merge_base(branch, base, parent):
set_branch_config(branch, 'base', base)
set_branch_config(branch, 'base-upstream', parent)
def mktree(treedict):
"""Makes a git tree object and returns its hash.
See |tree()| for the values of mode, type, and ref.
Args:
treedict - { name: (mode, type, ref) }
"""
with tempfile.TemporaryFile() as f:
for name, (mode, typ, ref) in treedict.items():
f.write(('%s %s %s\t%s\0' % (mode, typ, ref, name)).encode('utf-8'))
f.seek(0)
return run('mktree', '-z', stdin=f)
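# Illustrative sketch (added for this write-up; not part of the original file):
# building a one-entry tree with mktree(). The ref is the well-known SHA-1 of
# the empty blob, so no prior `git hash-object` call is needed.
def _example_mktree():  # pragma: no cover
  empty_blob = 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
  return mktree({'empty.txt': ('100644', 'blob', empty_blob)})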
def parse_commitrefs(*commitrefs):
"""Returns binary encoded commit hashes for one or more commitrefs.
A commitref is anything which can resolve to a commit. Popular examples:
* 'HEAD'
* 'origin/master'
* 'cool_branch~2'
"""
try:
return [binascii.unhexlify(h) for h in hash_multi(*commitrefs)]
except subprocess2.CalledProcessError:
raise BadCommitRefException(commitrefs)
RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr')
def rebase(parent, start, branch, abort=False):
"""Rebases |start|..|branch| onto the branch |parent|.
Args:
parent - The new parent ref for the rebased commits.
start - The commit to start from
branch - The branch to rebase
abort - If True, will call git-rebase --abort in the event that the rebase
doesn't complete successfully.
Returns a namedtuple with fields:
success - a boolean indicating that the rebase command completed
successfully.
    stdout, stderr - if the rebase failed, these contain the captured output of
      the failed rebase.
"""
try:
args = ['--onto', parent, start, branch]
if TEST_MODE:
args.insert(0, '--committer-date-is-author-date')
run('rebase', *args)
return RebaseRet(True, '', '')
except subprocess2.CalledProcessError as cpe:
if abort:
run_with_retcode('rebase', '--abort') # ignore failure
return RebaseRet(False, cpe.stdout, cpe.stderr)
def remove_merge_base(branch):
del_branch_config(branch, 'base')
del_branch_config(branch, 'base-upstream')
def repo_root():
"""Returns the absolute path to the repository root."""
return run('rev-parse', '--show-toplevel')
def root():
return get_config('depot-tools.upstream', 'origin/master')
@contextlib.contextmanager
def less(): # pragma: no cover
"""Runs 'less' as context manager yielding its stdin as a PIPE.
Automatically checks if sys.stdout is a non-TTY stream. If so, it avoids
running less and just yields sys.stdout.
"""
if not setup_color.IS_TTY:
yield sys.stdout
return
# Run with the same options that git uses (see setup_pager in git repo).
# -F: Automatically quit if the output is less than one screen.
# -R: Don't escape ANSI color codes.
# -X: Don't clear the screen before starting.
cmd = ('less', '-FRX')
try:
proc = subprocess2.Popen(cmd, stdin=subprocess2.PIPE)
yield proc.stdin
finally:
proc.stdin.close()
proc.wait()
def run(*cmd, **kwargs):
"""The same as run_with_stderr, except it only returns stdout."""
return run_with_stderr(*cmd, **kwargs)[0]
def run_with_retcode(*cmd, **kwargs):
"""Run a command but only return the status code."""
try:
run(*cmd, **kwargs)
return 0
except subprocess2.CalledProcessError as cpe:
return cpe.returncode
def run_stream(*cmd, **kwargs):
"""Runs a git command. Returns stdout as a PIPE (file-like object).
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
return proc.stdout
@contextlib.contextmanager
def run_stream_with_retcode(*cmd, **kwargs):
"""Runs a git command as context manager yielding stdout as a PIPE.
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
Raises subprocess2.CalledProcessError on nonzero return code.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
try:
proc = subprocess2.Popen(cmd, **kwargs)
yield proc.stdout
finally:
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(),
None, None)
def run_with_stderr(*cmd, **kwargs):
"""Runs a git command.
Returns (stdout, stderr) as a pair of strings.
kwargs
autostrip (bool) - Strip the output. Defaults to True.
indata (str) - Specifies stdin data for the process.
"""
kwargs.setdefault('stdin', subprocess2.PIPE)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('stderr', subprocess2.PIPE)
kwargs.setdefault('shell', False)
autostrip = kwargs.pop('autostrip', True)
indata = kwargs.pop('indata', None)
decode = kwargs.pop('decode', True)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
ret, err = proc.communicate(indata)
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err)
if autostrip:
ret = (ret or b'').strip()
err = (err or b'').strip()
if decode:
ret = ret.decode('utf-8', 'replace')
err = err.decode('utf-8', 'replace')
return ret, err
def set_branch_config(branch, option, value, scope='local'):
set_config('branch.%s.%s' % (branch, option), value, scope=scope)
def set_config(option, value, scope='local'):
run('config', '--' + scope, option, value)
def get_dirty_files():
# Make sure index is up-to-date before running diff-index.
run_with_retcode('update-index', '--refresh', '-q')
return run('diff-index', '--ignore-submodules', '--name-status', 'HEAD')
def is_dirty_git_tree(cmd):
w = lambda s: sys.stderr.write(s+"\n")
dirty = get_dirty_files()
if dirty:
w('Cannot %s with a dirty tree. Commit, freeze or stash your changes first.'
% cmd)
w('Uncommitted files: (git diff-index --name-status HEAD)')
w(dirty[:4096])
if len(dirty) > 4096: # pragma: no cover
w('... (run "git diff-index --name-status HEAD" to see full output).')
return True
return False
def status():
"""Returns a parsed version of git-status.
Returns a generator of (current_name, (lstat, rstat, src)) pairs where:
* current_name is the name of the file
* lstat is the left status code letter from git-status
    * rstat is the right status code letter from git-status
* src is the current name of the file, or the original name of the file
if lstat == 'R'
"""
stat_entry = collections.namedtuple('stat_entry', 'lstat rstat src')
def tokenizer(stream):
acc = BytesIO()
c = None
while c != b'':
c = stream.read(1)
if c in (None, b'', b'\0'):
if len(acc.getvalue()):
yield acc.getvalue()
acc = BytesIO()
else:
acc.write(c)
def parser(tokens):
while True:
try:
status_dest = next(tokens).decode('utf-8')
except StopIteration:
return
stat, dest = status_dest[:2], status_dest[3:]
lstat, rstat = stat
if lstat == 'R':
src = next(tokens).decode('utf-8')
else:
src = dest
yield (dest, stat_entry(lstat, rstat, src))
return parser(tokenizer(run_stream('status', '-z', bufsize=-1)))
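# Illustrative sketch (added for this write-up; not part of the original file):
# consuming the status() generator; each entry pairs a path with its
# (lstat, rstat, src) stat_entry.
def _example_status():  # pragma: no cover
  for path, entry in status():
    if entry.lstat == '?':
      print('untracked: %s' % path)
    elif entry.lstat == 'R':
      print('renamed %s -> %s' % (entry.src, path))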
def squash_current_branch(header=None, merge_base=None):
header = header or 'git squash commit for %s.' % current_branch()
merge_base = merge_base or get_or_create_merge_base(current_branch())
log_msg = header + '\n'
if log_msg:
log_msg += '\n'
log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base)
run('reset', '--soft', merge_base)
if not get_dirty_files():
# Sometimes the squash can result in the same tree, meaning that there is
# nothing to commit at this point.
print('Nothing to commit; squashed branch is empty')
return False
run('commit', '--no-verify', '-a', '-F', '-', indata=log_msg.encode('utf-8'))
return True
def tags(*args):
return run('tag', *args).splitlines()
def thaw():
took_action = False
for sha in run_stream('rev-list', 'HEAD').readlines():
sha = sha.strip().decode('utf-8')
    msg = run('show', '--format=%f%b', '-s', sha)
match = FREEZE_MATCHER.match(msg)
if not match:
if not took_action:
return 'Nothing to thaw.'
break
run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha)
took_action = True
def topo_iter(branch_tree, top_down=True):
  """Generates (branch, parent) in topological order for a branch tree.
Given a tree:
A1
B1 B2
C1 C2 C3
D1
branch_tree would look like: {
'D1': 'C3',
'C3': 'B2',
'B2': 'A1',
'C1': 'B1',
'C2': 'B1',
'B1': 'A1',
}
It is OK to have multiple 'root' nodes in your graph.
if top_down is True, items are yielded from A->D. Otherwise they're yielded
from D->A. Within a layer the branches will be yielded in sorted order.
"""
branch_tree = branch_tree.copy()
# TODO(iannucci): There is probably a more efficient way to do these.
if top_down:
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.items()
if p not in branch_tree]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
del branch_tree[branch]
else:
parent_to_branches = collections.defaultdict(set)
for branch, parent in branch_tree.items():
parent_to_branches[parent].add(branch)
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.items()
if not parent_to_branches[b]]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
parent_to_branches[parent].discard(branch)
del branch_tree[branch]
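# Illustrative sketch (added for this write-up; not part of the original file):
# topo_iter() over the example tree from the docstring above; this needs no
# git repository at all.
def _example_topo_iter():  # pragma: no cover
  branch_tree = {
      'D1': 'C3', 'C3': 'B2', 'B2': 'A1',
      'C1': 'B1', 'C2': 'B1', 'B1': 'A1',
  }
  # Top-down, parents come before their children:
  #   [('B1', 'A1'), ('B2', 'A1'), ('C1', 'B1'),
  #    ('C2', 'B1'), ('C3', 'B2'), ('D1', 'C3')]
  return list(topo_iter(branch_tree))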
def tree(treeref, recurse=False):
"""Returns a dict representation of a git tree object.
Args:
treeref (str) - a git ref which resolves to a tree (commits count as trees).
recurse (bool) - include all of the tree's descendants too. File names will
take the form of 'some/path/to/file'.
Return format:
{ 'file_name': (mode, type, ref) }
mode is an integer where:
* 0040000 - Directory
* 0100644 - Regular non-executable file
* 0100664 - Regular non-executable group-writeable file
* 0100755 - Regular executable file
* 0120000 - Symbolic link
* 0160000 - Gitlink
type is a string where it's one of 'blob', 'commit', 'tree', 'tag'.
ref is the hex encoded hash of the entry.
"""
ret = {}
opts = ['ls-tree', '--full-tree']
if recurse:
opts.append('-r')
opts.append(treeref)
try:
for line in run(*opts).splitlines():
mode, typ, ref, name = line.split(None, 3)
ret[name] = (mode, typ, ref)
except subprocess2.CalledProcessError:
return None
return ret
def get_remote_url(remote='origin'):
try:
return run('config', 'remote.%s.url' % remote)
except subprocess2.CalledProcessError:
return None
def upstream(branch):
try:
return run('rev-parse', '--abbrev-ref', '--symbolic-full-name',
branch+'@{upstream}')
except subprocess2.CalledProcessError:
return None
def get_git_version():
"""Returns a tuple that contains the numeric components of the current git
version."""
version_string = run('--version')
  version_match = re.search(r'(\d+\.)+(\d+)', version_string)
version = version_match.group() if version_match else ''
return tuple(int(x) for x in version.split('.'))
def get_branches_info(include_tracking_status):
format_string = (
'--format=%(refname:short):%(objectname:short):%(upstream:short):')
# This is not covered by the depot_tools CQ which only has git version 1.8.
if (include_tracking_status and
get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION): # pragma: no cover
format_string += '%(upstream:track)'
info_map = {}
data = run('for-each-ref', format_string, 'refs/heads')
BranchesInfo = collections.namedtuple(
'BranchesInfo', 'hash upstream ahead behind')
for line in data.splitlines():
(branch, branch_hash, upstream_branch, tracking_status) = line.split(':')
ahead_match = re.search(r'ahead (\d+)', tracking_status)
ahead = int(ahead_match.group(1)) if ahead_match else None
behind_match = re.search(r'behind (\d+)', tracking_status)
behind = int(behind_match.group(1)) if behind_match else None
info_map[branch] = BranchesInfo(
hash=branch_hash, upstream=upstream_branch, ahead=ahead, behind=behind)
  # Set None for upstreams which are not branches (e.g. empty upstream, remotes
# and deleted upstream branches).
missing_upstreams = {}
for info in info_map.values():
if info.upstream not in info_map and info.upstream not in missing_upstreams:
missing_upstreams[info.upstream] = None
result = info_map.copy()
result.update(missing_upstreams)
return result
def make_workdir_common(repository, new_workdir, files_to_symlink,
files_to_copy, symlink=None):
if not symlink:
symlink = os.symlink
os.makedirs(new_workdir)
for entry in files_to_symlink:
clone_file(repository, new_workdir, entry, symlink)
for entry in files_to_copy:
clone_file(repository, new_workdir, entry, shutil.copy)
def make_workdir(repository, new_workdir):
GIT_DIRECTORY_WHITELIST = [
'config',
'info',
'hooks',
'logs/refs',
'objects',
'packed-refs',
'refs',
'remotes',
'rr-cache',
]
make_workdir_common(repository, new_workdir, GIT_DIRECTORY_WHITELIST,
['HEAD'])
def clone_file(repository, new_workdir, link, operation):
if not os.path.exists(os.path.join(repository, link)):
return
link_dir = os.path.dirname(os.path.join(new_workdir, link))
if not os.path.exists(link_dir):
os.makedirs(link_dir)
src = os.path.join(repository, link)
if os.path.islink(src):
src = os.path.realpath(src)
operation(src, os.path.join(new_workdir, link))
|
|
##
# This module implements a hierarchy of authorities and performs a similar
# function as the "tree" module of the original SFA prototype. An HRN
# is assumed to be a string of authorities separated by dots. For example,
# "planetlab.us.arizona.bakers". Each component of the HRN is a different
# authority, with the last component being a leaf in the tree.
#
# Each authority is stored in a subdirectory on the registry. Inside this
# subdirectory are several files:
# *.GID - GID file
# *.PKEY - private key file
##
import os
from vt_manager.communication.sfa.util.faults import MissingAuthority
#from vt_manager.communication.sfa.util.vt_manager.communication.sfa.ogging import logger
from vt_manager.communication.sfa.util.xrn import get_leaf, get_authority, hrn_to_urn, urn_to_hrn
from vt_manager.communication.sfa.trust.certificate import Keypair
from vt_manager.communication.sfa.trust.credential import Credential
from vt_manager.communication.sfa.trust.gid import GID, create_uuid
from vt_manager.communication.sfa.sfa_config import config
from vt_manager.communication.sfa.trust.sfaticket import SfaTicket
#from vt_manager.communication.sfa.setUp import setup_config as auth_config
##
# The AuthInfo class contains the information for an authority. This information
# includes the GID, private key, and database connection information.
class AuthInfo:
hrn = None
gid_object = None
gid_filename = None
privkey_filename = None
##
    # Initialize an authority object.
#
# @param xrn the human readable name of the authority (urn will be converted to hrn)
# @param gid_filename the filename containing the GID
# @param privkey_filename the filename containing the private key
def __init__(self, xrn, gid_filename, privkey_filename):
hrn, type = urn_to_hrn(xrn)
self.hrn = hrn
self.set_gid_filename(gid_filename)
self.privkey_filename = privkey_filename
##
# Set the filename of the GID
#
# @param fn filename of file containing GID
def set_gid_filename(self, fn):
self.gid_filename = fn
self.gid_object = None
def get_privkey_filename(self):
return self.privkey_filename
def get_gid_filename(self):
return self.gid_filename
##
# Get the GID in the form of a GID object
def get_gid_object(self):
if not self.gid_object:
self.gid_object = GID(filename = self.gid_filename)
return self.gid_object
##
# Get the private key in the form of a Keypair object
def get_pkey_object(self):
return Keypair(filename = self.privkey_filename)
##
# Replace the GID with a new one. The file specified by gid_filename is
# overwritten with the new GID object
#
# @param gid object containing new GID
def update_gid_object(self, gid):
gid.save_to_file(self.gid_filename)
self.gid_object = gid
##
# The Hierarchy class is responsible for managing the tree of authorities.
# Each authority is a node in the tree and exists as an AuthInfo object.
#
# The tree is stored on disk in a hierarchical manner that reflects the
# structure of the tree. Each authority is a subdirectory, and each subdirectory
# contains the GID and pkey files for that authority (as well as
# subdirectories for each sub-authority)
class Hierarchy:
##
# Create the hierarchy object.
#
# @param basedir the base directory to store the hierarchy in
def __init__(self, basedir = None):
self.config = config
if not basedir:
basedir = os.path.join(self.config.SFA_DATA_DIR, "authorities")
self.basedir = basedir
##
# Given a hrn, return the filenames of the GID, private key
# files.
#
    # @param xrn the human readable name of the authority (urn will be converted to hrn)
def get_auth_filenames(self, xrn):
hrn, type = urn_to_hrn(xrn)
leaf = get_leaf(hrn)
parent_hrn = get_authority(hrn)
directory = os.path.join(self.basedir, hrn.replace(".", "/"))
gid_filename = os.path.join(directory, leaf+".gid")
privkey_filename = os.path.join(directory, leaf+".pkey")
return (directory, gid_filename, privkey_filename)
##
    # Check to see if an authority exists. An authority exists if its disk
    # files exist.
#
    # @param xrn the human readable name of the authority to check
def auth_exists(self, xrn):
hrn, type = urn_to_hrn(xrn)
(directory, gid_filename, privkey_filename) = \
self.get_auth_filenames(hrn)
print directory, gid_filename, privkey_filename
return os.path.exists(gid_filename) and os.path.exists(privkey_filename)
##
# Create an authority. A private key for the authority and the associated
# GID are created and signed by the parent authority.
#
# @param xrn the human readable name of the authority to create (urn will be converted to hrn)
# @param create_parents if true, also create the parents if they do not exist
def create_auth(self, xrn, create_parents=False):
hrn, type = urn_to_hrn(str(xrn))
# create the parent authority if necessary
parent_hrn = get_authority(hrn)
parent_urn = hrn_to_urn(parent_hrn, 'authority')
if (parent_hrn) and (not self.auth_exists(parent_urn)) and (create_parents):
self.create_auth(parent_urn, create_parents)
(directory, gid_filename, privkey_filename,) = \
self.get_auth_filenames(hrn)
# create the directory to hold the files
try:
os.makedirs(directory)
        # if the path already exists (errno 17) then carry on, otherwise re-raise
        except OSError, (errno, strerr):
            if errno != 17:
                raise
if os.path.exists(privkey_filename):
pkey = Keypair(filename = privkey_filename)
else:
pkey = Keypair(create = True)
pkey.save_to_file(privkey_filename)
gid = self.create_gid(xrn, create_uuid(), pkey)
gid.save_to_file(gid_filename, save_parents=True)
def create_top_level_auth(self, hrn=None):
"""
        Create top level records (includes root and sub authorities (local/remote))
"""
        # create the authority if it doesn't already exist
if not self.auth_exists(hrn):
self.create_auth(hrn, create_parents=True)
def get_interface_auth_info(self, create=True):
hrn = self.config.SFA_INTERFACE_HRN
if not self.auth_exists(hrn):
if create==True:
self.create_top_level_auth(hrn)
else:
raise MissingAuthority(hrn)
return self.get_auth_info(hrn)
##
# Return the AuthInfo object for the specified authority. If the authority
# does not exist, then an exception is thrown. As a side effect, disk files
# and a subdirectory may be created to store the authority.
#
# @param xrn the human readable name of the authority to create (urn will be converted to hrn).
def get_auth_info(self, xrn):
hrn, type = urn_to_hrn(xrn)
if not self.auth_exists(hrn):
raise MissingAuthority(hrn)
(directory, gid_filename, privkey_filename, ) = \
self.get_auth_filenames(hrn)
auth_info = AuthInfo(hrn, gid_filename, privkey_filename)
# check the GID and see if it needs to be refreshed
gid = auth_info.get_gid_object()
gid_refreshed = self.refresh_gid(gid)
if gid != gid_refreshed:
auth_info.update_gid_object(gid_refreshed)
return auth_info
##
    # Create a new GID. The GID will be signed by the authority that is its
    # immediate parent in the hierarchy (and recursively, the parent's GID
    # will be signed by its parent)
#
# @param hrn the human readable name to store in the GID
# @param uuid the unique identifier to store in the GID
# @param pkey the public key to store in the GID
def create_gid(self, xrn, uuid, pkey, CA=False, email=None):
hrn, type = urn_to_hrn(xrn)
if not type:
type = 'authority'
parent_hrn = get_authority(hrn)
# Using hrn_to_urn() here to make sure the urn is in the right format
# If xrn was a hrn instead of a urn, then the gid's urn will be
# of type None
urn = hrn_to_urn(hrn, type)
subject = self.get_subject(hrn)
if not subject:
subject = hrn
gid = GID(subject=subject, uuid=uuid, hrn=hrn, urn=urn, email=email)
# is this a CA cert
if hrn == self.config.SFA_INTERFACE_HRN or not parent_hrn:
# root or sub authority
gid.set_intermediate_ca(True)
elif type and 'authority' in type:
# authority type
gid.set_intermediate_ca(False)
elif CA:
gid.set_intermediate_ca(True)
else:
gid.set_intermediate_ca(False)
# set issuer
if not parent_hrn or hrn == self.config.SFA_INTERFACE_HRN:
# if there is no parent hrn, then it must be self-signed. this
# is where we terminate the recursion
gid.set_issuer(pkey, subject)
else:
# we need the parent's private key in order to sign this GID
parent_auth_info = self.get_auth_info(parent_hrn)
parent_gid = parent_auth_info.get_gid_object()
gid.set_issuer(parent_auth_info.get_pkey_object(), parent_gid.get_extended_subject())
gid.set_parent(parent_auth_info.get_gid_object())
gid.set_pubkey(pkey)
gid.encode()
gid.sign()
return gid
def get_subject(self,hrn):
if len(hrn.split('.'))>1:
subject = auth_config.SUBJECT
else:
subject = auth_config.PARENT_SUBJECT
return subject
##
    # Refresh a GID. The primary use of this function is to refresh the
    # expiration time of the GID. It may also be used to change the HRN,
# UUID, or Public key of the GID.
#
# @param gid the GID to refresh
# @param hrn if !=None, change the hrn
# @param uuid if !=None, change the uuid
# @param pubkey if !=None, change the public key
def refresh_gid(self, gid, xrn=None, uuid=None, pubkey=None):
# TODO: compute expiration time of GID, refresh it if necessary
gid_is_expired = False
# update the gid if we need to
if gid_is_expired or xrn or uuid or pubkey:
if not xrn:
xrn = gid.get_urn()
if not uuid:
uuid = gid.get_uuid()
if not pubkey:
pubkey = gid.get_pubkey()
gid = self.create_gid(xrn, uuid, pubkey)
return gid
##
# Retrieve an authority credential for an authority. The authority
# credential will contain the authority privilege and will be signed by
# the authority's parent.
#
    # @param xrn the human readable name of the authority (urn is converted to hrn)
    # @param kind the type of credential to return (authority | sa | ma)
def get_auth_cred(self, xrn, kind="authority"):
hrn, type = urn_to_hrn(xrn)
auth_info = self.get_auth_info(hrn)
gid = auth_info.get_gid_object()
cred = Credential(subject=hrn)
cred.set_gid_caller(gid)
cred.set_gid_object(gid)
cred.set_privileges(kind)
cred.get_privileges().delegate_all_privileges(True)
#cred.set_pubkey(auth_info.get_gid_object().get_pubkey())
parent_hrn = get_authority(hrn)
if not parent_hrn or hrn == self.config.SFA_INTERFACE_HRN:
# if there is no parent hrn, then it must be self-signed. this
# is where we terminate the recursion
cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
else:
# we need the parent's private key in order to sign this GID
parent_auth_info = self.get_auth_info(parent_hrn)
cred.set_issuer_keys(parent_auth_info.get_privkey_filename(), parent_auth_info.get_gid_filename())
cred.set_parent(self.get_auth_cred(parent_hrn, kind))
cred.encode()
cred.sign()
return cred
##
# Retrieve an authority ticket. An authority ticket is not actually a
# redeemable ticket, but only serves the purpose of being included as the
# parent of another ticket, in order to provide a chain of authentication
# for a ticket.
#
# This looks almost the same as get_auth_cred, but works for tickets
# XXX does similarity imply there should be more code re-use?
#
# @param xrn the human readable name of the authority (urn is converted to hrn)
def get_auth_ticket(self, xrn):
hrn, type = urn_to_hrn(xrn)
auth_info = self.get_auth_info(hrn)
gid = auth_info.get_gid_object()
ticket = SfaTicket(subject=hrn)
ticket.set_gid_caller(gid)
ticket.set_gid_object(gid)
ticket.set_delegate(True)
ticket.set_pubkey(auth_info.get_gid_object().get_pubkey())
parent_hrn = get_authority(hrn)
if not parent_hrn:
# if there is no parent hrn, then it must be self-signed. this
# is where we terminate the recursion
ticket.set_issuer(auth_info.get_pkey_object(), hrn)
else:
# we need the parent's private key in order to sign this GID
parent_auth_info = self.get_auth_info(parent_hrn)
ticket.set_issuer(parent_auth_info.get_pkey_object(), parent_auth_info.hrn)
ticket.set_parent(self.get_auth_cred(parent_hrn))
ticket.encode()
ticket.sign()
return ticket
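##
# Illustrative usage sketch (added for this write-up; not part of the original
# module): create an authority if needed and read back its AuthInfo. The HRN is
# the example one used in the comments at the top of this module.
def _example_hierarchy_usage():
    hierarchy = Hierarchy()
    urn = hrn_to_urn("planetlab.us.arizona", "authority")
    if not hierarchy.auth_exists(urn):
        hierarchy.create_auth(urn, create_parents=True)
    auth_info = hierarchy.get_auth_info(urn)
    return auth_info.get_gid_object()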
|
|
"""
bjson/connection.py
Asynchronous Bidirectional JSON-RPC protocol implementation over TCP/IP
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# Local changes:
import errno
import inspect
import socket
import traceback
import sys
import threading
from types import MethodType, FunctionType
from bjsonrpc.proxies import Proxy
from bjsonrpc.request import Request
from bjsonrpc.exceptions import EofError, ServerError
from bjsonrpc import bjsonrpc_options
import bjsonrpc.jsonlib as json
import select
from quasar.logger import log as _log
class RemoteObject(object):
"""
    Represents an object on the server side (or on the client side, when speaking
    from the point of view of the server). It remembers its name on the server
    side to allow calls to the original object.
Parameters:
**conn**
Connection object which holds the socket to the other end
of the communications
**obj**
        JSON object (Python dictionary) holding the values received.
It is used to retrieve the properties to create the remote object.
(Initially only used to get object name)
Example::
list = conn.call.newList()
for i in range(10): list.notify.add(i)
print list.call.getitems()
Attributes:
**name**
        name of the object on the server side
**call**
        Synchronous Proxy. It forwards your calls to the other end, waits
        for the response and returns the value.
**method**
        Asynchronous Proxy. It forwards your calls to the other end and
        immediately returns a *request.Request* instance.
**pipe**
        Asynchronous Proxy for "pipe" calls with multiple returns; like
        **method**, but you can check request.value multiple times, and must
        call request.close() when you're done.
**notify**
        Notification Proxy. It forwards your calls to the other end and
        tells the server not to respond even if there is an error in the call.
        Returns *None*.
"""
name = None
call = None
method = None
notify = None
pipe = None
@property
def connection(self):
"""
Public property to get the internal connection object.
"""
return self._conn
def __init__(self, conn, obj):
self._conn = conn
self.name = obj['__remoteobject__']
self.call = Proxy(self._conn, obj=self.name, sync_type=0)
self.method = Proxy(self._conn, obj=self.name, sync_type=1)
self.notify = Proxy(self._conn, obj=self.name, sync_type=2)
self.pipe = Proxy(self._conn, obj=self.name, sync_type=3)
def __del__(self):
self._close()
def _close(self):
"""
Internal close method called both by __del__() and public
method close()
"""
self.call.__delete__()
self.name = None
def close(self):
"""
Closes/deletes the remote object. The server may or may not delete
        it at this time, but after this call we no longer have any access to it.
This method is automatically called when Python deletes this instance.
"""
return self._close()
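# Illustrative usage sketch (added for this write-up; not part of the original
# module): how the proxy flavours are typically exercised from the client side.
# The host, port and the 'echo' method are made-up assumptions; any running
# bjsonrpc server exposing such a method would do.
def _example_proxies():
    import bjsonrpc
    conn = bjsonrpc.connect(host='127.0.0.1', port=10123)
    print(conn.call.echo('hi'))        # synchronous: waits for the result
    request = conn.method.echo('hi')   # asynchronous: returns a Request
    print(request.value)               # blocks until the response arrives
    conn.notify.echo('hi')             # notification: no response is expected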
class Connection(object): # TODO: Split this class in simple ones
"""
    Represents a communication tunnel between two parties.
**sck**
Connected socket to use. Should be an instance of *socket.socket* or
something compatible.
**address**
Address of the other peer in (host,port) form. It is only used to
inform handlers about the peer address.
**handler_factory**
Class type inherited from BaseHandler which holds the public methods.
        It defaults to *NullHandler*, meaning no public methods will be
        available to the other end.
**Members:**
**call**
        Synchronous Proxy. It forwards your calls to the other end, waits
        for the response and returns the value
**method**
        Asynchronous Proxy. It forwards your calls to the other end and
        immediately returns a *request.Request* instance.
**notify**
        Notification Proxy. It forwards your calls to the other end and
        tells the server not to respond even if there is an error in the call.
        Returns *None*.
"""
_maxtimeout = {
'read': 60, # default maximum read timeout.
'write': 60, # default maximum write timeout.
}
_SOCKET_COMM_ERRORS = (errno.ECONNABORTED, errno.ECONNREFUSED,
errno.ECONNRESET, errno.ENETDOWN,
errno.ENETRESET, errno.ENETUNREACH)
call = None
method = None
notify = None
pipe = None
@classmethod
def setmaxtimeout(cls, operation, value):
"""
Set the maximum timeout in seconds for **operation** operation.
Parameters:
**operation**
The operation which has to be configured. Can be either 'read'
or 'write'.
**value**
The timeout in seconds as a floating point number. If it is None, the
operation will block until it succeeds. If it is 0, it will be non-blocking.
"""
assert(operation in ['read', 'write'])
cls._maxtimeout[operation] = value
@classmethod
def getmaxtimeout(cls, operation):
"""
Get the maximum timeout in seconds for **operation** operation.
Parameters:
**operation**
The operation which has to be configured. Can be either 'read'
or 'write'.
**(return value)**
The timeout in seconds as a floating number or None.
"""
if operation not in cls._maxtimeout:
return None
return cls._maxtimeout[operation]
def __init__(self, sck, address=None, handler_factory=None):
self._debug_socket = False
self._debug_dispatch = False
self._buffer = b''
self._sck = sck
self._address = address
self._handler = handler_factory
self.connection_status = "open"
if self._handler:
self.handler = self._handler(self)
self._id = 0
self._requests = {}
self._objects = {}
self.scklock = threading.Lock()
self.call = Proxy(self, sync_type=0)
self.method = Proxy(self, sync_type=1)
self.notify = Proxy(self, sync_type=2)
self.pipe = Proxy(self, sync_type=3)
self._wbuffer = b''
self.write_lock = threading.RLock()
self.read_lock = threading.RLock()
self.getid_lock = threading.Lock()
self.reading_event = threading.Event()
self.threaded = bjsonrpc_options['threaded']
self.write_thread_queue = []
self.write_thread_semaphore = threading.Semaphore(0)
self.write_thread = threading.Thread(target=self.write_thread)
self.write_thread.daemon = True
self.write_thread.start()
@property
def socket(self):
"""
public property that holds the internal socket used.
"""
return self._sck
def get_id(self):
"""
Retrieves a new ID counter. Each connection has an exclusive ID counter.
It is mainly used to create internal id's for calls.
"""
self.getid_lock.acquire()
# Prevent two threads to execute this code simultaneously
self._id += 1
ret = self._id
self.getid_lock.release()
return ret
def load_object(self, obj):
"""
Helper function for JSON loads. Given a dictionary (JavaScript object),
returns an appropriate object (a specific class) in certain cases.
It is mainly used to convert JSON hinted classes back to real classes.
Parameters:
**obj**
Dictionary-like object to test.
**(return value)**
Either the same dictionary, or a class representing that object.
"""
if '__remoteobject__' in obj:
return RemoteObject(self, obj)
if '__objectreference__' in obj:
return self._objects[obj['__objectreference__']]
if '__functionreference__' in obj:
name = obj['__functionreference__']
if '.' in name:
objname, methodname = name.split('.')
obj = self._objects[objname]
else:
obj = self.handler
methodname = name
method = obj.get_method(methodname)
return method
return obj
def addrequest(self, request):
"""
Adds a request to the queue of requests waiting for response.
"""
assert(isinstance(request, Request))
assert(request.request_id not in self._requests)
self._requests[request.request_id] = request
def delrequest(self, req_id):
"""
Removes a request from the queue of requests waiting for a response.
"""
del self._requests[req_id]
def dump_object(self, obj):
"""
Helper function to convert classes and functions to JSON objects.
Given an incompatible object *obj*, dump_object returns a
JSON-hinted object that represents the original parameter.
Parameters:
**obj**
Object, class, function, etc. which is incompatible with JSON
serialization.
**(return value)**
A valid serialization for that object using JSON class hinting.
"""
# object of unknown type
if type(obj) is FunctionType or type(obj) is MethodType:
conn = getattr(obj, '_conn', None)
if conn != self:
raise TypeError("Tried to serialize as JSON a handler for "
"another connection!")
return self._dump_functionreference(obj)
if not isinstance(obj, object):
raise TypeError("JSON objects must be new-style classes")
if not hasattr(obj, '__class__'):
raise TypeError("JSON objects must be instances, not types")
if obj.__class__.__name__ == 'Decimal': # Probably is just a float.
return float(obj)
if isinstance(obj, RemoteObject):
return self._dump_objectreference(obj)
if hasattr(obj, 'get_method'):
return self._dump_remoteobject(obj)
raise TypeError("Python object %s laks a 'get_method' and "
"is not serializable!" % repr(obj))
def _dump_functionreference(self, obj):
""" Converts obj to a JSON hinted-class functionreference"""
return {'__functionreference__': obj.__name__}
def _dump_objectreference(self, obj):
""" Converts obj to a JSON hinted-class objectreference"""
return {'__objectreference__': obj.name}
def _dump_remoteobject(self, obj):
"""
Converts obj to a JSON hinted-class remoteobject, creating
a RemoteObject if necessary
"""
# An object can be remotely called if :
# - it derives from object (new-style classes)
# - it is an instance
# - has an internal function _get_method to handle remote calls
if not hasattr(obj, '__remoteobjects__'):
obj.__remoteobjects__ = {}
if self in obj.__remoteobjects__:
instancename = obj.__remoteobjects__[self]
else:
classname = obj.__class__.__name__
instancename = "%s_%04x" % (classname.lower(), self.get_id())
self._objects[instancename] = obj
obj.__remoteobjects__[self] = instancename
return {'__remoteobject__': instancename}
def _format_exception(self, obj, method, args, kw, exc):
etype, evalue, etb = exc
funargs = ", ".join(
[repr(x) for x in args] +
["%s=%r" % (k, kw[k]) for k in kw]
)
if len(funargs) > 40:
funargs = funargs[:37] + "..."
_log.error("(%s) In Handler method %s.%s(%s) ",
obj.__class__.__module__,
obj.__class__.__name__,
method,
funargs)
_log.debug("\n".join(["%s::%s:%d %s" % (
filename, fnname,
lineno, srcline)
for filename, lineno, fnname, srcline
in traceback.extract_tb(etb)[1:]]))
_log.error("Unhandled error: %s: %s", etype.__name__, evalue)
del etb
return '%s: %s' % (etype.__name__, evalue)
def _dispatch_delete(self, objectname):
try:
self._objects[objectname]._shutdown()
except Exception:
_log.error("Error when shutting down the object %s:",
type(self._objects[objectname]))
_log.debug(traceback.format_exc())
del self._objects[objectname]
def _extract_params(self, request):
req_method = request.get("method")
req_args = request.get("params", [])
if type(req_args) is dict:
req_kwargs = req_args
req_args = []
else:
req_kwargs = request.get("kwparams", {})
if req_kwargs:
req_kwargs = dict((str(k), req_kwargs[k]) for k in req_kwargs)
return req_method, req_args, req_kwargs
def _find_object(self, req_method, req_args, req_kwargs):
if '.' in req_method: # local-object.
objectname, req_method = req_method.split('.')[:2]
if objectname not in self._objects:
raise ValueError("Invalid object identifier")
elif req_method == '__delete__':
self._dispatch_delete(objectname)
else:
return self._objects[objectname]
else:
return self.handler
def _find_method(self, req_object, req_method, req_args, req_kwargs):
"""
Finds the method to process one request.
"""
try:
req_function = req_object.get_method(req_method)
return req_function
except ServerError as err:
return str(err)
except Exception:
err = self._format_exception(req_object, req_method,
req_args, req_kwargs,
sys.exc_info())
return err
def dispatch_until_empty(self):
"""
Calls *read_and_dispatch* method until there are no more messages to
dispatch in the buffer.
Returns the number of operations that succeeded.
This method will never block waiting. If there aren't
any more messages that can be processed, it returns.
"""
ready_to_read = select.select(
[self._sck], # read
[], [], # write, errors
0 # timeout
)[0]
if not ready_to_read:
return 0
newline_idx = 0
count = 0
while newline_idx != -1:
if not self.read_and_dispatch(timeout=0):
break
count += 1
newline_idx = self._buffer.find(b'\n')
return count
def read_and_dispatch(self, timeout=None, thread=True, condition=None):
"""
Read one message from socket (with timeout specified by the optional
argument *timeout*) and dispatches that message.
Parameters:
**timeout** = None
Timeout in seconds of the read operation. If it is None
(or omitted) then the read will wait
until new data is available.
**(return value)**
True if the operation has succeeded and **one** message
has been dispatched. False if no data or malformed data has
been received.
"""
self.read_lock.acquire()
self.reading_event.set()
try:
if condition:
if condition() is False:
return False
if thread:
dispatch_item = self.dispatch_item_threaded
else:
dispatch_item = self.dispatch_item_single
data = self.read(timeout=timeout)
if not data:
return False
try:
item = json.loads(data, self)
if type(item) is list: # batch call
for i in item:
dispatch_item(i)
elif type(item) is dict: # std call
if 'result' in item:
self.dispatch_item_single(item)
else:
dispatch_item(item)
else: # Unknown format :-(
_log.debug("Received message with unknown format type: %s", type(item))
return False
except Exception:
_log.debug(traceback.format_exc())
return False
return True
finally:
self.reading_event.clear()
self.read_lock.release()
def dispatch_item_threaded(self, item):
"""
If threaded mode is activated, this function creates a new thread for
each item received and returns without blocking.
"""
if self.threaded:
th1 = threading.Thread(target=self.dispatch_item_single, args=[item])
th1.start()
return True
else:
return self.dispatch_item_single(item)
def _send(self, response):
txtResponse = None
try:
txtResponse = json.dumps(response, self)
except Exception as e:
_log.error("An unexpected error ocurred when trying to create the message: %r", e)
response = {
'result': None,
'error': "InternalServerError: " + repr(e)
}
txtResponse = json.dumps(response, self)
try:
self.write(txtResponse)
except TypeError:
_log.debug("response was: %r", response)
raise
def _send_response(self, item, response):
if item.get('id') is not None:
ret = {'result': response, 'error': None, 'id': item['id']}
self._send(ret)
def _send_error(self, item, err):
if item.get('id') is not None:
ret = {'result': None, 'error': err, 'id': item['id']}
self._send(ret)
def dispatch_item_single(self, item):
"""
Given a JSON item received from socket, determine its type and
process the message.
"""
assert(type(item) is dict)
item.setdefault('id', None)
if 'method' in item:
method, args, kw = self._extract_params(item)
obj = self._find_object(method, args, kw)
if obj is None:
return
fn = self._find_method(obj, method, args, kw)
try:
if inspect.isgeneratorfunction(fn):
for response in fn(*args, **kw):
self._send_response(item, response)
elif callable(fn):
self._send_response(item, fn(*args, **kw))
elif fn:
self._send_error(item, fn)
except ServerError as exc:
self._send_error(item, str(exc))
except Exception:
err = self._format_exception(obj, method, args, kw,
sys.exc_info())
self._send_error(item, err)
elif 'result' in item:
assert(item['id'] in self._requests)
request = self._requests[item['id']]
request.setresponse(item)
else:
self._send_error(item, 'Unknown format')
return True
def proxy(self, sync_type, name, args, kwargs):
"""
Call method on server.
sync_type ::
= 0 .. call method, wait, get response.
= 1 .. call method, immediate return of object.
= 2 .. call notification and exit.
= 3 .. call method, immediate return of non-auto-close object.
"""
data = {}
data['method'] = name
if sync_type in [0, 1, 3]:
data['id'] = self.get_id()
if len(args) > 0:
data['params'] = args
if len(kwargs) > 0:
if len(args) == 0:
data['params'] = kwargs
else:
data['kwparams'] = kwargs
if sync_type == 2: # short-circuit for speed!
self.write(json.dumps(data, self))
return None
req = Request(self, data)
if sync_type == 0:
return req.value
if sync_type == 3:
req.auto_close = False
return req
def close(self):
"""
Close the connection and the socket.
"""
if self.connection_status == "closed":
return
item = {
'abort': True,
'event': threading.Event()
}
self.write_thread_queue.append(item)
self.write_thread_semaphore.release() # notify new item.
item['event'].wait(1)
if not item['event'].isSet():
_log.warning("write thread doesn't process our abort command")
try:
self.handler._shutdown()
except Exception:
_log.error("Error when shutting down the handler: %s",
traceback.format_exc())
try:
self._sck.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._sck.close()
self.connection_status = "closed"
def write_line(self, data):
"""
Write a line *data* to socket. It appends a **newline** at
the end of the *data* before sending it.
The string MUST NOT contain a **newline**, otherwise an AssertionError will
be raised.
Parameters:
**data**
String containing the data to be sent.
"""
assert('\n' not in data)
self.write_lock.acquire()
try:
try:
data = data.encode('utf-8')
except AttributeError:
pass
if self._debug_socket:
_log.debug("<:%d: %s", len(data), data.decode('utf-8')[:130])
self._wbuffer += data + b'\n'
sbytes = 0
while self._wbuffer:
try:
sbytes = self._sck.send(self._wbuffer)
except IOError:
_log.debug("Read socket error: IOError (timeout: %r)",
self._sck.gettimeout())
_log.debug(traceback.format_exc(0))
return 0
except socket.error:
_log.debug("Read socket error: socket.error (timeout: %r)",
self._sck.gettimeout())
_log.debug(traceback.format_exc(0))
return 0
except:
raise
if sbytes == 0:
break
self._wbuffer = self._wbuffer[sbytes:]
if self._wbuffer:
_log.warning("%d bytes left in write buffer", len(self._wbuffer))
return len(self._wbuffer)
finally:
self.write_lock.release()
def read_line(self):
"""
Read a line of *data* from socket. It removes the `\\n` at
the end before returning the value.
If the original packet contained `\\n`, the message will be decoded
as two or more messages.
Returns the line of *data* received from the socket.
"""
self.read_lock.acquire()
try:
data = self._readn()
if len(data) and self._debug_socket:
_log.debug(">:%d: %s", len(data), data.decode('utf-8')[:130])
return data.decode('utf-8')
finally:
self.read_lock.release()
def settimeout(self, operation, timeout):
"""
Configures a timeout for the connection for a given operation.
*operation* is one of "read" or "write".
"""
if operation in self._maxtimeout:
maxtimeout = self._maxtimeout[operation]
else:
maxtimeout = None
if maxtimeout is not None:
if timeout is None or timeout > maxtimeout:
timeout = maxtimeout
self._sck.settimeout(timeout)
def write_thread(self):
abort = False
while not abort:
self.write_thread_semaphore.acquire()
try:
item = self.write_thread_queue.pop(0)
except IndexError: # pop from empty list?
_log.warning("write queue was empty??")
continue
abort = item.get("abort", False)
event = item.get("event")
write_data = item.get("write_data")
if write_data:
item["result"] = self.write_now(write_data)
if event:
event.set()
if self._debug_socket:
_log.debug("Writing thread finished.")
def write(self, data, timeout=None):
item = {
'write_data': data
}
self.write_thread_queue.append(item)
self.write_thread_semaphore.release() # notify new item.
def write_now(self, data, timeout=None):
"""
Standard function to write to the socket
which by default points to write_line
"""
# self.scklock.acquire()
self.settimeout("write", timeout)
ret = None
# try:
ret = self.write_line(data)
# finally:
# self.scklock.release()
return ret
def read(self, timeout=None):
"""
Standard function to read from the socket
which by default points to read_line
"""
ret = None
self.scklock.acquire()
self.settimeout("read", timeout)
try:
ret = self.read_line()
finally:
self.scklock.release()
return ret
def _readn(self):
"""
Internal function which reads from socket waiting for a newline
"""
streambuffer = self._buffer
pos = streambuffer.find(b'\n')
# _log.debug("read...")
# retry = 0
while pos == -1:
data = b''
try:
data = self._sck.recv(2048)
except IOError as inst:
_log.debug("Read socket error: IOError%r (timeout: %r)",
inst.args, self._sck.gettimeout())
if inst.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
if self._sck.gettimeout() == 0: # if it was too fast
self._sck.settimeout(5)
continue
# time.sleep(0.5)
# retry += 1
# if retry < 10:
# _log.debug("Retry %s", retry)
# continue
# _log.debug(traceback.format_exc(0))
if inst.errno in self._SOCKET_COMM_ERRORS:
raise EofError(len(streambuffer))
return b''
except socket.error as inst:
_log.error("Read socket error: socket.error%r (timeout: %r)",
inst.args, self._sck.gettimeout())
# _log.debug(traceback.format_exc(0))
return b''
except:
raise
if not data:
raise EofError(len(streambuffer))
# _log.debug("readbuf+: %r", data)
streambuffer += data
pos = streambuffer.find(b'\n')
self._buffer = streambuffer[pos + 1:]
streambuffer = streambuffer[:pos]
# _log.debug("read: %r", buffer)
return streambuffer
def serve(self):
"""
Basic function to start serving the connection. Usually it is better to
use the server.Server class to do this, but this can also be useful if it
is run from a separate Thread.
"""
try:
while True:
self.read_and_dispatch()
finally:
self.close()
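# Illustrative sketch (not part of the original module): wiring a Connection to a
# minimal handler and serving it from a background thread. _EchoHandlerSketch is
# a hypothetical stand-in for a BaseHandler subclass; only the pieces Connection
# actually touches (the factory call, get_method and _shutdown) are provided.
class _EchoHandlerSketch(object):
    def __init__(self, connection):
        self._conn = connection     # Connection passes itself to handler_factory

    def get_method(self, name):
        return getattr(self, name)  # used by Connection._find_method

    def echo(self, text):
        return text                 # a public method callable by the peer

    def _shutdown(self):
        pass                        # called from Connection.close()


def _serve_connection_sketch(sck):
    conn = Connection(sck, handler_factory=_EchoHandlerSketch)
    worker = threading.Thread(target=conn.serve)
    worker.daemon = True
    worker.start()                  # serve() reads and dispatches until EOF
    return conn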
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a variety of device interactions with power.
"""
# pylint: disable=unused-argument
import collections
import contextlib
import csv
import logging
from pylib import constants
from pylib.device import decorators
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import timeout_retry
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 3
_CONTROL_CHARGING_COMMANDS = [
{
# Nexus 4
'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
'disable_command':
'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
},
{
# Nexus 5
# Setting the HIZ bit of the bq24192 causes the charger to actually ignore
# energy coming from USB. Setting the power_supply offline just updates the
# Android system to reflect that.
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online'),
},
]
# The list of useful dumpsys columns.
# Index of the column containing the format version.
_DUMP_VERSION_INDEX = 0
# Index of the column containing the type of the row.
_ROW_TYPE_INDEX = 3
# Index of the column containing the uid.
_PACKAGE_UID_INDEX = 4
# Index of the column containing the application package.
_PACKAGE_NAME_INDEX = 5
# The column containing the uid of the power data.
_PWI_UID_INDEX = 1
# The column containing the type of consumption. Only consumption since the
# last charge is of interest here.
_PWI_AGGREGATION_INDEX = 2
# The column containing the amount of power used, in mah.
_PWI_POWER_CONSUMPTION_INDEX = 5
class BatteryUtils(object):
def __init__(self, device, default_timeout=_DEFAULT_TIMEOUT,
default_retries=_DEFAULT_RETRIES):
"""BatteryUtils constructor.
Args:
device: A DeviceUtils instance.
default_timeout: An integer containing the default number of seconds to
wait for an operation to complete if no explicit value
is provided.
default_retries: An integer containing the default number of times an
operation should be retried on failure if no explicit
value is provided.
Raises:
TypeError: If it is not passed a DeviceUtils instance.
"""
if not isinstance(device, device_utils.DeviceUtils):
raise TypeError('Must be initialized with DeviceUtils object.')
self._device = device
self._cache = device.GetClientCache(self.__class__.__name__)
self._default_timeout = default_timeout
self._default_retries = default_retries
@decorators.WithTimeoutAndRetriesFromInstance()
def GetNetworkData(self, package, timeout=None, retries=None):
""" Get network data for specific package.
Args:
package: package name you want network data for.
timeout: timeout in seconds
retries: number of retries
Returns:
Tuple of (sent_data, received_data)
None if no network data found
"""
# If device_utils clears cache, cache['uids'] doesn't exist
if 'uids' not in self._cache:
self._cache['uids'] = {}
if package not in self._cache['uids']:
self.GetPowerData()
if package not in self._cache['uids']:
logging.warning('No UID found for %s. Can\'t get network data.',
package)
return None
network_data_path = '/proc/uid_stat/%s/' % self._cache['uids'][package]
try:
send_data = int(self._device.ReadFile(network_data_path + 'tcp_snd'))
# If ReadFile throws exception, it means no network data usage file for
# package has been recorded. Return 0 sent and 0 received.
except device_errors.AdbShellCommandFailedError:
logging.warning('No sent data found for package %s', package)
send_data = 0
try:
recv_data = int(self._device.ReadFile(network_data_path + 'tcp_rcv'))
except device_errors.AdbShellCommandFailedError:
logging.warning('No received data found for package %s', package)
recv_data = 0
return (send_data, recv_data)
@decorators.WithTimeoutAndRetriesFromInstance()
def GetPowerData(self, timeout=None, retries=None):
""" Get power data for device.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
Dict of power data, keyed on package names.
{
package_name: {
'uid': uid,
'data': [1,2,3]
},
}
"""
if 'uids' not in self._cache:
self._cache['uids'] = {}
dumpsys_output = self._device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True)
csvreader = csv.reader(dumpsys_output)
pwi_entries = collections.defaultdict(list)
for entry in csvreader:
if entry[_DUMP_VERSION_INDEX] not in ['8', '9']:
# Wrong dumpsys version.
raise device_errors.DeviceVersionError(
'Dumpsys version must be 8 or 9. %s found.'
% entry[_DUMP_VERSION_INDEX])
if _ROW_TYPE_INDEX < len(entry) and entry[_ROW_TYPE_INDEX] == 'uid':
current_package = entry[_PACKAGE_NAME_INDEX]
if (self._cache['uids'].get(current_package)
and self._cache['uids'].get(current_package)
!= entry[_PACKAGE_UID_INDEX]):
raise device_errors.CommandFailedError(
'Package %s found multiple times with different UIDs %s and %s'
% (current_package, self._cache['uids'][current_package],
entry[_PACKAGE_UID_INDEX]))
self._cache['uids'][current_package] = entry[_PACKAGE_UID_INDEX]
elif (_PWI_POWER_CONSUMPTION_INDEX < len(entry)
and entry[_ROW_TYPE_INDEX] == 'pwi'
and entry[_PWI_AGGREGATION_INDEX] == 'l'):
pwi_entries[entry[_PWI_UID_INDEX]].append(
float(entry[_PWI_POWER_CONSUMPTION_INDEX]))
return {p: {'uid': uid, 'data': pwi_entries[uid]}
for p, uid in self._cache['uids'].iteritems()}
@decorators.WithTimeoutAndRetriesFromInstance()
def GetPackagePowerData(self, package, timeout=None, retries=None):
""" Get power data for particular package.
Args:
package: Package to get power data on.
Returns:
Dict of UID and power data.
{
'uid': uid,
'data': [1,2,3]
}
None if the package is not found in the power data.
"""
return self.GetPowerData().get(package)
@decorators.WithTimeoutAndRetriesFromInstance()
def GetBatteryInfo(self, timeout=None, retries=None):
"""Gets battery info for the device.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
A dict containing various battery information as reported by dumpsys
battery.
"""
result = {}
# Skip the first line, which is just a header.
for line in self._device.RunShellCommand(
['dumpsys', 'battery'], check_return=True)[1:]:
# If usb charging has been disabled, an extra line of header exists.
if 'UPDATES STOPPED' in line:
logging.warning('Dumpsys battery not receiving updates. '
'Run dumpsys battery reset if this is in error.')
elif ':' not in line:
logging.warning('Unknown line found in dumpsys battery: "%s"', line)
else:
k, v = line.split(':', 1)
result[k.strip()] = v.strip()
return result
@decorators.WithTimeoutAndRetriesFromInstance()
def GetCharging(self, timeout=None, retries=None):
"""Gets the charging state of the device.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device is charging, false otherwise.
"""
battery_info = self.GetBatteryInfo()
for k in ('AC powered', 'USB powered', 'Wireless powered'):
if (k in battery_info and
battery_info[k].lower() in ('true', '1', 'yes')):
return True
return False
@decorators.WithTimeoutAndRetriesFromInstance()
def SetCharging(self, enabled, timeout=None, retries=None):
"""Enables or disables charging on the device.
Args:
enabled: A boolean indicating whether charging should be enabled or
disabled.
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.CommandFailedError: If method of disabling charging cannot
be determined.
"""
if 'charging_config' not in self._cache:
for c in _CONTROL_CHARGING_COMMANDS:
if self._device.FileExists(c['witness_file']):
self._cache['charging_config'] = c
break
else:
raise device_errors.CommandFailedError(
'Unable to find charging commands.')
if enabled:
command = self._cache['charging_config']['enable_command']
else:
command = self._cache['charging_config']['disable_command']
def set_and_verify_charging():
self._device.RunShellCommand(command, check_return=True)
return self.GetCharging() == enabled
timeout_retry.WaitFor(set_and_verify_charging, wait_period=1)
# TODO(rnephew): Make private when all use cases can use the context manager.
@decorators.WithTimeoutAndRetriesFromInstance()
def DisableBatteryUpdates(self, timeout=None, retries=None):
""" Resets battery data and makes device appear like it is not
charging so that it will collect power data since last charge.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.CommandFailedError: When resetting batterystats fails to
reset power values.
device_errors.DeviceVersionError: If device is not L or higher.
"""
def battery_updates_disabled():
return self.GetCharging() is False
if (self._device.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
raise device_errors.DeviceVersionError('Device must be L or higher.')
self._device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True)
self._device.RunShellCommand(
['dumpsys', 'batterystats', '--reset'], check_return=True)
battery_data = self._device.RunShellCommand(
['dumpsys', 'batterystats', '--charged', '--checkin'],
check_return=True)
ROW_TYPE_INDEX = 3
PWI_POWER_INDEX = 5
for line in battery_data:
l = line.split(',')
if (len(l) > PWI_POWER_INDEX and l[ROW_TYPE_INDEX] == 'pwi'
and l[PWI_POWER_INDEX] != 0):
raise device_errors.CommandFailedError(
'Non-zero pwi value found after reset.')
self._device.RunShellCommand(['dumpsys', 'battery', 'set', 'ac', '0'],
check_return=True)
self._device.RunShellCommand(['dumpsys', 'battery', 'set', 'usb', '0'],
check_return=True)
timeout_retry.WaitFor(battery_updates_disabled, wait_period=1)
# TODO(rnephew): Make private when all use cases can use the context manager.
@decorators.WithTimeoutAndRetriesFromInstance()
def EnableBatteryUpdates(self, timeout=None, retries=None):
""" Restarts device charging so that dumpsys no longer collects power data.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.DeviceVersionError: If device is not L or higher.
"""
def battery_updates_enabled():
return self.GetCharging() is True
if (self._device.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
raise device_errors.DeviceVersionError('Device must be L or higher.')
self._device.RunShellCommand(['dumpsys', 'battery', 'reset'],
check_return=True)
timeout_retry.WaitFor(battery_updates_enabled, wait_period=1)
@contextlib.contextmanager
def BatteryMeasurement(self, timeout=None, retries=None):
"""Context manager that enables battery data collection. It makes
the device appear to stop charging so that dumpsys will start collecting
power data since last charge. Once the with block is exited, charging is
resumed and power data since last charge is no longer collected.
Only for devices L and higher.
Example usage:
with BatteryMeasurement():
browser_actions()
get_power_data() # report usage within this block
after_measurements() # Anything that runs after power
# measurements are collected
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.DeviceVersionError: If device is not L or higher.
"""
if (self._device.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
raise device_errors.DeviceVersionError('Device must be L or higher.')
try:
self.DisableBatteryUpdates(timeout=timeout, retries=retries)
yield
finally:
self.EnableBatteryUpdates(timeout=timeout, retries=retries)
def ChargeDeviceToLevel(self, level, wait_period=60):
"""Enables charging and waits for device to be charged to given level.
Args:
level: level of charge to wait for.
wait_period: time in seconds to wait between checking.
"""
self.SetCharging(True)
def device_charged():
battery_level = self.GetBatteryInfo().get('level')
if battery_level is None:
logging.warning('Unable to find current battery level.')
battery_level = 100
else:
logging.info('current battery level: %s', battery_level)
battery_level = int(battery_level)
return battery_level >= level
timeout_retry.WaitFor(device_charged, wait_period=wait_period)
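# Illustrative usage sketch (not part of the original module): collect power data
# for one package while a workload runs. The |workload| callable and the package
# name are hypothetical placeholders.
def _example_power_measurement(device, workload, package='com.example.app'):
  battery = BatteryUtils(device)
  with battery.BatteryMeasurement():  # stops charging, resets batterystats
    workload()                        # run the scenario under test here
  data = battery.GetPackagePowerData(package)
  # GetPackagePowerData returns {'uid': ..., 'data': [...]} or None.
  return sum(data['data']) if data else 0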
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from oslo_serialization import jsonutils
import six
from heat.api.aws import utils as aws_utils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import function
class FindInMap(function.Function):
"""A function for resolving keys in the template mappings.
Takes the form::
{ "Fn::FindInMap" : [ "mapping",
"key",
"value" ] }
"""
def __init__(self, stack, fn_name, args):
super(FindInMap, self).__init__(stack, fn_name, args)
try:
self._mapname, self._mapkey, self._mapvalue = self.args
except ValueError as ex:
raise KeyError(six.text_type(ex))
def result(self):
mapping = self.stack.t.maps[function.resolve(self._mapname)]
key = function.resolve(self._mapkey)
value = function.resolve(self._mapvalue)
return mapping[key][value]
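# Illustrative example (not from this module; the mapping name and values are
# hypothetical): with a template mapping such as
#
#     "Mappings": {"RegionMap": {"us-east-1": {"AMI": "ami-123"}}}
#
# the function {"Fn::FindInMap": ["RegionMap", "us-east-1", "AMI"]} resolves to
# "ami-123", i.e. mapping[key][value] with key "us-east-1" and value "AMI".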
class GetAZs(function.Function):
"""A function for retrieving the availability zones.
Takes the form::
{ "Fn::GetAZs" : "<region>" }
"""
def result(self):
# TODO(therve): Implement region scoping
if self.stack is None:
return ['nova']
else:
return self.stack.get_availability_zones()
class ParamRef(function.Function):
"""A function for resolving parameter references.
Takes the form::
{ "Ref" : "<param_name>" }
"""
def __init__(self, stack, fn_name, args):
super(ParamRef, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
param_name = function.resolve(self.args)
try:
return self.parameters[param_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=param_name,
key='unknown')
class ResourceRef(function.Function):
"""A function for resolving resource references.
Takes the form::
{ "Ref" : "<resource_name>" }
"""
def _resource(self, path='unknown'):
resource_name = function.resolve(self.args)
try:
return self.stack[resource_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=resource_name,
key=path)
def dependencies(self, path):
return itertools.chain(super(ResourceRef, self).dependencies(path),
[self._resource(path)])
def result(self):
return self._resource().FnGetRefId()
def Ref(stack, fn_name, args):
"""A function for resolving parameters or resource references.
Takes the form::
{ "Ref" : "<param_name>" }
or::
{ "Ref" : "<resource_name>" }
"""
if args in stack:
RefClass = ResourceRef
else:
RefClass = ParamRef
return RefClass(stack, fn_name, args)
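# Illustrative note (not from this module; the names are hypothetical): Ref
# dispatches on whether the referenced name is a resource in the stack. For a
# template with a resource "MyInstance" and a parameter "KeyName":
#
#     {"Ref": "MyInstance"}  -> ResourceRef -> the resource's FnGetRefId()
#     {"Ref": "KeyName"}     -> ParamRef    -> the parameter's current value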
class GetAtt(function.Function):
"""A function for resolving resource attributes.
Takes the form::
{ "Fn::GetAtt" : [ "<resource_name>",
"<attribute_name" ] }
"""
def __init__(self, stack, fn_name, args):
super(GetAtt, self).__init__(stack, fn_name, args)
self._resource_name, self._attribute = self._parse_args()
def _parse_args(self):
try:
resource_name, attribute = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute]') % self.fn_name)
return resource_name, attribute
def _resource(self, path='unknown'):
resource_name = function.resolve(self._resource_name)
try:
return self.stack[resource_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=resource_name,
key=path)
def dep_attrs(self, resource_name):
if self._resource().name == resource_name:
attrs = [function.resolve(self._attribute)]
else:
attrs = []
return itertools.chain(super(GetAtt, self).dep_attrs(resource_name),
attrs)
def dependencies(self, path):
return itertools.chain(super(GetAtt, self).dependencies(path),
[self._resource(path)])
def _allow_without_attribute_name(self):
return False
def validate(self):
super(GetAtt, self).validate()
res = self._resource()
if self._allow_without_attribute_name():
# if allow without attribute_name, then don't check
# when attribute_name is None
if self._attribute is None:
return
attr = function.resolve(self._attribute)
from heat.engine import resource
if (type(res).get_attribute == resource.Resource.get_attribute and
attr not in res.attributes_schema):
raise exception.InvalidTemplateAttribute(
resource=self._resource_name, key=attr)
def result(self):
attribute = function.resolve(self._attribute)
r = self._resource()
if r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.ROLLBACK, r.SNAPSHOT, r.CHECK):
return r.FnGetAtt(attribute)
# NOTE(sirushtim): Add r.INIT to states above once convergence
# is the default.
elif r.stack.has_cache_data(r.name) and r.action == r.INIT:
return r.FnGetAtt(attribute)
else:
return None
class Select(function.Function):
"""A function for selecting an item from a list or map.
Takes the form (for a list lookup)::
{ "Fn::Select" : [ "<index>", [ "<value_1>", "<value_2>", ... ] ] }
Takes the form (for a map lookup)::
{ "Fn::Select" : [ "<index>", { "<key_1>": "<value_1>", ... } ] }
If the selected index is not found, this function resolves to an empty
string.
"""
def __init__(self, stack, fn_name, args):
super(Select, self).__init__(stack, fn_name, args)
try:
self._lookup, self._strings = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[index, collection]') % self.fn_name)
def result(self):
index = function.resolve(self._lookup)
strings = function.resolve(self._strings)
if strings == '':
# an empty string is a common response from other
# functions when result is not currently available.
# Handle by returning an empty string
return ''
if isinstance(strings, six.string_types):
# might be serialized json.
try:
strings = jsonutils.loads(strings)
except ValueError as json_ex:
fmt_data = {'fn_name': self.fn_name,
'err': json_ex}
raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
if isinstance(strings, collections.Mapping):
if not isinstance(index, six.string_types):
raise TypeError(_('Index to "%s" must be a string') %
self.fn_name)
return strings.get(index, '')
try:
index = int(index)
except (ValueError, TypeError):
pass
if (isinstance(strings, collections.Sequence) and
not isinstance(strings, six.string_types)):
if not isinstance(index, six.integer_types):
raise TypeError(_('Index to "%s" must be an integer') %
self.fn_name)
try:
return strings[index]
except IndexError:
return ''
if strings is None:
return ''
raise TypeError(_('Arguments to %s not fully resolved') %
self.fn_name)
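# Illustrative examples (not from this module; the values are hypothetical):
#
#     {"Fn::Select": ["1", ["apples", "grapes", "oranges"]]}  -> "grapes"
#     {"Fn::Select": ["red", {"red": "a", "blue": "b"}]}      -> "a"
#     {"Fn::Select": ["5", ["apples", "grapes"]]}             -> ""  (missing index)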
class Join(function.Function):
"""A function for joining strings.
Takes the form::
{ "Fn::Join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
"""
def __init__(self, stack, fn_name, args):
super(Join, self).__init__(stack, fn_name, args)
example = '"%s" : [ " ", [ "str1", "str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if not isinstance(self.args, list):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if strings is None:
strings = []
if (isinstance(strings, six.string_types) or
not isinstance(strings, collections.Sequence)):
raise TypeError(_('"%s" must operate on a list') % self.fn_name)
delim = function.resolve(self._delim)
if not isinstance(delim, six.string_types):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
if s is None:
return ''
if not isinstance(s, six.string_types):
raise TypeError(
_('Items to join must be strings not %s'
) % (repr(s)[:200]))
return s
return delim.join(ensure_string(s) for s in strings)
class Split(function.Function):
"""A function for splitting strings.
Takes the form::
{ "Fn::Split" : [ "<delim>", "<string_1><delim><string_2>..." ] }
And resolves to::
[ "<string_1>", "<string_2>", ... ]
"""
def __init__(self, stack, fn_name, args):
super(Split, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "str1,str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if not isinstance(self._delim, six.string_types):
raise TypeError(_("Delimiter for %s must be string") %
self.fn_name)
if not isinstance(strings, six.string_types):
raise TypeError(_("String to split must be string; got %s") %
type(strings))
return strings.split(self._delim)
class Replace(function.Function):
"""A function for performing string substitutions.
Takes the form::
{ "Fn::Replace" : [
{ "<key_1>": "<value_1>", "<key_2>": "<value_2>", ... },
"<key_1> <key_2>"
] }
And resolves to::
"<value_1> <value_2>"
This is implemented using python str.replace on each key. Longer keys are
substituted before shorter ones, but the order in which replacements are
performed is otherwise undefined.
"""
def __init__(self, stack, fn_name, args):
super(Replace, self).__init__(stack, fn_name, args)
self._mapping, self._string = self._parse_args()
if not isinstance(self._mapping,
(collections.Mapping, function.Function)):
raise TypeError(_('"%s" parameters must be a mapping') %
self.fn_name)
def _parse_args(self):
example = ('{"%s": '
'[ {"$var1": "foo", "%%var2%%": "bar"}, '
'"$var1 is %%var2%%"]}' % self.fn_name)
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
mapping, string = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
else:
return mapping, string
def result(self):
template = function.resolve(self._string)
mapping = function.resolve(self._mapping)
if not isinstance(template, six.string_types):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
if not isinstance(mapping, collections.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(string, change):
placeholder, value = change
if not isinstance(placeholder, six.string_types):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
if value is None:
value = ''
if not isinstance(value,
(six.string_types, six.integer_types,
float, bool)):
raise TypeError(_('"%s" params must be strings or numbers') %
self.fn_name)
return string.replace(placeholder, six.text_type(value))
mapping = collections.OrderedDict(sorted(mapping.items(),
key=lambda t: len(t[0]),
reverse=True))
return six.moves.reduce(replace, six.iteritems(mapping), template)
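# Illustrative example (not from this module; the variable names are
# hypothetical): because keys are substituted longest-first,
#
#     {"Fn::Replace": [{"$var1": "a", "$var10": "b"}, "$var10 and $var1"]}
#
# resolves to "b and a" rather than "a0 and a".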
class Base64(function.Function):
"""A placeholder function for converting to base64.
Takes the form::
{ "Fn::Base64" : "<string>" }
This function actually performs no conversion. It is included for the
benefit of templates that convert UserData to Base64. Heat accepts UserData
in plain text.
"""
def result(self):
resolved = function.resolve(self.args)
if not isinstance(resolved, six.string_types):
raise TypeError(_('"%s" argument must be a string') % self.fn_name)
return resolved
class MemberListToMap(function.Function):
"""A function to convert lists with enumerated keys and values to mapping.
Takes the form::
{ 'Fn::MemberListToMap' : [ 'Name',
'Value',
[ '.member.0.Name=<key_0>',
'.member.0.Value=<value_0>',
... ] ] }
And resolves to::
{ "<key_0>" : "<value_0>", ... }
The first two arguments are the names of the key and value.
"""
def __init__(self, stack, fn_name, args):
super(MemberListToMap, self).__init__(stack, fn_name, args)
try:
self._keyname, self._valuename, self._list = self.args
except ValueError:
correct = '''
{'Fn::MemberListToMap': ['Name', 'Value',
['.member.0.Name=key',
'.member.0.Value=door']]}
'''
raise TypeError(_('Wrong Arguments try: "%s"') % correct)
if not isinstance(self._keyname, six.string_types):
raise TypeError(_('%s Key Name must be a string') % self.fn_name)
if not isinstance(self._valuename, six.string_types):
raise TypeError(_('%s Value Name must be a string') % self.fn_name)
def result(self):
member_list = function.resolve(self._list)
if not isinstance(member_list, collections.Iterable):
raise TypeError(_('Member list must be a list'))
def item(s):
if not isinstance(s, six.string_types):
raise TypeError(_("Member list items must be strings"))
return s.split('=', 1)
partials = dict(item(s) for s in member_list)
return aws_utils.extract_param_pairs(partials,
prefix='',
keyname=self._keyname,
valuename=self._valuename)
class ResourceFacade(function.Function):
"""A function for retrieving data in a parent provider template.
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
{ "Fn::ResourceFacade": "<attribute_type>" }
where the valid attribute types are "Metadata", "DeletionPolicy" and
"UpdatePolicy".
"""
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'Metadata', 'DeletionPolicy', 'UpdatePolicy'
)
def __init__(self, stack, fn_name, args):
super(ResourceFacade, self).__init__(stack, fn_name, args)
if self.args not in self._RESOURCE_ATTRIBUTES:
fmt_data = {'fn_name': self.fn_name,
'allowed': ', '.join(self._RESOURCE_ATTRIBUTES)}
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be one of: %(allowed)s') % fmt_data)
def result(self):
attr = function.resolve(self.args)
if attr == self.METADATA:
return self.stack.parent_resource.metadata_get()
elif attr == self.UPDATE_POLICY:
up = self.stack.parent_resource.t._update_policy or {}
return function.resolve(up)
elif attr == self.DELETION_POLICY:
return self.stack.parent_resource.t.deletion_policy()
class Not(function.Function):
"""A function acts as a NOT operator.
Takes the form::
{ "Fn::Not" : [condition] }
Returns true for a condition that evaluates to false or
returns false for a condition that evaluates to true.
"""
def __init__(self, stack, fn_name, args):
super(Not, self).__init__(stack, fn_name, args)
try:
if (not self.args or
not isinstance(self.args, collections.Sequence) or
isinstance(self.args, six.string_types)):
raise ValueError()
if len(self.args) != 1:
raise ValueError()
self.condition = self.args[0]
except ValueError:
msg = _('Arguments to "%s" must be of the form: '
'[condition]')
raise ValueError(msg % self.fn_name)
def result(self):
resolved_value = function.resolve(self.condition)
if not isinstance(resolved_value, bool):
msg = _('The condition value should be boolean; '
'after resolution the value is: %s')
raise ValueError(msg % resolved_value)
return not resolved_value
|
|
import sys
import socket
import threading
user_recv_tasker_name_get_stat = "block"
user_message_sender_name_get_stat = "block"
login_user_socket = {}
login_user_message = {}
login_user_list = []
login_user_name = ""
def log_in_check( user_name , password ):
if user_name == "Mark":
if password == "IAMMARK":
return "checkOK"
if user_name == "Anna":
if password == "123456":
return "checkOK"
if user_name == "Bob":
if password == "BoomBoom":
return "checkOK"
if user_name == "Frank2015":
if password == "2015Frank":
return "checkOK"
if user_name == "Test":
if password == "Test":
return "checkOK"
return "noncheckOK"
def user_message_send():
global user_message_sender_name_get_stat
global login_user_name
global login_user_socket
global login_user_list
global login_user_message
while user_message_sender_name_get_stat == "block":
pass
user_name = login_user_name
user_message_sender_name_get_stat = "block"
user = login_user_socket[user_name]
print user_name + " sender build"
while user_name in login_user_list:
if len( login_user_message[user_name] ) > 0:
user.sendall( login_user_message[user_name].pop(0) )
def user_recv_tasker():
global login_user_name
global login_user_socket
global login_user_message
global login_user_list
global user_recv_tasker_name_get_stat
while user_recv_tasker_name_get_stat == "block":
pass
user_name = login_user_name
user_recv_tasker_name_get_stat = "block"
user = login_user_socket[ user_name ]
print user_name + " recver build"
user_message_sender = threading.Thread( None , user_message_send , None , () , )
user_message_sender.start()
while user_name in login_user_list:
user_command = user.recv(1500)
if not user_command:
login_user_list.remove(user_name)
user_message_sender.join()
user.close()
login_user_socket.pop( user_name )
break
user_command_list = user_command.split(";")
if user_command_list[0] in login_user_list:
if len( user_command_list ) == 2:
login_user_message[ user_command_list[0] ].append( ">" + user_name + " : " + user_command_list[1] )
if user_command_list[0] == "listuser":
result = ""
i = 0
while i < len( login_user_list ):
result = result + " " + login_user_list[i]
i = i + 1
user.sendall(result)
if user_command_list[0] == "broadcast":
if len( user_command_list ) == 2:
i = 0
while i < len( login_user_list ):
login_user_message[ login_user_list[i] ].append( ">" + "broadcast:" + user_command_list[1] )
i = i + 1
if user_command_list[0] == "send":
if len( user_command_list ) == 3:
login_user_message[ user_command_list[1] ].append( ">" + user_name + " send: " + user_command_list[2] )
login_user_message.setdefault( "Mark" , [] )
login_user_message.setdefault( "Test" , [] )
login_user_message.setdefault( "Frank2015" , [] )
login_user_message.setdefault( "Bob" , [] )
login_user_message.setdefault( "Anna" , [] )
server = socket.socket( socket.AF_INET , socket.SOCK_STREAM )
server.bind(("127.0.0.1",11111))
while 1:
server.listen(5)
user,client_address = server.accept()
command = user.recv(1500)
command_list = command.split(";")
while log_in_check( command_list[1] , command_list[2] ) != "checkOK":
user.sendall("nonloginOK")
command = user.recv(1500)
command_list = command.split(";")
user.sendall("loginOK")
print command_list[1] + " log in OK"
login_user_list.append( command_list[1] )
login_user_name = command_list[1]
login_user_socket.setdefault( command_list[1] , user )
threading.Thread( None , user_recv_tasker , None , () , ).start()
user_recv_tasker_name_get_stat = "notget"
user_message_sender_name_get_stat = "notget"
while ( user_recv_tasker_name_get_stat == "notget" ) or ( user_message_sender_name_get_stat == "notget" ):
pass
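# Illustrative client sketch (not part of the original script; inferred from the
# parsing above). The login message is "<ignored>;<user>;<password>" and the chat
# commands are "listuser", "broadcast;<text>" and "send;<user>;<text>".
#
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(("127.0.0.1", 11111))
#     client.sendall("login;Test;Test")
#     if client.recv(1500) == "loginOK":
#         client.sendall("broadcast;hello everyone")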
|
|
import contextlib
import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.excel import ExcelWriter
from pandas.io.formats.excel import ExcelFormatter
pytest.importorskip("jinja2")
# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
# could compute styles and render to Excel without jinja2, since there is no
# 'template' file, but this needs the import error to be delayed until render time.
def assert_equal_cell_styles(cell1, cell2):
# TODO: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
assert cell1.fill.__dict__ == cell2.fill.__dict__
assert cell1.font.__dict__ == cell2.font.__dict__
assert cell1.number_format == cell2.number_format
assert cell1.protection.__dict__ == cell2.protection.__dict__
@pytest.mark.parametrize(
"engine",
["xlsxwriter", "openpyxl"],
)
def test_styler_to_excel_unstyled(engine):
# compare DataFrame.to_excel and Styler.to_excel when no styles applied
pytest.importorskip(engine)
df = DataFrame(np.random.randn(2, 2))
with tm.ensure_clean(".xlsx") as path:
with ExcelWriter(path, engine=engine) as writer:
df.to_excel(writer, sheet_name="dataframe")
df.style.to_excel(writer, sheet_name="unstyled")
openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
assert cell1.value == cell2.value
assert_equal_cell_styles(cell1, cell2)
shared_style_params = [
(
"background-color: #111222",
["fill", "fgColor", "rgb"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
(
"color: #111222",
["font", "color", "value"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
("font-family: Arial;", ["font", "name"], "arial"),
("font-weight: bold;", ["font", "b"], True),
("font-style: italic;", ["font", "i"], True),
("text-decoration: underline;", ["font", "u"], "single"),
("number-format: $??,???.00;", ["number_format"], "$??,???.00"),
("text-align: left;", ["alignment", "horizontal"], "left"),
(
"vertical-align: bottom;",
["alignment", "vertical"],
{"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails
),
# Border widths
("border-left: 2pt solid red", ["border", "left", "style"], "medium"),
("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"),
("border-left: 2pt dotted red", ["border", "left", "style"], "mediumDashDotDot"),
("border-left: 1pt dashed red", ["border", "left", "style"], "dashed"),
("border-left: 2pt dashed red", ["border", "left", "style"], "mediumDashed"),
("border-left: 1pt solid red", ["border", "left", "style"], "thin"),
("border-left: 3pt solid red", ["border", "left", "style"], "thick"),
# Border expansion
(
"border-left: 2pt solid #111222",
["border", "left", "color", "rgb"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
("border: 1pt solid red", ["border", "top", "style"], "thin"),
(
"border: 1pt solid #111222",
["border", "top", "color", "rgb"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
("border: 1pt solid red", ["border", "right", "style"], "thin"),
(
"border: 1pt solid #111222",
["border", "right", "color", "rgb"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
("border: 1pt solid red", ["border", "bottom", "style"], "thin"),
(
"border: 1pt solid #111222",
["border", "bottom", "color", "rgb"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
("border: 1pt solid red", ["border", "left", "style"], "thin"),
(
"border: 1pt solid #111222",
["border", "left", "color", "rgb"],
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
),
]
@pytest.mark.parametrize(
"engine",
["xlsxwriter", "openpyxl"],
)
@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
def test_styler_to_excel_basic(engine, css, attrs, expected):
pytest.importorskip(engine)
df = DataFrame(np.random.randn(1, 1))
styler = df.style.applymap(lambda x: css)
with tm.ensure_clean(".xlsx") as path:
with ExcelWriter(path, engine=engine) as writer:
df.to_excel(writer, sheet_name="dataframe")
styler.to_excel(writer, sheet_name="styled")
openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
# test unstyled data cell does not have expected styles
# test styled cell has expected styles
u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2)
for attr in attrs:
u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr)
if isinstance(expected, dict):
assert u_cell is None or u_cell != expected[engine]
assert s_cell == expected[engine]
else:
assert u_cell is None or u_cell != expected
assert s_cell == expected
@pytest.mark.parametrize(
"engine",
["xlsxwriter", "openpyxl"],
)
@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
def test_styler_to_excel_basic_indexes(engine, css, attrs, expected):
pytest.importorskip(engine)
df = DataFrame(np.random.randn(1, 1))
styler = df.style
styler.applymap_index(lambda x: css, axis=0)
styler.applymap_index(lambda x: css, axis=1)
null_styler = df.style
null_styler.applymap(lambda x: "null: css;")
null_styler.applymap_index(lambda x: "null: css;", axis=0)
null_styler.applymap_index(lambda x: "null: css;", axis=1)
with tm.ensure_clean(".xlsx") as path:
with ExcelWriter(path, engine=engine) as writer:
null_styler.to_excel(writer, sheet_name="null_styled")
styler.to_excel(writer, sheet_name="styled")
openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
# test null styled index cells does not have expected styles
# test styled cell has expected styles
ui_cell, si_cell = wb["null_styled"].cell(2, 1), wb["styled"].cell(2, 1)
uc_cell, sc_cell = wb["null_styled"].cell(1, 2), wb["styled"].cell(1, 2)
for attr in attrs:
ui_cell, si_cell = getattr(ui_cell, attr, None), getattr(si_cell, attr)
uc_cell, sc_cell = getattr(uc_cell, attr, None), getattr(sc_cell, attr)
if isinstance(expected, dict):
assert ui_cell is None or ui_cell != expected[engine]
assert si_cell == expected[engine]
assert uc_cell is None or uc_cell != expected[engine]
assert sc_cell == expected[engine]
else:
assert ui_cell is None or ui_cell != expected
assert si_cell == expected
assert uc_cell is None or uc_cell != expected
assert sc_cell == expected
def test_styler_custom_converter():
openpyxl = pytest.importorskip("openpyxl")
def custom_converter(css):
return {"font": {"color": {"rgb": "111222"}}}
df = DataFrame(np.random.randn(1, 1))
styler = df.style.applymap(lambda x: "color: #888999")
with tm.ensure_clean(".xlsx") as path:
with ExcelWriter(path, engine="openpyxl") as writer:
ExcelFormatter(styler, style_converter=custom_converter).write(
writer, sheet_name="custom"
)
with contextlib.closing(openpyxl.load_workbook(path)) as wb:
assert wb["custom"].cell(2, 2).font.color.value == "00111222"
|
|
import os
import uuid
#from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from ..models import DBSession, GroupRoutePermission, Group, Route
from datatables import ColumnDT, DataTables
#from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Add routes failed'
SESS_EDIT_FAILED = 'Edit routes failed'
def deferred_source_type(node, kw):
values = kw.get('perm_choice', [])
return widget.SelectWidget(values=values)
class AddSchema(colander.Schema):
group_widget = widget.AutocompleteInputWidget(
size=60,
values = '/group/headofnama/act',
min_length=3)
route_widget = widget.AutocompleteInputWidget(
size=60,
values = '/routes/headof/act',
min_length=3)
group_id = colander.SchemaNode(
colander.Integer(),
widget = widget.HiddenWidget(),
oid = 'group_id')
group_nm = colander.SchemaNode(
colander.String(),
widget = group_widget,
oid = 'group_nm')
route_id = colander.SchemaNode(
colander.Integer(),
widget = widget.HiddenWidget(),
oid = 'route_id')
route_nm = colander.SchemaNode(
colander.String(),
widget = route_widget,
title ='Route',
oid = 'route_nm')
class EditSchema(AddSchema):
id = colander.SchemaNode(colander.String(),
missing=colander.drop,
widget=widget.HiddenWidget(readonly=True))
########
# List #
########
@view_config(route_name='group-routes', renderer='templates/group-routes/list.pt',
permission='group-routes')
def view_list(request):
return dict(a={})
##########
# Action #
##########
@view_config(route_name='group-routes-act', renderer='json',
permission='group-routes-act')
def group_routes_act(request):
ses = request.session
req = request
params = req.params
url_dict = req.matchdict
if url_dict['act']=='grid':
columns = []
columns.append(ColumnDT('group_id'))
columns.append(ColumnDT('route_id'))
columns.append(ColumnDT('groups.group_name'))
columns.append(ColumnDT('routes.nama'))
columns.append(ColumnDT('routes.path'))
query = DBSession.query(GroupRoutePermission).join(Group).join(Route)
rowTable = DataTables(req, GroupRoutePermission, query, columns)
return rowTable.output_result()
elif url_dict['act']=='changeid':
row = GroupRoutePermission.get_by_id(params.get('routes_id') or 0)
if row:
ses['routes_id']=row.id
ses['routes_kd']=row.kode
ses['routes_nm']=row.nama
return {'success':True}
#######
# Add #
#######
def form_validator(form, value):
if 'id' in form.request.matchdict:
uid = form.request.matchdict['id']
q = DBSession.query(GroupRoutePermission).filter_by(id=uid)
routes = q.first()
else:
routes = None
def get_form(request, class_form, row=None):
schema = class_form(validator=form_validator)
schema = schema.bind()
schema.request = request
if row:
schema.deserialize(row)
return Form(schema, buttons=('simpan','batal'))
def save(values, user, row=None):
if not row:
row = GroupRoutePermission()
row.created = datetime.now()
row.create_uid = user.id
row.from_dict(values)
row.updated = datetime.now()
row.update_uid = user.id
row.disabled = 1 if values.get('disabled') else 0
DBSession.add(row)
DBSession.flush()
return row
def save_request(request, values, row=None):
if 'id' in request.matchdict:
values['id'] = request.matchdict['id']
row = save(values, request.user, row)
request.session.flash('routes sudah disimpan.')
def routes_list(request):
return HTTPFound(location=request.route_url('group-routes'))
def session_failed(request, session_name):
r = dict(form=request.session[session_name])
del request.session[session_name]
return r
@view_config(route_name='group-routes-add', renderer='templates/group-routes/add.pt',
permission='group-routes-add')
def view_routes_add(request):
req = request
ses = request.session
form = get_form(request, AddSchema)
if req.POST:
if 'simpan' in req.POST:
controls = req.POST.items()
try:
c = form.validate(controls)
except ValidationFailure as e:
req.session[SESS_ADD_FAILED] = e.render()
return HTTPFound(location=req.route_url('group-routes-add'))
save_request(request, dict(controls))
return routes_list(request)
elif SESS_ADD_FAILED in req.session:
return session_failed(request, SESS_ADD_FAILED)
return dict(form=form.render())
########
# Edit #
########
def query_id(request):
return DBSession.query(GroupRoutePermission).filter_by(group_id=request.matchdict['id'],
route_id=request.matchdict['id2'])
def id_not_found(request):
msg = 'group ID %s routes ID %s Tidak Ditemukan.' % (request.matchdict['id'], request.matchdict['id2'])
request.session.flash(msg, 'error')
return routes_list(request)
##########
# Delete #
##########
@view_config(route_name='group-routes-delete', renderer='templates/group-routes/delete.pt',
permission='group-routes-delete')
def view_routes_delete(request):
q = query_id(request)
row = q.first()
if not row:
return id_not_found(request)
form = Form(colander.Schema(), buttons=('hapus','batal'))
if request.POST:
if 'hapus' in request.POST:
msg = 'group ID %d routes ID %d sudah dihapus.' % (row.group_id, row.route_id)
try:
q.delete()
DBSession.flush()
except Exception:
msg = 'group ID %d routes ID %d tidak dapat dihapus.' % (row.group_id, row.route_id)
request.session.flash(msg)
return routes_list(request)
return dict(row=row,
form=form.render())
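# A minimal sketch (hypothetical, not part of this module) of the route
# registration the @view_config decorators above assume. The route *names*
# match the decorators; the URL *patterns* are illustrative only.
def includeme(config):
    config.add_route('group-routes', '/group-routes')
    config.add_route('group-routes-act', '/group-routes/{act}')
    config.add_route('group-routes-add', '/group-routes/add')
    config.add_route('group-routes-delete', '/group-routes/delete/{id}/{id2}')
    config.scan()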
|
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""Module that defines the behaviour common to all requests handlers used by mocker.
"""
import abc
import http.server
import json
import logging
import socket
import time
import traceback
from urllib.parse import parse_qs, urlparse
from exceptions import EndpointException
# pylint: disable=C0103
log = logging.getLogger(__name__)
class BaseHTTPRequestHandler(http.server.BaseHTTPRequestHandler,
metaclass=abc.ABCMeta):
"""HTTP request handler base class that implements all common behaviour
shared across mocker's request handlers.
"""
@abc.abstractmethod
def _calculate_response(self, base_path, url_args, body_args=None):
"""Calculate response basing on the request arguments.
Methods overriding it should return a response body that reflects
requests arguments and path.
Args:
base_path (str): request's path without query parameters
url_args (dict): a dictionary containing all the query arguments
encoded in request path
body_args (dict): a dictionary containing all the arguments encoded
in the body of the request
Returns:
A (status, content_type, body) tuple, where body is a bytes array,
exactly as it should be sent to the client.
Raises:
EndpointException: This exception signals that the normal
processing of the request should be stopped and that a response
with the given status, content-type and body should be immediately
sent.
"""
pass
@abc.abstractmethod
def _parse_request_body(self):
"""Extract requests arguments encoded in it's body
Methods overriding it should parse request body in a way that's
suitable for given request handler.
Returns:
It depends on the request handler - it may be a dict, a string,
or anything/nothing.
Raises:
EndpointException: This exception signals that the normal
processing of the request should be stopped and that a response
with the given status, content-type and body should be immediately
sent.
"""
pass
def _process_commands(self, blob):
"""Process all the endpoint configuration and execute things that
user requested.
Please check the Returns section to understand how response handling
is chained when overriding this method.
Arguments:
blob (bytes array): data that is meant to be sent to the client.
Returns:
True/False depending on whether the response was handled by this
method or not. Based on this, the calling method determines whether
it should continue processing the data.
"""
ctx = self.server.context
with ctx.lock:
do_always_bork = ctx.data['always_bork']
do_always_redirect = ctx.data['always_redirect']
redirect_target = ctx.data['redirect_target']
do_always_stall = ctx.data['always_stall']
stall_time = ctx.data['stall_time']
if do_always_stall:
msg_fmt = "Endpoint `%s` waiting `%f` seconds as requested"
log.debug(msg_fmt, ctx.data['endpoint_id'], stall_time)
time.sleep(stall_time)
# This does not end request processing
if do_always_bork:
msg_fmt = "Endpoint `%s` sending broken response as requested"
log.debug(msg_fmt, ctx.data['endpoint_id'])
blob = b"Broken response due to `always_bork` flag being set"
self._finalize_request(500, 'text/plain; charset=utf-8', blob)
return True
if do_always_redirect:
msg_fmt = "Endpoint `%s` sending redirect to `%s` as requested"
log.debug(msg_fmt, ctx.data['endpoint_id'], redirect_target)
headers = {"Location": redirect_target}
self._finalize_request(307,
'text/plain; charset=utf-8',
blob,
extra_headers=headers)
return True
return False
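    # For reference, a hypothetical snapshot of the per-endpoint context data
    # that _process_commands() reads (keys taken from the code above, values
    # purely illustrative):
    #
    #   ctx.data = {'endpoint_id': 'http://127.0.0.1:16000',
    #               'always_bork': False,
    #               'always_redirect': True,
    #               'redirect_target': 'http://example.com/',
    #               'always_stall': False,
    #               'stall_time': 0.0}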
def log_message(self, log_format, *args):
"""Just a patch to make Mockers Requests Handlers compatible with
Unix Sockets.
Method logs the request without source IP address/with hard-coded value
of `unix-socket-connection` if the socket is a Unix Socket.
Please check the http.server.BaseHTTPRequestHandler documentation
for the meaning of the function arguments.
"""
endpoint_id = self.server.context.data['endpoint_id']
if self.server.address_family == socket.AF_UNIX:
log.debug("[Endpoint: %s] %s - - [%s] %s\n",
endpoint_id,
"unix-socket-connection",
self.log_date_time_string(),
log_format % args)
else:
log.debug("[Endpoint: %s] %s - - [%s] %s\n",
endpoint_id,
self.address_string(),
self.log_date_time_string(),
log_format % args)
def _finalize_request(self, code, content_type, blob, extra_headers=None):
"""A helper function meant to abstract sending request to client
Arguments:
code (int): HTTP response code to send
content_type (string): HTTP content type value of the response
blob (b''): data to send to the client in the body of the response
extra_headers (dict): extra headers that should be set in the reply
"""
try:
self.send_response(code)
self.send_header('Content-type', content_type)
if extra_headers is not None:
for name, val in extra_headers.items():
self.send_header(name, val)
self.end_headers()
self.wfile.write(blob)
except BrokenPipeError:
log.warn("Client already closed the connection, "
"aborting sending the response")
@staticmethod
def _convert_data_to_blob(data):
"""A helper function meant to simplify converting python objects to
bytes arrays.
Arguments:
data: data to convert to b''. Can be anything as long as it's JSON
serializable.
Returns:
A resulting byte sequence
"""
return json.dumps(data,
indent=4,
sort_keys=True,
ensure_ascii=False,
).encode('utf-8',
errors='backslashreplace')
def _parse_request_path(self):
"""Parse query arguments in the request path to dict.
Returns:
A tuple that contains a request path stripped of query arguments
and a dict containing all the query arguments (if any).
"""
parsed_url = urlparse(self.path)
path_component = parsed_url.path
query_components = parse_qs(parsed_url.query)
return path_component, query_components
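        # Example (illustrative): for a request to `/metadata?key=a&key=b`
        # this method returns ('/metadata', {'key': ['a', 'b']}) -- parse_qs
        # always maps each query argument to a *list* of values.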
def _unified_method_handler(self):
"""A unified entry point for all request types.
This method is meant to be the top-level entry point for all requests.
This class handles only GET and POST for now, but other handlers can
add request types if necessary.
All query parameters are extracted (both from the URI and the body),
and the handler's self._calculate_response method is called to produce
a correct response. Handlers may terminate this workflow by raising
EndpointException if necessary. All other exceptions are caught and,
apart from being logged, are also sent to the client in order to
make debugging potential problems easier and failures more explicit.
"""
try:
path, url_args = self._parse_request_path()
body_args = self._parse_request_body()
status, content_type, blob = self._calculate_response(path, url_args, body_args)
except EndpointException as e:
self._finalize_request(e.code, e.content_type, e.reason)
# Pylint, please trust me on this one ;)
# pylint: disable=W0703
except Exception:
endpoint_id = self.server.context.data['endpoint_id']
msg_fmt = ("Exception occurred while handling the request in "
"endpoint `%s`")
log.exception(msg_fmt, endpoint_id)
# traceback.format_exc() returns str, i.e. text, i.e. a sequence of
# unicode code points. UTF-8 is a unicode-complete codec. That is,
# any and all unicode code points can be encoded.
blob = traceback.format_exc().encode('utf-8')
self._finalize_request(500, 'text/plain; charset=utf-8', blob)
else:
request_handled = self._process_commands(blob)
# No need to specify character encoding if type is json:
# http://stackoverflow.com/a/9254967
if not request_handled:
self._finalize_request(status, content_type, blob)
def do_GET(self):
"""Please check the http.server.BaseHTTPRequestHandler documentation
for the method description.
Worth noting is that a GET request can behave like a POST - it can have
both request path arguments and body arguments.
http://stackoverflow.com/a/2064369
"""
self._unified_method_handler()
def do_POST(self):
"""Please check the http.server.BaseHTTPRequestHandler documentation
for the method description.
Worth noting is that a GET request can behave like a POST - it can have
both request path arguments and body arguments.
http://stackoverflow.com/a/2064369
"""
self._unified_method_handler()
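# A minimal sketch (hypothetical, not part of mocker) of a concrete handler
# built on the base class above: a JSON body parser plus a response that
# echoes the request path and arguments back to the client. It follows the
# (status, content_type, blob) convention used by _unified_method_handler().
class EchoRequestHandler(BaseHTTPRequestHandler):
    def _parse_request_body(self):
        # Read and decode a JSON body, if any.
        length = int(self.headers.get('Content-Length', 0))
        if not length:
            return {}
        return json.loads(self.rfile.read(length).decode('utf-8'))

    def _calculate_response(self, base_path, url_args, body_args=None):
        # Echo everything back as pretty-printed JSON.
        blob = self._convert_data_to_blob({'path': base_path,
                                           'url_args': url_args,
                                           'body_args': body_args})
        return 200, 'application/json', blob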
|
|
import multiprocessing
import os
import shutil
import subprocess
import logging
import lib.db as db
import lib.config as config
from lib.exceptions import UnableToAlignException
def create_tiff(raw_path, tmp_path):
exe = os.path.join(config.get_path('ufraw_bindir'), 'ufraw-batch')
args = config.get_exe_args('ufraw-batch')
proc = subprocess.Popen(
[exe] + args + ['--output={}'.format(tmp_path), raw_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
def copy_exif(fin, fout):
exe = os.path.join(config.get_path('exiftool'), 'exiftool')
proc = subprocess.Popen(
[
exe,
'-TagsFromFile',
fin,
fout
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
def align_tiffs(tiffs, img_name):
log = logging.getLogger()
log.info("Aligning images for {}".format(img_name))
tmp_dir = config.get_path('tmp')
exe = os.path.join(config.get_path('hugin_bindir'), 'align_image_stack')
pid = os.getpid()
path = os.path.join(tmp_dir, "{}.hdr".format(img_name))
p_out = subprocess.Popen(
[
exe,
'-i',
'-o', path
] + tiffs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p_out.communicate()
if not os.path.exists(path):
raise UnableToAlignException("Tried: '{} -i -o {} {}'".format(exe, path, ' '.join(tiffs)))
return path
def tonemap(hdr, tmo, img_name):
log = logging.getLogger()
outdir = config.get_path('tmp')
pfstools_path = config.get_path('pfstools_bindir')
tmo_name = tmo.rsplit('_', 1)[1]
outfile=os.path.join(outdir, "{}.{}.ppm".format(img_name, tmo_name))
log.info("Tonemapping {} with algorithm {}".format(img_name, tmo_name))
settings = config.get_tmo_options(tmo)
p_in = subprocess.Popen(
[
os.path.join(pfstools_path, 'pfsin'),
'--quiet',
hdr
],
stdout=subprocess.PIPE)
p_tone = subprocess.Popen(
[
os.path.join(pfstools_path, tmo)
] + config.get_exe_args(tmo),
stdin=p_in.stdout,
stdout=subprocess.PIPE)
if 'gamma' in settings:
p_gamma = subprocess.Popen(
[
os.path.join(pfstools_path, 'pfsgamma'),
'-g', settings['gamma']
],
stdin=p_tone.stdout,
stdout=subprocess.PIPE)
outfd = p_gamma.stdout
else:
outfd = p_tone.stdout
p_out = subprocess.Popen(
[
os.path.join(pfstools_path, 'pfsout'),
outfile
],
stdin=outfd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p_in.stdout.close()
p_tone.stdout.close()
if 'gamma' in settings:
outfd.close()
out, err = p_out.communicate()
return outfile
def merge_images(base_img, overlay_imgs, img_name):
outdir=config.get_path('tmp')
outfile = os.path.join(outdir, "{}.hdr.tiff".format(img_name))
overlay = []
for img, settings in overlay_imgs.items():
args = "( {} -trim -alpha set -channel A -evaluate set {}% ) -compose overlay -composite".format(img, settings['opacity'])
overlay += args.split(" ")
proc = subprocess.Popen(
[
'/usr/bin/convert',
base_img,
] + overlay + [ outfile ],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
copy_exif_data([base_img], outfile)
return outfile
def remove_files(files):
for f in files:
os.remove(f)
def create_hdr(tiffs, img_name):
outdir = config.get_path('outdir')
out_settings = config.get_output_options()
aligned = align_tiffs(tiffs, img_name)
tonemaps = {}
for tmo in config.get_tmos():
tmo_name = tmo.rsplit('_', 1)[1]
f = tonemap(aligned, tmo, img_name)
copy_exif_data(tiffs, f)
if out_settings.getboolean('save_tonemaps'):
save_tiff(f, "{}.{}".format(img_name, tmo_name))
tonemaps[f] = {'opacity': config.get_tmo_options(tmo)['opacity']}
hdr_img = merge_images(tiffs[0], tonemaps, img_name)
save_tiff(hdr_img, "{}.{}".format(img_name, out_settings['hdr_suffix']))
if not out_settings.getboolean('save_tmp_files'):
remove_files([aligned, hdr_img] + list(tonemaps.keys()))
def save_tiff(tiff, outname):
log = logging.getLogger()
settings = config.get_output_options()
outfile = os.path.join(
config.get_path('outdir'),
"{}.{}".format(outname, settings['format']))
log.info("saving {} as {}".format(tiff, outfile))
exe = os.path.join(config.get_path('imagick_bindir'), 'convert')
args = config.get_exe_args('output')
proc = subprocess.Popen(
[exe, tiff] + args + [outfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
copy_exif_data([tiff], outfile)
def copy_exif_data(sources, target):
source = sources[0]
proc = subprocess.Popen(
[
'/usr/bin/exiftool',
'-overwrite_original',
'-TagsFromFile',
source,
target
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
shutil.copystat(source, target)
def copy_images(files, outdir):
if not os.path.isdir(outdir):
os.makedirs(outdir)
for f in files:
shutil.copyfile(f['path'], os.path.join(outdir, f['name']))
shutil.copystat(f['path'], os.path.join(outdir, f['name']))
def process_raws(files, tmp_dir):
log = logging.getLogger()
num_files = len(files)
tiffs = []
orig_saved = False
save_all_raws = config.get_output_options().getboolean('save_all_brackets')
if not files:
return
for f in sorted(files, key=lambda x: x['name']):
if num_files < 2 and 'processed' in f and f['processed']:
return
f['short_name'] = f['name'].rsplit(".", 1)[0]
if f['seq'] and f['seq'] < 2:
img_name = f['short_name']
tiff = os.path.join(tmp_dir, "{}.tiff".format(f['short_name']))
create_tiff(f['path'], tiff)
shutil.copystat(f['path'], tiff)
if save_all_raws or not orig_saved:
save_tiff(tiff, f['short_name'])
orig_saved = True
tiffs.append(tiff)
if num_files > 1:
try:
create_hdr(tiffs, img_name)
except UnableToAlignException:
log.warning("Could not align parts for {}, unable to create HDR".format(img_name))
if not config.get_output_options().getboolean('save_tmp_files'):
remove_files(tiffs)
class ImageProcessor(multiprocessing.Process):
def __init__(self, images):
super(ImageProcessor, self).__init__()
self.log = logging.getLogger()
self.images = images
self.db_url = config.get_path('db')
self.archive_dir = config.get_path('archive')
self.outdir = config.get_path('outdir')
self.tmp_dir = config.get_path('tmp')
def copy_images(self, files):
copy_images(files, self.outdir)
def process_raws(self, files):
process_raws(files, self.tmp_dir)
def run(self):
conn = db.open_db(self.db_url)
for img in self.images:
cur = conn.cursor()
meta = db.get_files_for_image(cur, img)
raws = [x for x, y in meta.items() if y['type'] in config.FT_RAW]
non_raws = [x for x, y in meta.items() if x not in raws]
for root, dirs, files in os.walk(self.archive_dir):
for f in files:
if f in meta:
meta[f]['path'] = os.path.join(root, f)
non_raws_id = [x.rsplit('.', 1)[0] for x in non_raws]
raw_processing = []
copy = []
for name, attrs in meta.items():
if name in raws:
if name.rsplit('.', 1)[0] in non_raws_id:
attrs['processed'] = True
else:
attrs['processed'] = False
raw_processing.append(attrs)
else:
copy.append(attrs)
self.copy_images(copy)
self.log.info("processing {} raws for image {}".format(len(raw_processing), img))
self.process_raws(raw_processing)
db.set_image_handled(cur, img)
cur.close()
conn.commit()
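# A minimal usage sketch (hypothetical, not part of this module): split a list
# of image ids across a couple of ImageProcessor workers. It assumes `images`
# holds the image identifiers stored in the database referenced by `config`.
def process_in_parallel(images, workers=2):
    chunks = [images[i::workers] for i in range(workers)]
    procs = [ImageProcessor(chunk) for chunk in chunks if chunk]
    for p in procs:
        p.start()
    for p in procs:
        p.join()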
|
|
# Copyright 2012 OpenStack Foundation.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from glance.common import crypt
from glance.common import exception
from glance.common import utils
import glance.context
import glance.db
from glance.db.sqlalchemy import api
import glance.domain
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
CONF = cfg.CONF
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
@mock.patch('oslo_utils.importutils.import_module')
class TestDbUtilities(test_utils.BaseTestCase):
def setUp(self):
super(TestDbUtilities, self).setUp()
self.config(data_api='silly pants')
self.api = mock.Mock()
def test_get_api_calls_configure_if_present(self, import_module):
import_module.return_value = self.api
self.assertEqual(glance.db.get_api(), self.api)
import_module.assert_called_once_with('silly pants')
self.api.configure.assert_called_once_with()
def test_get_api_skips_configure_if_missing(self, import_module):
import_module.return_value = self.api
del self.api.configure
self.assertEqual(glance.db.get_api(), self.api)
import_module.assert_called_once_with('silly pants')
self.assertFalse(hasattr(self.api, 'configure'))
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc'
UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'
UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'
UUID1_LOCATION = 'file:///path/to/image'
UUID1_LOCATION_METADATA = {'key': 'value'}
UUID3_LOCATION = 'http://somehost.com/place'
CHECKSUM = '93264c3edf5972c9f1cb309543d38a5c'
CHCKSUM1 = '43264c3edf4972c9f1cb309543d38a55'
def _db_fixture(id, **kwargs):
obj = {
'id': id,
'name': None,
'is_public': False,
'properties': {},
'checksum': None,
'owner': None,
'status': 'queued',
'tags': [],
'size': None,
'locations': [],
'protected': False,
'disk_format': None,
'container_format': None,
'deleted': False,
'min_ram': None,
'min_disk': None,
}
obj.update(kwargs)
return obj
def _db_image_member_fixture(image_id, member_id, **kwargs):
obj = {
'image_id': image_id,
'member': member_id,
}
obj.update(kwargs)
return obj
def _db_task_fixture(task_id, type, status, **kwargs):
obj = {
'id': task_id,
'type': type,
'status': status,
'input': None,
'result': None,
'owner': None,
'message': None,
'deleted': False,
}
obj.update(kwargs)
return obj
class TestImageRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestImageRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_factory = glance.domain.ImageFactory()
self._create_images()
self._create_image_members()
def _create_images(self):
self.db.reset()
self.images = [
_db_fixture(UUID1, owner=TENANT1, checksum=CHECKSUM,
name='1', size=256,
is_public=True, status='active',
locations=[{'url': UUID1_LOCATION,
'metadata': UUID1_LOCATION_METADATA,
'status': 'active'}]),
_db_fixture(UUID2, owner=TENANT1, checksum=CHCKSUM1,
name='2', size=512, is_public=False),
_db_fixture(UUID3, owner=TENANT3, checksum=CHCKSUM1,
name='3', size=1024, is_public=True,
locations=[{'url': UUID3_LOCATION,
'metadata': {},
'status': 'active'}]),
_db_fixture(UUID4, owner=TENANT4, name='4', size=2048),
]
[self.db.image_create(None, image) for image in self.images]
self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
def _create_image_members(self):
self.image_members = [
_db_image_member_fixture(UUID2, TENANT2),
_db_image_member_fixture(UUID2, TENANT3, status='accepted'),
]
[self.db.image_member_create(None, image_member)
for image_member in self.image_members]
def test_get(self):
image = self.image_repo.get(UUID1)
self.assertEqual(UUID1, image.image_id)
self.assertEqual('1', image.name)
self.assertEqual(set(['ping', 'pong']), image.tags)
self.assertEqual('public', image.visibility)
self.assertEqual('active', image.status)
self.assertEqual(256, image.size)
self.assertEqual(TENANT1, image.owner)
def test_location_value(self):
image = self.image_repo.get(UUID3)
self.assertEqual(UUID3_LOCATION, image.locations[0]['url'])
def test_location_data_value(self):
image = self.image_repo.get(UUID1)
self.assertEqual(UUID1_LOCATION, image.locations[0]['url'])
self.assertEqual(UUID1_LOCATION_METADATA,
image.locations[0]['metadata'])
def test_location_data_exists(self):
image = self.image_repo.get(UUID2)
self.assertEqual([], image.locations)
def test_get_not_found(self):
fake_uuid = str(uuid.uuid4())
exc = self.assertRaises(exception.NotFound, self.image_repo.get,
fake_uuid)
self.assertIn(fake_uuid, utils.exception_to_str(exc))
def test_get_forbidden(self):
self.assertRaises(exception.NotFound, self.image_repo.get, UUID4)
def test_list(self):
images = self.image_repo.list()
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids)
def _do_test_list_status(self, status, expected):
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT3)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
images = self.image_repo.list(member_status=status)
self.assertEqual(expected, len(images))
def test_list_status(self):
self._do_test_list_status(None, 3)
def test_list_status_pending(self):
self._do_test_list_status('pending', 2)
def test_list_status_rejected(self):
self._do_test_list_status('rejected', 2)
def test_list_status_all(self):
self._do_test_list_status('all', 3)
def test_list_with_marker(self):
full_images = self.image_repo.list()
full_ids = [i.image_id for i in full_images]
marked_images = self.image_repo.list(marker=full_ids[0])
actual_ids = [i.image_id for i in marked_images]
self.assertEqual(full_ids[1:], actual_ids)
def test_list_with_last_marker(self):
images = self.image_repo.list()
marked_images = self.image_repo.list(marker=images[-1].image_id)
self.assertEqual(0, len(marked_images))
def test_limited_list(self):
limited_images = self.image_repo.list(limit=2)
self.assertEqual(2, len(limited_images))
def test_list_with_marker_and_limit(self):
full_images = self.image_repo.list()
full_ids = [i.image_id for i in full_images]
marked_images = self.image_repo.list(marker=full_ids[0], limit=1)
actual_ids = [i.image_id for i in marked_images]
self.assertEqual(full_ids[1:2], actual_ids)
def test_list_private_images(self):
filters = {'visibility': 'private'}
images = self.image_repo.list(filters=filters)
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID2]), image_ids)
def test_list_with_checksum_filter_single_image(self):
filters = {'checksum': CHECKSUM}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEqual(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_checksum_filter_multiple_images(self):
filters = {'checksum': CHCKSUM1}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEqual(2, len(image_ids))
self.assertIn(UUID2, image_ids)
self.assertIn(UUID3, image_ids)
def test_list_with_wrong_checksum(self):
WRONG_CHKSUM = 'd2fd42f979e1ed1aafadc7eb9354bff839c858cd'
filters = {'checksum': WRONG_CHKSUM}
images = self.image_repo.list(filters=filters)
self.assertEqual(0, len(images))
def test_list_with_tags_filter_single_tag(self):
filters = {'tags': ['ping']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEqual(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_tags_filter_multiple_tags(self):
filters = {'tags': ['ping', 'pong']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEqual(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_tags_filter_multiple_tags_and_nonexistent(self):
filters = {'tags': ['ping', 'fake']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEqual(0, len(image_ids))
def test_list_with_wrong_tags(self):
filters = {'tags': ['fake']}
images = self.image_repo.list(filters=filters)
self.assertEqual(0, len(images))
def test_list_public_images(self):
filters = {'visibility': 'public'}
images = self.image_repo.list(filters=filters)
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID1, UUID3]), image_ids)
def test_sorted_list(self):
images = self.image_repo.list(sort_key=['size'], sort_dir=['asc'])
image_ids = [i.image_id for i in images]
self.assertEqual([UUID1, UUID2, UUID3], image_ids)
def test_sorted_list_with_multiple_keys(self):
temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM,
name='1', size=1024,
is_public=True, status='active',
locations=[{'url': UUID1_LOCATION,
'metadata': UUID1_LOCATION_METADATA,
'status': 'active'}])
self.db.image_create(None, image)
images = self.image_repo.list(sort_key=['name', 'size'],
sort_dir=['asc'])
image_ids = [i.image_id for i in images]
self.assertEqual([UUID1, temp_id, UUID2, UUID3], image_ids)
images = self.image_repo.list(sort_key=['size', 'name'],
sort_dir=['asc'])
image_ids = [i.image_id for i in images]
self.assertEqual([UUID1, UUID2, temp_id, UUID3], image_ids)
def test_sorted_list_with_multiple_dirs(self):
temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM,
name='1', size=1024,
is_public=True, status='active',
locations=[{'url': UUID1_LOCATION,
'metadata': UUID1_LOCATION_METADATA,
'status': 'active'}])
self.db.image_create(None, image)
images = self.image_repo.list(sort_key=['name', 'size'],
sort_dir=['asc', 'desc'])
image_ids = [i.image_id for i in images]
self.assertEqual([temp_id, UUID1, UUID2, UUID3], image_ids)
images = self.image_repo.list(sort_key=['name', 'size'],
sort_dir=['desc', 'asc'])
image_ids = [i.image_id for i in images]
self.assertEqual([UUID3, UUID2, UUID1, temp_id], image_ids)
def test_add_image(self):
image = self.image_factory.new_image(name='added image')
self.assertEqual(image.updated_at, image.created_at)
self.image_repo.add(image)
retreived_image = self.image_repo.get(image.image_id)
self.assertEqual('added image', retreived_image.name)
self.assertEqual(image.updated_at, retreived_image.updated_at)
def test_save_image(self):
image = self.image_repo.get(UUID1)
original_update_time = image.updated_at
image.name = 'foo'
image.tags = ['king', 'kong']
self.image_repo.save(image)
current_update_time = image.updated_at
self.assertTrue(current_update_time > original_update_time)
image = self.image_repo.get(UUID1)
self.assertEqual('foo', image.name)
self.assertEqual(set(['king', 'kong']), image.tags)
self.assertEqual(current_update_time, image.updated_at)
def test_save_image_not_found(self):
fake_uuid = str(uuid.uuid4())
image = self.image_repo.get(UUID1)
image.image_id = fake_uuid
exc = self.assertRaises(exception.NotFound, self.image_repo.save,
image)
self.assertIn(fake_uuid, utils.exception_to_str(exc))
def test_remove_image(self):
image = self.image_repo.get(UUID1)
previous_update_time = image.updated_at
self.image_repo.remove(image)
self.assertTrue(image.updated_at > previous_update_time)
self.assertRaises(exception.NotFound, self.image_repo.get, UUID1)
def test_remove_image_not_found(self):
fake_uuid = str(uuid.uuid4())
image = self.image_repo.get(UUID1)
image.image_id = fake_uuid
exc = self.assertRaises(exception.NotFound, self.image_repo.remove,
image)
self.assertIn(fake_uuid, utils.exception_to_str(exc))
class TestEncryptedLocations(test_utils.BaseTestCase):
def setUp(self):
super(TestEncryptedLocations, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_factory = glance.domain.ImageFactory()
self.crypt_key = '0123456789abcdef'
self.config(metadata_encryption_key=self.crypt_key)
self.foo_bar_location = [{'url': 'foo', 'metadata': {},
'status': 'active'},
{'url': 'bar', 'metadata': {},
'status': 'active'}]
def test_encrypt_locations_on_add(self):
image = self.image_factory.new_image(UUID1)
image.locations = self.foo_bar_location
self.image_repo.add(image)
db_data = self.db.image_get(self.context, UUID1)
self.assertNotEqual(db_data['locations'], ['foo', 'bar'])
decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url'])
for l in db_data['locations']]
self.assertEqual([l['url'] for l in self.foo_bar_location],
decrypted_locations)
def test_encrypt_locations_on_save(self):
image = self.image_factory.new_image(UUID1)
self.image_repo.add(image)
image.locations = self.foo_bar_location
self.image_repo.save(image)
db_data = self.db.image_get(self.context, UUID1)
self.assertNotEqual(db_data['locations'], ['foo', 'bar'])
decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url'])
for l in db_data['locations']]
self.assertEqual([l['url'] for l in self.foo_bar_location],
decrypted_locations)
def test_decrypt_locations_on_get(self):
url_loc = ['ping', 'pong']
orig_locations = [{'url': l, 'metadata': {}, 'status': 'active'}
for l in url_loc]
encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l)
for l in url_loc]
encrypted_locations = [{'url': l, 'metadata': {}, 'status': 'active'}
for l in encrypted_locs]
self.assertNotEqual(encrypted_locations, orig_locations)
db_data = _db_fixture(UUID1, owner=TENANT1,
locations=encrypted_locations)
self.db.image_create(None, db_data)
image = self.image_repo.get(UUID1)
self.assertIn('id', image.locations[0])
self.assertIn('id', image.locations[1])
image.locations[0].pop('id')
image.locations[1].pop('id')
self.assertEqual(orig_locations, image.locations)
def test_decrypt_locations_on_list(self):
url_loc = ['ping', 'pong']
orig_locations = [{'url': l, 'metadata': {}, 'status': 'active'}
for l in url_loc]
encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l)
for l in url_loc]
encrypted_locations = [{'url': l, 'metadata': {}, 'status': 'active'}
for l in encrypted_locs]
self.assertNotEqual(encrypted_locations, orig_locations)
db_data = _db_fixture(UUID1, owner=TENANT1,
locations=encrypted_locations)
self.db.image_create(None, db_data)
image = self.image_repo.list()[0]
self.assertIn('id', image.locations[0])
self.assertIn('id', image.locations[1])
image.locations[0].pop('id')
image.locations[1].pop('id')
self.assertEqual(orig_locations, image.locations)
class TestImageMemberRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestImageMemberRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_member_factory = glance.domain.ImageMemberFactory()
self._create_images()
self._create_image_members()
image = self.image_repo.get(UUID1)
self.image_member_repo = glance.db.ImageMemberRepo(self.context,
self.db, image)
def _create_images(self):
self.images = [
_db_fixture(UUID1, owner=TENANT1, name='1', size=256,
status='active'),
_db_fixture(UUID2, owner=TENANT1, name='2',
size=512, is_public=False),
]
[self.db.image_create(None, image) for image in self.images]
self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
def _create_image_members(self):
self.image_members = [
_db_image_member_fixture(UUID1, TENANT2),
_db_image_member_fixture(UUID1, TENANT3),
]
[self.db.image_member_create(None, image_member)
for image_member in self.image_members]
def test_list(self):
image_members = self.image_member_repo.list()
image_member_ids = set([i.member_id for i in image_members])
self.assertEqual(set([TENANT2, TENANT3]), image_member_ids)
def test_list_no_members(self):
image = self.image_repo.get(UUID2)
self.image_member_repo_uuid2 = glance.db.ImageMemberRepo(
self.context, self.db, image)
image_members = self.image_member_repo_uuid2.list()
image_member_ids = set([i.member_id for i in image_members])
self.assertEqual(set([]), image_member_ids)
def test_save_image_member(self):
image_member = self.image_member_repo.get(TENANT2)
image_member.status = 'accepted'
self.image_member_repo.save(image_member)
image_member_updated = self.image_member_repo.get(TENANT2)
self.assertEqual(image_member.id, image_member_updated.id)
self.assertEqual('accepted', image_member_updated.status)
def test_add_image_member(self):
image = self.image_repo.get(UUID1)
image_member = self.image_member_factory.new_image_member(image,
TENANT4)
self.assertIsNone(image_member.id)
self.image_member_repo.add(image_member)
retreived_image_member = self.image_member_repo.get(TENANT4)
self.assertIsNotNone(retreived_image_member.id)
self.assertEqual(image_member.image_id,
retreived_image_member.image_id)
self.assertEqual(image_member.member_id,
retreived_image_member.member_id)
self.assertEqual('pending', retreived_image_member.status)
def test_add_duplicate_image_member(self):
image = self.image_repo.get(UUID1)
image_member = self.image_member_factory.new_image_member(image,
TENANT4)
self.assertIsNone(image_member.id)
self.image_member_repo.add(image_member)
retreived_image_member = self.image_member_repo.get(TENANT4)
self.assertIsNotNone(retreived_image_member.id)
self.assertEqual(image_member.image_id,
retreived_image_member.image_id)
self.assertEqual(image_member.member_id,
retreived_image_member.member_id)
self.assertEqual('pending', retreived_image_member.status)
self.assertRaises(exception.Duplicate, self.image_member_repo.add,
image_member)
def test_get_image_member(self):
image = self.image_repo.get(UUID1)
image_member = self.image_member_factory.new_image_member(image,
TENANT4)
self.assertIsNone(image_member.id)
self.image_member_repo.add(image_member)
member = self.image_member_repo.get(image_member.member_id)
self.assertEqual(member.id, image_member.id)
self.assertEqual(member.image_id, image_member.image_id)
self.assertEqual(member.member_id, image_member.member_id)
self.assertEqual('pending', member.status)
def test_get_nonexistent_image_member(self):
fake_image_member_id = 'fake'
self.assertRaises(exception.NotFound, self.image_member_repo.get,
fake_image_member_id)
def test_remove_image_member(self):
image_member = self.image_member_repo.get(TENANT2)
self.image_member_repo.remove(image_member)
self.assertRaises(exception.NotFound, self.image_member_repo.get,
TENANT2)
def test_remove_image_member_does_not_exist(self):
fake_uuid = str(uuid.uuid4())
image = self.image_repo.get(UUID2)
fake_member = glance.domain.ImageMemberFactory().new_image_member(
image, TENANT4)
fake_member.id = fake_uuid
exc = self.assertRaises(exception.NotFound,
self.image_member_repo.remove,
fake_member)
self.assertIn(fake_uuid, utils.exception_to_str(exc))
class TestTaskRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestTaskRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(user=USER1,
tenant=TENANT1)
self.task_repo = glance.db.TaskRepo(self.context, self.db)
self.task_factory = glance.domain.TaskFactory()
self.fake_task_input = ('{"import_from": '
'"swift://cloud.foo/account/mycontainer/path"'
',"import_from_format": "qcow2"}')
self._create_tasks()
def _create_tasks(self):
self.db.reset()
self.tasks = [
_db_task_fixture(UUID1, type='import', status='pending',
input=self.fake_task_input,
result='',
owner=TENANT1,
message='',
),
_db_task_fixture(UUID2, type='import', status='processing',
input=self.fake_task_input,
result='',
owner=TENANT1,
message='',
),
_db_task_fixture(UUID3, type='import', status='failure',
input=self.fake_task_input,
result='',
owner=TENANT1,
message='',
),
_db_task_fixture(UUID4, type='import', status='success',
input=self.fake_task_input,
result='',
owner=TENANT2,
message='',
),
]
[self.db.task_create(None, task) for task in self.tasks]
def test_get(self):
task = self.task_repo.get(UUID1)
self.assertEqual(task.task_id, UUID1)
self.assertEqual('import', task.type)
self.assertEqual('pending', task.status)
self.assertEqual(task.task_input, self.fake_task_input)
self.assertEqual('', task.result)
self.assertEqual('', task.message)
self.assertEqual(task.owner, TENANT1)
def test_get_not_found(self):
self.assertRaises(exception.NotFound,
self.task_repo.get,
str(uuid.uuid4()))
def test_get_forbidden(self):
self.assertRaises(exception.NotFound,
self.task_repo.get,
UUID4)
def test_list(self):
tasks = self.task_repo.list()
task_ids = set([i.task_id for i in tasks])
self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids)
def test_list_with_type(self):
filters = {'type': 'import'}
tasks = self.task_repo.list(filters=filters)
task_ids = set([i.task_id for i in tasks])
self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids)
def test_list_with_status(self):
filters = {'status': 'failure'}
tasks = self.task_repo.list(filters=filters)
task_ids = set([i.task_id for i in tasks])
self.assertEqual(set([UUID3]), task_ids)
def test_list_with_marker(self):
full_tasks = self.task_repo.list()
full_ids = [i.task_id for i in full_tasks]
marked_tasks = self.task_repo.list(marker=full_ids[0])
actual_ids = [i.task_id for i in marked_tasks]
self.assertEqual(full_ids[1:], actual_ids)
def test_list_with_last_marker(self):
tasks = self.task_repo.list()
marked_tasks = self.task_repo.list(marker=tasks[-1].task_id)
self.assertEqual(0, len(marked_tasks))
def test_limited_list(self):
limited_tasks = self.task_repo.list(limit=2)
self.assertEqual(2, len(limited_tasks))
def test_list_with_marker_and_limit(self):
full_tasks = self.task_repo.list()
full_ids = [i.task_id for i in full_tasks]
marked_tasks = self.task_repo.list(marker=full_ids[0], limit=1)
actual_ids = [i.task_id for i in marked_tasks]
self.assertEqual(full_ids[1:2], actual_ids)
def test_sorted_list(self):
tasks = self.task_repo.list(sort_key='status', sort_dir='desc')
task_ids = [i.task_id for i in tasks]
self.assertEqual([UUID2, UUID1, UUID3], task_ids)
def test_add_task(self):
task_type = 'import'
task = self.task_factory.new_task(task_type, None,
task_input=self.fake_task_input)
self.assertEqual(task.updated_at, task.created_at)
self.task_repo.add(task)
retrieved_task = self.task_repo.get(task.task_id)
self.assertEqual(task.updated_at, retrieved_task.updated_at)
self.assertEqual(self.fake_task_input, retrieved_task.task_input)
def test_save_task(self):
task = self.task_repo.get(UUID1)
original_update_time = task.updated_at
self.task_repo.save(task)
current_update_time = task.updated_at
self.assertTrue(current_update_time > original_update_time)
task = self.task_repo.get(UUID1)
self.assertEqual(current_update_time, task.updated_at)
def test_remove_task(self):
task = self.task_repo.get(UUID1)
self.task_repo.remove(task)
self.assertRaises(exception.NotFound,
self.task_repo.get,
task.task_id)
class RetryOnDeadlockTestCase(test_utils.BaseTestCase):
def test_raise_deadlock(self):
class TestException(Exception):
pass
self.attempts = 3
def _mock_get_session():
def _raise_exceptions():
self.attempts -= 1
if self.attempts <= 0:
raise TestException("Exit")
raise db_exc.DBDeadlock("Fake Exception")
return _raise_exceptions
with mock.patch.object(api, 'get_session') as sess:
sess.side_effect = _mock_get_session()
try:
api._image_update(None, {}, 'fake-id')
except TestException:
self.assertEqual(3, sess.call_count)
# Test retry on image destroy if db deadlock occurs
self.attempts = 3
with mock.patch.object(api, 'get_session') as sess:
sess.side_effect = _mock_get_session()
try:
api.image_destroy(None, 'fake-id')
except TestException:
self.assertEqual(3, sess.call_count)
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "prang-"
cfg.versionfile_source = "prang/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
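# Worked example (illustrative): for
#   pieces = {"closest-tag": "1.2", "distance": 3,
#             "short": "abc1234", "dirty": True}
# render_pep440(pieces) returns "1.2+3.gabc1234.dirty"; with distance 0 and a
# clean tree it returns just "1.2".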
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
|
import numpy as np
try:
from .im2col_cython import col2im_cython, im2col_cython
except ImportError:
print('run the following from the cs231n directory and try again:')
print('python setup.py build_ext --inplace')
print('You may also need to restart your iPython kernel')
from .im2col import (
# get_im2col_indices,
# im2col_indices,
col2im_indices,
)
def conv_forward_fast(x, w, b, conv_param):
"""
A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im.
"""
N, C, H, W = x.shape
num_filters, _, filter_height, filter_width = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
# Create output
out_height = (H + 2 * pad - filter_height) // stride + 1
out_width = (W + 2 * pad - filter_width) // stride + 1
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
# x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return out, cache
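# Illustrative call (shapes only): a batch of two 3-channel 8x8 images
# convolved with four 3x3 filters at stride 1 and pad 1 keeps the spatial
# size at 8x8, since (8 + 2*1 - 3) // 1 + 1 == 8.
#
#   x = np.random.randn(2, 3, 8, 8)
#   w = np.random.randn(4, 3, 3, 3)
#   b = np.random.randn(4)
#   out, _ = conv_forward_fast(x, w, b, {'stride': 1, 'pad': 1})
#   assert out.shape == (2, 4, 8, 8)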
def conv_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im.
"""
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
db = np.sum(dout, axis=(0, 2, 3))
num_filters, _, filter_height, filter_width = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
# dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width,
# pad, stride)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
filter_height, filter_width, pad, stride)
return dx, dw, db
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the
pooling regions are square and tile the input image, then we can use
the reshape method which is very fast. Otherwise we fall back on
the im2col method, which is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = (
pool_param['pool_height'], pool_param['pool_width']
)
stride = pool_param['stride']
same_size = (pool_height == pool_width == stride)
tiles = (H % pool_height == 0 and W % pool_width == 0)
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
def max_pool_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a max pooling layer.
    This switches between the reshape method and the im2col method depending on
which method was used to generate the cache.
"""
method, real_cache = cache
if method == 'reshape':
return max_pool_backward_reshape(dout, real_cache)
elif method == 'im2col':
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param):
"""
A fast implementation of the forward pass for the max pooling layer that
uses some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
N, C, H, W = x.shape
pool_height, pool_width = (
pool_param['pool_height'], pool_param['pool_width']
)
stride = pool_param['stride']
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
    x_reshaped = x.reshape(N, C, H // pool_height, pool_height,
                           W // pool_width, pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return out, cache
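# Minimal sketch of the reshape trick above (assumes 2x2 pooling with stride 2):
# the (N, C, H, W) tensor is viewed as (N, C, H//2, 2, W//2, 2) and max-reduced
# over the two pooling axes.
def _demo_reshape_pool():
    x = np.arange(16, dtype=np.float64).reshape(1, 1, 4, 4)
    out = x.reshape(1, 1, 2, 2, 2, 2).max(axis=3).max(axis=4)
    return out  # array([[[[ 5.,  7.], [13., 15.]]]])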
def max_pool_backward_reshape(dout, cache):
"""
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method will assign gradient to
ALL argmax elements of the input rather than picking one. In this case the
gradient will actually be incorrect. However this is unlikely to occur in
practice, so it shouldn't matter much. One possible solution is to split
the upstream gradient equally among all argmax elements;
this should result in a valid subgradient.
    The division by the mask count below implements that splitting; it carries
    a noticeable performance penalty (roughly 40% slower) relative to simply
    assigning the full upstream gradient to every argmax element.
"""
x, x_reshaped, out = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx
def max_pool_forward_im2col(x, pool_param):
"""
An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
N, C, H, W = x.shape
pool_height, pool_width = (
pool_param['pool_height'], pool_param['pool_width']
)
stride = pool_param['stride']
assert (H - pool_height) % stride == 0, 'Invalid height'
assert (W - pool_width) % stride == 0, 'Invalid width'
    out_height = (H - pool_height) // stride + 1
    out_width = (W - pool_width) // stride + 1
x_split = x.reshape(N * C, 1, H, W)
x_cols = im2col_cython(x_split, pool_height, pool_width,
padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return out, cache
def max_pool_backward_im2col(dout, cache):
"""
An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
x, x_cols, x_cols_argmax, pool_param = cache
N, C, H, W = x.shape
pool_height, pool_width = (
pool_param['pool_height'], pool_param['pool_width']
)
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
# dx = col2im_cython(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
# padding=0, stride=stride)
dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
frappe.translate
~~~~~~~~~~~~~~~~
Translation tools for frappe
"""
import frappe, os, re, codecs, json
from frappe.utils.jinja import render_include
from frappe.utils import strip
from jinja2 import TemplateError
import itertools, operator
def guess_language(lang_list=None):
"""Set `frappe.local.lang` from HTTP headers at beginning of request"""
lang_codes = frappe.request.accept_languages.values()
if not lang_codes:
return frappe.local.lang
guess = None
if not lang_list:
lang_list = get_all_languages() or []
for l in lang_codes:
code = l.strip()
if code in lang_list or code == "en":
guess = code
break
		# if a variant (e.g. pt-BR) is requested, check whether its parent language (pt) is set up
if "-" in code:
code = code.split("-")[0]
if code in lang_list:
guess = code
break
return guess or frappe.local.lang
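# Standalone sketch (no frappe required) of the fallback logic above: an exact match
# wins, otherwise a variant such as "pt-BR" falls back to its parent "pt" when only
# the parent is installed. Names here are illustrative only.
def _match_language(code, installed):
	if code in installed or code == "en":
		return code
	if "-" in code and code.split("-")[0] in installed:
		return code.split("-")[0]
	return None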
def get_user_lang(user=None):
"""Set frappe.local.lang from user preferences on session beginning or resumption"""
if not user:
user = frappe.session.user
# via cache
lang = frappe.cache().hget("lang", user)
if not lang:
# if defined in user profile
user_lang = frappe.db.get_value("User", user, "language")
if user_lang and user_lang!="Loading...":
lang = get_lang_dict().get(user_lang, user_lang) or frappe.local.lang
else:
default_lang = frappe.db.get_default("lang")
lang = default_lang or frappe.local.lang
frappe.cache().hset("lang", user, lang or "en")
return lang
def set_default_language(language):
"""Set Global default language"""
lang = get_lang_dict().get(language, language)
frappe.db.set_default("lang", lang)
frappe.local.lang = lang
def get_all_languages():
"""Returns all language codes ar, ch etc"""
return [a.split()[0] for a in get_lang_info()]
def get_lang_dict():
"""Returns all languages in dict format, full name is the key e.g. `{"english":"en"}`"""
return dict([[a[1], a[0]] for a in [a.split(None, 1) for a in get_lang_info()]])
def get_language_from_code(lang):
return dict(a.split(None, 1) for a in get_lang_info()).get(lang)
def get_lang_info():
"""Returns a listified version of `apps/languages.txt`"""
return frappe.cache().get_value("langinfo",
lambda:frappe.get_file_items(os.path.join(frappe.local.sites_path, "languages.txt")))
def get_dict(fortype, name=None):
"""Returns translation dict for a type of object.
:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
:param name: name of the document for which assets are to be returned.
"""
fortype = fortype.lower()
cache = frappe.cache()
asset_key = fortype + ":" + (name or "-")
translation_assets = cache.hget("translation_assets", frappe.local.lang) or {}
	if asset_key not in translation_assets:
if fortype=="doctype":
messages = get_messages_from_doctype(name)
elif fortype=="page":
messages = get_messages_from_page(name)
elif fortype=="report":
messages = get_messages_from_report(name)
elif fortype=="include":
messages = get_messages_from_include_files()
elif fortype=="jsfile":
messages = get_messages_from_file(name)
elif fortype=="boot":
messages = get_messages_from_include_files()
messages += frappe.db.sql("select 'DocType:', name from tabDocType")
messages += frappe.db.sql("select 'Role:', name from tabRole")
messages += frappe.db.sql("select 'Module:', name from `tabModule Def`")
messages += frappe.db.sql("select 'Module:', label from `tabDesktop Icon` where standard=1 or owner=%s",
frappe.session.user)
translation_assets[asset_key] = make_dict_from_messages(messages)
translation_assets[asset_key].update(get_dict_from_hooks(fortype, name))
cache.hset("translation_assets", frappe.local.lang, translation_assets)
return translation_assets[asset_key]
def get_dict_from_hooks(fortype, name):
translated_dict = {}
hooks = frappe.get_hooks("get_translated_dict")
for (hook_fortype, fortype_name) in hooks:
if hook_fortype == fortype and fortype_name == name:
for method in hooks[(hook_fortype, fortype_name)]:
translated_dict.update(frappe.get_attr(method)())
return translated_dict
def add_lang_dict(code):
"""Extracts messages and returns Javascript code snippet to be appened at the end
of the given script
:param code: Javascript code snippet to which translations needs to be appended."""
messages = extract_messages_from_code(code)
messages = [message for pos, message in messages]
code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages))
return code
def make_dict_from_messages(messages, full_dict=None):
"""Returns translated messages as a dict in Language specified in `frappe.local.lang`
:param messages: List of untranslated messages
"""
out = {}
	if full_dict is None:
full_dict = get_full_dict(frappe.local.lang)
for m in messages:
if m[1] in full_dict:
out[m[1]] = full_dict[m[1]]
return out
def get_lang_js(fortype, name):
"""Returns code snippet to be appended at the end of a JS script.
:param fortype: Type of object, e.g. `DocType`
:param name: Document name
"""
return "\n\n$.extend(frappe._messages, %s)" % json.dumps(get_dict(fortype, name))
def get_full_dict(lang):
"""Load and return the entire translations dictionary for a language from :meth:`frape.cache`
:param lang: Language Code, e.g. `hi`
"""
if not lang:
return {}
# found in local, return!
if frappe.local.lang_full_dict is not None:
return frappe.local.lang_full_dict
frappe.local.lang_full_dict = frappe.cache().hget("lang_full_dict", lang)
if frappe.local.lang_full_dict is None:
frappe.local.lang_full_dict = load_lang(lang)
# only cache file translations in this
frappe.cache().hset("lang_full_dict", lang, frappe.local.lang_full_dict)
try:
		# get user specific translation data
user_translations = get_user_translations(lang)
except Exception:
user_translations = None
if user_translations:
frappe.local.lang_full_dict.update(user_translations)
return frappe.local.lang_full_dict
def load_lang(lang, apps=None):
"""Combine all translations from `.csv` files in all `apps`"""
out = {}
for app in (apps or frappe.get_all_apps(True)):
path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv")
out.update(get_translation_dict_from_file(path, lang, app))
return out
def get_translation_dict_from_file(path, lang, app):
"""load translation dict from given path"""
cleaned = {}
if os.path.exists(path):
csv_content = read_csv_file(path)
for item in csv_content:
if len(item)==3:
# with file and line numbers
cleaned[item[1]] = strip(item[2])
elif len(item)==2:
cleaned[item[0]] = strip(item[1])
else:
raise Exception("Bad translation in '{app}' for language '{lang}': {values}".format(
app=app, lang=lang, values=repr(item).encode("utf-8")
))
return cleaned
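# Hedged illustration of the two row shapes handled above (values are made up): a
# 3-column row carries "file +line", source and translation; a 2-column row carries
# just the source/translation pair.
def _demo_translation_rows():
	rows = [
		["apps/app/file.py +12", "Submit", "Soumettre"],
		["Cancel", "Annuler"],
	]
	return dict((row[-2], strip(row[-1])) for row in rows)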
def get_user_translations(lang):
out = frappe.cache().hget('lang_user_translations', lang)
if out is None:
out = {}
for fields in frappe.get_all('Translation',
fields= ["source_name", "target_name"],filters={'language_code': lang}):
out.update({fields.source_name: fields.target_name})
frappe.cache().hset('lang_user_translations', lang, out)
return out
# def get_user_translation_key():
# return 'lang_user_translations:{0}'.format(frappe.local.site)
def clear_cache():
"""Clear all translation assets from :meth:`frappe.cache`"""
cache = frappe.cache()
cache.delete_key("langinfo")
cache.delete_key("lang_full_dict")
cache.delete_key("translation_assets")
def get_messages_for_app(app):
"""Returns all messages (list) for a specified `app`"""
messages = []
modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \
for m in frappe.local.app_modules[app]])
# doctypes
if modules:
for name in frappe.db.sql_list("""select name from tabDocType
where module in ({})""".format(modules)):
messages.extend(get_messages_from_doctype(name))
# workflow based on doctype
messages.extend(get_messages_from_workflow(doctype=name))
# pages
for name, title in frappe.db.sql("""select name, title from tabPage
where module in ({})""".format(modules)):
messages.append((None, title or name))
messages.extend(get_messages_from_page(name))
# reports
for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport
where tabReport.ref_doctype = tabDocType.name
and tabDocType.module in ({})""".format(modules)):
messages.append((None, name))
messages.extend(get_messages_from_report(name))
for i in messages:
if not isinstance(i, tuple):
raise Exception
# workflow based on app.hooks.fixtures
messages.extend(get_messages_from_workflow(app_name=app))
# custom fields based on app.hooks.fixtures
messages.extend(get_messages_from_custom_fields(app_name=app))
# app_include_files
messages.extend(get_all_messages_from_js_files(app))
# server_messages
messages.extend(get_server_messages(app))
return deduplicate_messages(messages)
def get_messages_from_doctype(name):
"""Extract all translatable messages for a doctype. Includes labels, Python code,
Javascript code, html templates"""
messages = []
meta = frappe.get_meta(name)
messages = [meta.name, meta.module]
if meta.description:
messages.append(meta.description)
# translations of field labels, description and options
for d in meta.get("fields"):
messages.extend([d.label, d.description])
if d.fieldtype=='Select' and d.options:
options = d.options.split('\n')
if not "icon" in options[0]:
messages.extend(options)
# translations of roles
for d in meta.get("permissions"):
if d.role:
messages.append(d.role)
messages = [message for message in messages if message]
messages = [('DocType: ' + name, message) for message in messages if is_translatable(message)]
# extract from js, py files
doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
messages.extend(get_messages_from_file(doctype_file_path + ".js"))
messages.extend(get_messages_from_file(doctype_file_path + "_list.js"))
messages.extend(get_messages_from_file(doctype_file_path + "_list.html"))
messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js"))
return messages
def get_messages_from_workflow(doctype=None, app_name=None):
assert doctype or app_name, 'doctype or app_name should be provided'
# translations for Workflows
workflows = []
if doctype:
workflows = frappe.get_all('Workflow', filters={'document_type': doctype})
else:
fixtures = frappe.get_hooks('fixtures', app_name=app_name) or []
for fixture in fixtures:
			if isinstance(fixture, basestring) and fixture == 'Workflow':
workflows = frappe.get_all('Workflow')
break
elif isinstance(fixture, dict) and fixture.get('dt', fixture.get('doctype')) == 'Workflow':
workflows.extend(frappe.get_all('Workflow', filters=fixture.get('filters')))
messages = []
for w in workflows:
states = frappe.db.sql(
'select distinct state from `tabWorkflow Document State` where parent=%s',
(w['name'],), as_dict=True)
messages.extend([('Workflow: ' + w['name'], state['state']) for state in states if is_translatable(state['state'])])
states = frappe.db.sql(
'select distinct message from `tabWorkflow Document State` where parent=%s and message is not null',
(w['name'],), as_dict=True)
messages.extend([("Workflow: " + w['name'], states['message'])
for state in states if is_translatable(state['state'])])
actions = frappe.db.sql(
'select distinct action from `tabWorkflow Transition` where parent=%s',
(w['name'],), as_dict=True)
messages.extend([("Workflow: " + w['name'], action['action']) \
for action in actions if is_translatable(action['action'])])
return messages
def get_messages_from_custom_fields(app_name):
fixtures = frappe.get_hooks('fixtures', app_name=app_name) or []
custom_fields = []
for fixture in fixtures:
if isinstance(fixture, basestring) and fixture == 'Custom Field':
custom_fields = frappe.get_all('Custom Field')
break
elif isinstance(fixture, dict) and fixture.get('dt', fixture.get('doctype')) == 'Custom Field':
custom_fields.extend(frappe.get_all('Custom Field', filters=fixture.get('filters'),
fields=['name','label', 'description', 'fieldtype', 'options']))
messages = []
for cf in custom_fields:
for prop in ('label', 'description'):
if not cf.get(prop) or not is_translatable(cf[prop]):
continue
messages.append(('Custom Field - {}: {}'.format(prop, cf['name']), cf[prop]))
		if cf['fieldtype'] == 'Select' and cf.get('options'):
for option in cf['options'].split('\n'):
if option and 'icon' not in option and is_translatable(option):
messages.append(('Custom Field - Description: ' + cf['name'], option))
return messages
def get_messages_from_page(name):
"""Returns all translatable strings from a :class:`frappe.core.doctype.Page`"""
return _get_messages_from_page_or_report("Page", name)
def get_messages_from_report(name):
"""Returns all translatable strings from a :class:`frappe.core.doctype.Report`"""
report = frappe.get_doc("Report", name)
messages = _get_messages_from_page_or_report("Report", name,
frappe.db.get_value("DocType", report.ref_doctype, "module"))
# TODO position here!
if report.query:
messages.extend([(None, message) for message in re.findall('"([^:,^"]*):', report.query) if is_translatable(message)])
messages.append((None,report.report_name))
return messages
def _get_messages_from_page_or_report(doctype, name, module=None):
if not module:
module = frappe.db.get_value(doctype, name, "module")
doc_path = frappe.get_module_path(module, doctype, name)
messages = get_messages_from_file(os.path.join(doc_path, frappe.scrub(name) +".py"))
if os.path.exists(doc_path):
for filename in os.listdir(doc_path):
if filename.endswith(".js") or filename.endswith(".html"):
messages += get_messages_from_file(os.path.join(doc_path, filename))
return messages
def get_server_messages(app):
"""Extracts all translatable strings (tagged with :func:`frappe._`) from Python modules inside an app"""
messages = []
for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in (".git", "public", "locale"):
if dontwalk in folders: folders.remove(dontwalk)
for f in files:
if f.endswith(".py") or f.endswith(".html") or f.endswith(".js"):
messages.extend(get_messages_from_file(os.path.join(basepath, f)))
return messages
def get_messages_from_include_files(app_name=None):
"""Returns messages from js files included at time of boot like desk.min.js for desk and web"""
messages = []
for file in (frappe.get_hooks("app_include_js", app_name=app_name) or []) + (frappe.get_hooks("web_include_js", app_name=app_name) or []):
messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))
return messages
def get_all_messages_from_js_files(app_name=None):
"""Extracts all translatable strings from app `.js` files"""
messages = []
for app in ([app_name] if app_name else frappe.get_installed_apps()):
if os.path.exists(frappe.get_app_path(app, "public")):
for basepath, folders, files in os.walk(frappe.get_app_path(app, "public")):
if "frappe/public/js/lib" in basepath:
continue
for fname in files:
if fname.endswith(".js") or fname.endswith(".html"):
messages.extend(get_messages_from_file(os.path.join(basepath, fname)))
return messages
def get_messages_from_file(path):
"""Returns a list of transatable strings from a code file
:param path: path of the code file
"""
apps_path = get_bench_dir()
if os.path.exists(path):
with open(path, 'r') as sourcefile:
return [(os.path.relpath(" +".join([path, str(pos)]), apps_path),
message) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(".py"))]
else:
# print "Translate: {0} missing".format(os.path.abspath(path))
return []
def extract_messages_from_code(code, is_py=False):
"""Extracts translatable srings from a code file
:param code: code from which translatable files are to be extracted
:param is_py: include messages in triple quotes e.g. `_('''message''')`"""
try:
code = render_include(code)
except TemplateError:
# Exception will occur when it encounters John Resig's microtemplating code
pass
messages = []
messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("([^"]*)"').finditer(code)]
messages += [(m.start(), m.groups()[0]) for m in re.compile("_\('([^']*)'").finditer(code)]
if is_py:
messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("{3}([^"]*)"{3}.*\)').finditer(code)]
messages = [(pos, message) for pos, message in messages if is_translatable(message)]
return pos_to_line_no(messages, code)
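# Hedged standalone sketch of the double-quoted pattern used above; the real
# extractor also handles single quotes and, for Python files, triple-quoted strings.
def _demo_message_regex():
	code = 'frappe.msgprint(_("Hello World"))'
	return [m.groups()[0] for m in re.compile(r'_\("([^"]*)"').finditer(code)]
	# -> ['Hello World']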
def is_translatable(m):
if re.search("[a-z]", m) and not m.startswith("icon-") and not m.endswith("px") and not m.startswith("eval:"):
return True
return False
def pos_to_line_no(messages, code):
ret = []
messages = sorted(messages, key=lambda x: x[0])
newlines = [m.start() for m in re.compile('\\n').finditer(code)]
line = 1
newline_i = 0
for pos, message in messages:
while newline_i < len(newlines) and pos > newlines[newline_i]:
line+=1
newline_i+= 1
ret.append((line, message))
return ret
def read_csv_file(path):
"""Read CSV file and return as list of list
:param path: File path"""
from csv import reader
with codecs.open(path, 'r', 'utf-8') as msgfile:
data = msgfile.read()
# for japanese! #wtf
data = data.replace(chr(28), "").replace(chr(29), "")
data = reader([r.encode('utf-8') for r in data.splitlines()])
newdata = [[unicode(val, 'utf-8') for val in row] for row in data]
return newdata
def write_csv_file(path, app_messages, lang_dict):
"""Write translation CSV file.
:param path: File path, usually `[app]/translations`.
:param app_messages: Translatable strings for this app.
:param lang_dict: Full translated dict.
"""
app_messages.sort(lambda x,y: cmp(x[1], y[1]))
from csv import writer
with open(path, 'wb') as msgfile:
w = writer(msgfile, lineterminator='\n')
for p, m in app_messages:
t = lang_dict.get(m, '')
# strip whitespaces
t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t)
w.writerow([p.encode('utf-8') if p else '', m.encode('utf-8'), t.encode('utf-8')])
def get_untranslated(lang, untranslated_file, get_all=False):
"""Returns all untranslated strings for a language and writes in a file
:param lang: Language code.
:param untranslated_file: Output file path.
:param get_all: Return all strings, translated or not."""
clear_cache()
apps = frappe.get_all_apps(True)
messages = []
untranslated = []
for app in apps:
messages.extend(get_messages_for_app(app))
messages = deduplicate_messages(messages)
def escape_newlines(s):
return (s.replace("\\\n", "|||||")
.replace("\\n", "||||")
.replace("\n", "|||"))
if get_all:
print str(len(messages)) + " messages"
with open(untranslated_file, "w") as f:
for m in messages:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
else:
full_dict = get_full_dict(lang)
for m in messages:
if not full_dict.get(m[1]):
untranslated.append(m[1])
if untranslated:
print str(len(untranslated)) + " missing translations of " + str(len(messages))
with open(untranslated_file, "w") as f:
for m in untranslated:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
else:
print "all translated!"
def update_translations(lang, untranslated_file, translated_file):
"""Update translations from a source and target file for a given language.
:param lang: Language code (e.g. `en`).
:param untranslated_file: File path with the messages in English.
:param translated_file: File path with messages in language to be updated."""
clear_cache()
full_dict = get_full_dict(lang)
def restore_newlines(s):
return (s.replace("|||||", "\\\n")
.replace("| | | | |", "\\\n")
.replace("||||", "\\n")
.replace("| | | |", "\\n")
.replace("|||", "\n")
.replace("| | |", "\n"))
translation_dict = {}
for key, value in zip(frappe.get_file_items(untranslated_file, ignore_empty_lines=False),
frappe.get_file_items(translated_file, ignore_empty_lines=False)):
# undo hack in get_untranslated
translation_dict[restore_newlines(key)] = restore_newlines(value)
full_dict.update(translation_dict)
for app in frappe.get_all_apps(True):
write_translations_file(app, lang, full_dict)
def import_translations(lang, path):
"""Import translations from file in standard format"""
clear_cache()
full_dict = get_full_dict(lang)
full_dict.update(get_translation_dict_from_file(path, lang, 'import'))
for app in frappe.get_all_apps(True):
write_translations_file(app, lang, full_dict)
def rebuild_all_translation_files():
"""Rebuild all translation files: `[app]/translations/[lang].csv`."""
for lang in get_all_languages():
for app in frappe.get_all_apps():
write_translations_file(app, lang)
def write_translations_file(app, lang, full_dict=None, app_messages=None):
"""Write a translation file for a given language.
:param app: `app` for which translations are to be written.
:param lang: Language code.
:param full_dict: Full translated language dict (optional).
:param app_messages: Source strings (optional).
"""
if not app_messages:
app_messages = get_messages_for_app(app)
if not app_messages:
return
tpath = frappe.get_pymodule_path(app, "translations")
frappe.create_folder(tpath)
write_csv_file(os.path.join(tpath, lang + ".csv"),
app_messages, full_dict or get_full_dict(lang))
def send_translations(translation_dict):
"""Append translated dict in `frappe.local.response`"""
if "__messages" not in frappe.local.response:
frappe.local.response["__messages"] = {}
frappe.local.response["__messages"].update(translation_dict)
def deduplicate_messages(messages):
ret = []
op = operator.itemgetter(1)
messages = sorted(messages, key=op)
for k, g in itertools.groupby(messages, op):
ret.append(g.next())
return ret
def get_bench_dir():
return os.path.join(frappe.__file__, '..', '..', '..', '..')
def rename_language(old_name, new_name):
language_in_system_settings = frappe.db.get_single_value("System Settings", "language")
if language_in_system_settings == old_name:
frappe.db.set_value("System Settings", "System Settings", "language", new_name)
frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
{ "old_name": old_name, "new_name": new_name })
|
|
"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string or float type')
return s
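# Hedged numeric sketch (not part of scikit-learn itself): for a fixed shrinkage
# value alpha, shrunk_covariance() blends the empirical covariance with a scaled
# identity, mu being the average variance (trace / n_features).
def _shrunk_cov_sketch(X, alpha):
    emp = empirical_covariance(X)
    mu = np.trace(emp) / emp.shape[0]
    return (1.0 - alpha) * emp + alpha * mu * np.eye(emp.shape[0])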
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
elif self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
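# Hedged standalone sketch of the probability mapping used in predict_proba() above:
# per-class decision values pass through a sigmoid and, in the multiclass case, are
# renormalized to sum to one across classes (LibLinear-style OvR normalization).
def _ovr_probability_sketch(decision_values):
    prob = 1.0 / (1.0 + np.exp(-decision_values))
    return prob / prob.sum(axis=1, keepdims=True)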
|
|
import asyncio
import pytest
from click.testing import CliRunner
pytest.importorskip("requests")
import requests
import sys
import os
from time import sleep
import distributed.cli.dask_worker
from distributed import Client, Scheduler
from distributed.metrics import time
from distributed.utils import sync, tmpfile, parse_ports
from distributed.utils_test import popen, terminate_process, wait_for_port
from distributed.utils_test import loop, cleanup # noqa: F401
def test_nanny_worker_ports(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]) as sched:
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--host",
"127.0.0.1",
"--worker-port",
"9684",
"--nanny-port",
"5273",
"--no-dashboard",
]
) as worker:
with Client("127.0.0.1:9359", loop=loop) as c:
start = time()
while True:
d = sync(c.loop, c.scheduler.identity)
if d["workers"]:
break
else:
assert time() - start < 5
sleep(0.1)
assert (
d["workers"]["tcp://127.0.0.1:9684"]["nanny"]
== "tcp://127.0.0.1:5273"
)
def test_nanny_worker_port_range(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]) as sched:
nprocs = 3
worker_port = "9684:9686"
nanny_port = "9688:9690"
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--nprocs",
f"{nprocs}",
"--host",
"127.0.0.1",
"--worker-port",
worker_port,
"--nanny-port",
nanny_port,
"--no-dashboard",
]
) as worker:
with Client("127.0.0.1:9359", loop=loop) as c:
start = time()
while len(c.scheduler_info()["workers"]) < nprocs:
sleep(0.1)
assert time() - start < 5
def get_port(dask_worker):
return dask_worker.port
expected_worker_ports = set(parse_ports(worker_port))
worker_ports = c.run(get_port)
assert set(worker_ports.values()) == expected_worker_ports
expected_nanny_ports = set(parse_ports(nanny_port))
nanny_ports = c.run(get_port, nanny=True)
assert set(nanny_ports.values()) == expected_nanny_ports
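# Hedged sketch (illustration only, not a test): the "low:high" strings passed above
# are expected to expand to an inclusive list of ports.
def _port_range_sketch():
    return parse_ports("9684:9686")  # -> [9684, 9685, 9686]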
def test_nanny_worker_port_range_too_many_workers_raises(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]) as sched:
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--nprocs",
"3",
"--host",
"127.0.0.1",
"--worker-port",
"9684:9685",
"--nanny-port",
"9686:9687",
"--no-dashboard",
]
) as worker:
assert any(
b"Could not start" in worker.stderr.readline() for _ in range(100)
)
def test_memory_limit(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
[
"dask-worker",
"127.0.0.1:8786",
"--memory-limit",
"2e3MB",
"--no-dashboard",
]
) as worker:
with Client("127.0.0.1:8786", loop=loop) as c:
while not c.nthreads():
sleep(0.1)
info = c.scheduler_info()
[d] = info["workers"].values()
assert isinstance(d["memory_limit"], int)
assert d["memory_limit"] == 2e9
def test_no_nanny(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
["dask-worker", "127.0.0.1:8786", "--no-nanny", "--no-dashboard"]
) as worker:
assert any(b"Registered" in worker.stderr.readline() for i in range(15))
@pytest.mark.slow
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_no_reconnect(nanny, loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
wait_for_port(("127.0.0.1", 8786))
with popen(
[
"dask-worker",
"tcp://127.0.0.1:8786",
"--no-reconnect",
nanny,
"--no-dashboard",
]
) as worker:
sleep(2)
terminate_process(sched)
start = time()
while worker.poll() is None:
sleep(0.1)
assert time() < start + 10
def test_resources(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
[
"dask-worker",
"tcp://127.0.0.1:8786",
"--no-dashboard",
"--resources",
"A=1 B=2,C=3",
]
) as worker:
with Client("127.0.0.1:8786", loop=loop) as c:
while not c.scheduler_info()["workers"]:
sleep(0.1)
info = c.scheduler_info()
worker = list(info["workers"].values())[0]
assert worker["resources"] == {"A": 1, "B": 2, "C": 3}
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_local_directory(loop, nanny):
with tmpfile() as fn:
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--local-directory",
fn,
]
) as worker:
with Client("127.0.0.1:8786", loop=loop, timeout=10) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 8
info = c.scheduler_info()
worker = list(info["workers"].values())[0]
assert worker["local_directory"].startswith(fn)
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_scheduler_file(loop, nanny):
with tmpfile() as fn:
with popen(
["dask-scheduler", "--no-dashboard", "--scheduler-file", fn]
) as sched:
with popen(
["dask-worker", "--scheduler-file", fn, nanny, "--no-dashboard"]
):
with Client(scheduler_file=fn, loop=loop) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 10
def test_scheduler_address_env(loop, monkeypatch):
monkeypatch.setenv("DASK_SCHEDULER_ADDRESS", "tcp://127.0.0.1:8786")
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(["dask-worker", "--no-dashboard"]):
with Client(os.environ["DASK_SCHEDULER_ADDRESS"], loop=loop) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 10
def test_nprocs_requires_nanny(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
["dask-worker", "127.0.0.1:8786", "--nprocs=2", "--no-nanny"]
) as worker:
assert any(
b"Failed to launch worker" in worker.stderr.readline()
for i in range(15)
)
def test_nprocs_expands_name(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
["dask-worker", "127.0.0.1:8786", "--nprocs", "2", "--name", "0"]
) as worker:
with popen(["dask-worker", "127.0.0.1:8786", "--nprocs", "2"]) as worker:
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
start = time()
while len(c.scheduler_info()["workers"]) < 4:
sleep(0.2)
assert time() < start + 10
info = c.scheduler_info()
names = [d["name"] for d in info["workers"].values()]
foos = [n for n in names if n.startswith("0-")]
assert len(foos) == 2
assert len(set(names)) == 4
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize(
"listen_address", ["tcp://0.0.0.0:39837", "tcp://127.0.0.2:39837"]
)
def test_contact_listen_address(loop, nanny, listen_address):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--contact-address",
"tcp://127.0.0.2:39837",
"--listen-address",
listen_address,
]
) as worker:
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
info = client.scheduler_info()
assert "tcp://127.0.0.2:39837" in info["workers"]
# roundtrip works
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
assert client.run(func) == {"tcp://127.0.0.2:39837": listen_address}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
def test_respect_host_listen_address(loop, nanny, host):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(
["dask-worker", "127.0.0.1:8786", nanny, "--no-dashboard", "--host", host]
) as worker:
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
info = client.scheduler_info()
# roundtrip works
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
listen_addresses = client.run(func)
assert all(host in v for v in listen_addresses.values())
def test_dashboard_non_standard_ports(loop):
pytest.importorskip("bokeh")
try:
import jupyter_server_proxy # noqa: F401
proxy_exists = True
except ImportError:
proxy_exists = False
with popen(["dask-scheduler", "--port", "3449"]) as s:
with popen(
[
"dask-worker",
"tcp://127.0.0.1:3449",
"--dashboard-address",
":4833",
"--host",
"127.0.0.1",
]
) as proc:
with Client("127.0.0.1:3449", loop=loop) as c:
c.wait_for_workers(1)
pass
response = requests.get("http://127.0.0.1:4833/status")
assert response.ok
redirect_resp = requests.get("http://127.0.0.1:4833/main")
            assert redirect_resp.ok
# TEST PROXYING WORKS
if proxy_exists:
url = "http://127.0.0.1:8787/proxy/4833/127.0.0.1/status"
response = requests.get(url)
assert response.ok
with pytest.raises(Exception):
requests.get("http://localhost:4833/status/")
def test_version_option():
runner = CliRunner()
result = runner.invoke(distributed.cli.dask_worker.main, ["--version"])
assert result.exit_code == 0
@pytest.mark.slow
@pytest.mark.parametrize("no_nanny", [True, False])
def test_worker_timeout(no_nanny):
runner = CliRunner()
args = ["192.168.1.100:7777", "--death-timeout=1"]
if no_nanny:
args.append("--no-nanny")
result = runner.invoke(distributed.cli.dask_worker.main, args)
assert result.exit_code != 0
def test_bokeh_deprecation():
pytest.importorskip("bokeh")
runner = CliRunner()
with pytest.warns(UserWarning, match="dashboard"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--bokeh"])
except ValueError:
# didn't pass scheduler
pass
with pytest.warns(UserWarning, match="dashboard"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--no-bokeh"])
except ValueError:
# didn't pass scheduler
pass
@pytest.mark.asyncio
async def test_integer_names(cleanup):
async with Scheduler(port=0) as s:
with popen(["dask-worker", s.address, "--name", "123"]) as worker:
while not s.workers:
await asyncio.sleep(0.01)
[ws] = s.workers.values()
assert ws.name == 123
@pytest.mark.asyncio
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
async def test_worker_class(cleanup, tmp_path, nanny):
# Create module with custom worker class
WORKER_CLASS_TEXT = """
from distributed.worker import Worker
class MyWorker(Worker):
pass
"""
tmpdir = str(tmp_path)
tmpfile = str(tmp_path / "myworker.py")
with open(tmpfile, "w") as f:
f.write(WORKER_CLASS_TEXT)
# Put module on PYTHONPATH
env = os.environ.copy()
if "PYTHONPATH" in env:
env["PYTHONPATH"] = tmpdir + ":" + env["PYTHONPATH"]
else:
env["PYTHONPATH"] = tmpdir
async with Scheduler(port=0) as s:
async with Client(s.address, asynchronous=True) as c:
with popen(
[
"dask-worker",
s.address,
nanny,
"--worker-class",
"myworker.MyWorker",
],
env=env,
) as worker:
await c.wait_for_workers(1)
def worker_type(dask_worker):
return type(dask_worker).__name__
worker_types = await c.run(worker_type)
assert all(name == "MyWorker" for name in worker_types.values())
|
|
from django.db import models
class CompanyManager(models.Manager):
def get_for_index(self, index):
return self.get(index=index)
class Company(models.Model):
"""
A Paranuaran company.
"""
# The index of the company record in the JSON source data
index = models.PositiveIntegerField(unique=True)
# Referred to as 'company' in the JSON source data
company_name = models.CharField(unique=True, max_length=100)
objects = CompanyManager()
# A current employee isn't dead yet! ;-)
@property
def current_employees(self):
return self.employees.is_alive()
def __str__(self):
return self.company_name
class Meta:
ordering = ['company_name']
verbose_name_plural = 'Companies'
class FoodstuffQuerySet(models.QuerySet):
def fruit(self):
return self.filter(type=Foodstuff.FRUIT)
def vegetables(self):
return self.filter(type=Foodstuff.VEGETABLE)
class Foodstuff(models.Model):
"""
A kind of food - initially either a fruit or a vegetable
"""
FRUIT = 'f'
VEGETABLE = 'v'
TYPE_CHOICES = (
(FRUIT, 'Fruit'),
(VEGETABLE, 'Vegetable'),
)
name = models.CharField(unique=True, max_length=100)
type = models.CharField(max_length=1, choices=TYPE_CHOICES)
objects = FoodstuffQuerySet.as_manager()
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name_plural = 'Foodstuffs'
class Tag(models.Model):
"""
A tag which can be linked to a Person
"""
label = models.CharField(unique=True, max_length=100)
def __str__(self):
return self.label
class Meta:
ordering = ['label']
verbose_name_plural = 'Tags'
class PersonQuerySet(models.QuerySet):
def is_alive(self):
return self.filter(has_died=False)
def has_brown_eyes(self):
return self.filter(eyecolor=Person.EYE_COLOR_BROWN)
def has_friend(self, friend):
return self.filter(friends=friend)
def friend_of(self, friend):
return self.filter(friend_of=friend)
class PersonManager(models.Manager):
def get_for_index(self, index):
return self.get(index=index)
# TODO: Determine what 'friendship' actually means in this context!
# Is a friendship defined just by the 'has friend' (forward) relationship,
# or also by the 'friend of' (reverse) relationship?
#
# Consider:
# Jack:
# friends: Jill, Simon
#
# Jill:
# friends: Jack, Simon
#
# Simon:
# friends: (none)
#
# Susan:
# friends: Jack
#
# There are a range of reasonable answers to the question "who are Jack's
# friends":
# 1) Just the friends Jack lists: Jill & Simon
# 2) (1) plus the people who list Jack as a friend: Jill, Simon, & Susan
# 3) Only those who also consider Jack a friend: Jill (only)
#
# For the purposes of this exercise, we'll choose the easy option - 1!
def mutual_friends_alive_with_brown_eyes(self, person, friend):
# Select people who:
# 'person' considers a friend and
# 'friend' considers a friend and
# are still alive and
# have brown eyes
return (self.friend_of(person).friend_of(friend).
is_alive().has_brown_eyes())
class Person(models.Model):
"""
A Paranuaran Person
"""
EYE_COLOR_BLUE = 'bl'
EYE_COLOR_BROWN = 'br'
EYE_COLOR_CHOICES = (
(EYE_COLOR_BLUE, 'Blue'),
(EYE_COLOR_BROWN, 'Brown'),
)
GENDER_MALE = 'm'
GENDER_FEMALE = 'f'
GENDER_CHOICES = (
(GENDER_MALE, 'Male'),
(GENDER_FEMALE, 'Female'),
)
# The _id field from the JSON source file
json_id = models.CharField(unique=True, max_length=24)
    # The index of the Person record in the JSON file
index = models.PositiveIntegerField(unique=True)
guid = models.CharField(unique=True, max_length=36)
has_died = models.BooleanField()
balance = models.DecimalField(max_digits=8, decimal_places=2)
picture = models.URLField()
age = models.PositiveIntegerField()
eyecolor = models.CharField(max_length=2, choices=EYE_COLOR_CHOICES)
name = models.CharField(max_length=100)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
company = models.ForeignKey(Company, null=True, blank=True,
related_name='employees')
email = models.EmailField(unique=True)
phone = models.CharField(max_length=30)
address = models.CharField(max_length=200)
about = models.TextField()
registered = models.DateTimeField()
tags = models.ManyToManyField(Tag, blank=True)
friends = models.ManyToManyField('Person', blank=True,
related_name='friend_of')
greeting = models.CharField(max_length=100)
favourite_food = models.ManyToManyField(Foodstuff)
objects = PersonManager.from_queryset(PersonQuerySet)()
@property
def favourite_fruit(self):
return self.favourite_food.fruit()
@property
def favourite_vegetables(self):
return self.favourite_food.vegetables()
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name_plural = 'People'
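# Hedged usage sketch (illustration only; p1 and p2 stand for existing Person rows):
# the manager method above is equivalent to chaining the custom queryset filters
# directly, as spelled out here.
def example_mutual_friends_query(p1, p2):
    return (Person.objects.friend_of(p1).friend_of(p2)
            .is_alive().has_brown_eyes())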
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackedUser'
db.create_table('sentry_trackeduser', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])),
('ident', self.gf('django.db.models.fields.CharField')(max_length=200)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True)),
('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
))
db.send_create_signal('sentry', ['TrackedUser'])
# Adding unique constraint on 'TrackedUser', fields ['project', 'ident']
db.create_unique('sentry_trackeduser', ['project_id', 'ident'])
def backwards(self, orm):
# Removing unique constraint on 'TrackedUser', fields ['project', 'ident']
db.delete_unique('sentry_trackeduser', ['project_id', 'ident'])
# Deleting model 'TrackedUser'
db.delete_table('sentry_trackeduser')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'ident', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
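    # Usage sketch (not part of the generated migration): with South installed,
    # the schema change is applied and reverted with the standard management
    # command; '00NN' stands for the (unspecified) number of the previous migration.
    #
    #   python manage.py migrate sentry          # runs forwards()
    #   python manage.py migrate sentry 00NN     # rolls back via backwards()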
|
|
from __future__ import division # so that a / b == float(a) / b
import fire
from ep.evalplatform import draw_details
from ep.evalplatform.compatibility import plot_comparison_legacy_parse
from ep.evalplatform.parsers import *
from ep.evalplatform.parsers_image import *
from ep.evalplatform.plotting import Plotter
from ep.evalplatform.utils import *
from ep.evalplatform.yeast_datatypes import CellOccurence
SEGMENTATION_GNUPLOT_FILE = "plot_segmentation.plt"
TRACKING_GNUPLOT_FILE = "plot_tracking.plt"
parsers = [DefaultPlatformParser(), OldGroundTruthParser(), CellProfilerParser(), CellProfilerParserTracking(),
CellTracerParser(), CellIDParser(), TrackerParser(), CellSerpentParser(), CellStarParser(),
CellProfilerParserTrackingOLDTS2(), LabelImageParser(), MaskImageParser([2, 3])
]
input_type = dict([(p.symbol, p) for p in parsers])
ground_truth_parser = OldGroundTruthParser()
# Max match distance and related settings. Read from evaluation.ini in the
# program folder; when a key is missing, the defaults below are used.
loaded_ini = False
cutoff = 30 # pixels
cutoff_iou = 0.3 # intersection / union
output_evaluation_details = 0
draw_evaluation_details = 0
fill_markers = False
markersize = 7
ignored_frame_size = 0
all_data_evaluated = 0
wide_plots = 0
def filter_border(celllist, image_size=(10000, 10000)):
if celllist == []:
return []
if isinstance(celllist[0], CellOccurence):
def close_to_border(cell, limits):
return not (ignored_frame_size <= cell.position[0] <= (limits[0] - ignored_frame_size) and
ignored_frame_size <= cell.position[1] <= (limits[1] - ignored_frame_size))
return [cell for cell in celllist if not cell.obligatory() or close_to_border(cell, image_size)]
elif len(celllist[0]) == 2:
return [(cell_A, cell_B) for (cell_A, cell_B) in celllist if not cell_A.obligatory() or not cell_B.obligatory()]
else:
print (celllist)
def read_ground_truth(path, parser=None):
"""
Returns::
[Cell]
"""
parser = parser or ground_truth_parser
debug_center.show_in_console(None, "Progress", "Reading ground truth data...")
debug_center.show_in_console(None, "Tech", "".join(["Uses ", parser.__class__.__name__, " parser..."]))
cells = parser.load_from_file(path)
debug_center.show_in_console(None, "Progress", "Done reading ground truth data...")
return cells
def make_all_cells_important(frame_cells):
for frame_cell in frame_cells:
frame_cell[1].colour = 0
def read_results(path, parser, name):
"""
Returns::
(algorithm_name,[Cell])
"""
debug_center.show_in_console(None, "Progress", "".join(["Reading ", name, " results data..."]))
debug_center.show_in_console(None, "Tech", "".join(["Uses ", parser.__class__.__name__, " parser..."]))
cells = parser.load_from_file(path)
    make_all_cells_important(cells)  # the colour field is repurposed later, so temporarily mark every cell as important
debug_center.show_in_console(None, "Progress", "".join(["Done reading ", name, " result data..."]))
return name, cells
def write_to_file_tracking(stats, path):
data_sets = ([], [], [])
for (f, (p, r, ff)) in stats:
f_short = str(f)[:20]
data_sets[0].append((f_short, p))
data_sets[1].append((f_short, r))
data_sets[2].append((f_short, ff))
write_to_file(data_sets, path)
def write_to_file_segmentation(stats, path):
data_sets = ([], [], [], [])
for (f, (a, b, c, d)) in stats:
f_short = str(f)[:20]
data_sets[0].append((f_short, a))
data_sets[1].append((f_short, b))
data_sets[2].append((f_short, c))
data_sets[3].append((f_short, d))
write_to_file(data_sets, path)
def write_to_file_printable(details, path):
if details:
headers = details[0].csv_headers()
records = [d.csv_record() for d in details]
write_to_csv(headers, records, path)
else:
write_to_csv(["No details!"], [], path)
def format_prF(title, params):
(precision, recall, F) = params
return [title, "Precision: " + str(precision), "Recall: " + str(recall), "F: " + str(F)]
def format_summary(algorithm, segmentation, tracking, long_tracking):
lines = ["Algorithm: " + algorithm]
lines += format_prF("Segmentation:", segmentation[1:])
if len(tracking) != 0:
lines += format_prF("Tracking:", tracking)
if len(long_tracking) != 0:
lines += format_prF("Long-time tracking:", long_tracking)
return "\n".join(lines)
def write_summary(algorithm, segmentation, tracking, long_tracking, path):
file = open(path, "w")
summary = format_summary(algorithm, segmentation, tracking, long_tracking)
file.write(summary)
file.close()
def distance(cell_a, cell_b):
return ((cell_a.position[0] - cell_b.position[0]) ** 2 + (cell_a.position[1] - cell_b.position[1]) ** 2) ** 0.5
def find_correspondence(ground_truth, results):
"""
    Greedily match ground-truth and result cells when they are close enough
Input: [Cell] x2
Matching:
[(ground_truth_cell, results_cell)] -> can easily calculate false positives/negatives and cell count + tracking
"""
edges = [(g.similarity(r), (g, r)) for g in ground_truth for r in results if g.is_similar(r, cutoff, cutoff_iou)]
correspondences = []
matchedGT = set([])
matchedRes = set([])
for (d, (a, b)) in sorted(edges, key=lambda x: -x[0]):
if not b in matchedRes:
if not a in matchedGT:
correspondences.append((a, b))
matchedGT.add(a)
matchedRes.add(b)
return correspondences
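# Minimal, self-contained illustration of the greedy matching above (an
# assumption for clarity: plain strings stand in for CellOccurence objects).
# Edges are visited in order of decreasing similarity and each ground-truth
# and result cell is matched at most once.
def _greedy_match_example():
    edges = [(0.9, ('gt1', 'r1')), (0.8, ('gt1', 'r2')), (0.7, ('gt2', 'r2'))]
    matched_gt, matched_res, pairs = set(), set(), []
    for _, (g, r) in sorted(edges, key=lambda x: -x[0]):
        if g not in matched_gt and r not in matched_res:
            pairs.append((g, r))
            matched_gt.add(g)
            matched_res.add(r)
    return pairs  # -> [('gt1', 'r1'), ('gt2', 'r2')]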
def calculate_stats_segmentation(ground_truth_frame, results_frame, image_size=(100000, 100000)):
"""
Input: [Cell] x2
Result: (cell_count_results, cell_count_ground_truth, correspondences, false_positives, false_negatives)
"""
load_general_ini(CONFIG_FILE)
border_results = filter_border(results_frame, image_size)
for c in border_results:
c.colour = 1
border_groundtruth = filter_border(ground_truth_frame, image_size)
for c in border_groundtruth:
c.colour = 1
correspondence = find_correspondence(ground_truth_frame, results_frame)
border_correspondence = filter_border(correspondence, image_size)
matched_GT = [gt for gt, _ in correspondence]
matched_res = [res for _, res in correspondence]
matched_border_GT = [gt for gt, _ in border_correspondence]
matched_border_res = [res for _, res in border_correspondence]
correct_results = [SegmentationResult(gt, res) for (gt, res) in correspondence if
(gt, res) not in border_correspondence]
obligatory_results = [res for res in results_frame if res not in border_results and res not in matched_border_res]
obligatory_gt = [gt for gt in ground_truth_frame if gt not in border_groundtruth and gt not in matched_border_GT]
false_negatives = [SegmentationResult(gt, None) for gt in ground_truth_frame if
gt not in border_groundtruth and gt not in matched_GT]
false_positives = [SegmentationResult(None, res) for res in results_frame if
res not in border_results and res not in matched_res]
return (len(obligatory_results), len(obligatory_gt),
correct_results,
false_positives,
false_negatives)
def calculate_precision_recall_F_metrics(algorithm_number, real_number, correct_number):
"""
Result: (precision, recall, F)
"""
if algorithm_number == 0:
precision = 0
else:
precision = float(correct_number) / algorithm_number
if real_number == correct_number: # 0 / 0
recall = 1
else:
recall = float(correct_number) / real_number
    return (precision, recall,
            2 * float(correct_number) / (real_number + algorithm_number))  # == 2*precision*recall/(precision+recall)
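# Worked example (illustrative numbers only): with 8 detections, 10 ground-truth
# cells and 7 correct matches,
#   precision = 7/8 = 0.875, recall = 7/10 = 0.7, F = 2*7/(10+8) = 0.777...
# which equals the usual 2*precision*recall/(precision+recall), i.e.
#   calculate_precision_recall_F_metrics(8, 10, 7) == (0.875, 0.7, 14.0/18)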
def calculate_metrics_segmentation(params):
"""
Input: (cell_count_results, cell_count_ground_truth, correspondences, false_positives, false_negatives)
Result: (cell_count_results/cell_count_ground_truth, precision, recall, F)
"""
(cell_count_results, cell_count_ground_truth, correspondences, false_positives, false_negatives) = params
prf = calculate_precision_recall_F_metrics(cell_count_results, cell_count_ground_truth, correspondences)
if cell_count_ground_truth == 0:
return tuple([0]) + prf
return tuple([float(cell_count_results) / cell_count_ground_truth]) + prf
def calculate_stats_tracking(params_last, last_mapping, params_new, new_mapping):
"""
(found_links, real_links, correct_links, false_positive, false_negative)
1 to 1 correspondence version
"""
(last_gt, last_res) = params_last
(new_gt, new_res) = params_new
# ignore non obligatory GT cells
last_gt = [c for c in last_gt if c.obligatory()]
new_gt = [c for c in new_gt if c.obligatory()]
    # keep only those result cells that are matched to obligatory GT cells, or that are not matched at all
last_res = [c for c in last_res if (last_mapping == [] or (c not in list(zip(*last_mapping))[1])) or (
list(zip(*last_mapping))[0][
list(zip(*last_mapping))[1].index(c)].obligatory())] # searches in [(a,b)] for the a when given b.
new_res = [c for c in new_res if (new_mapping == [] or (c not in list(zip(*new_mapping))[1])) or (
list(zip(*new_mapping))[0][list(zip(*new_mapping))[1].index(c)].obligatory())]
# ignore mapping connected to the non-obligatory GT cells
last_mapping = [(gt, res) for (gt, res) in last_mapping if gt.obligatory()]
new_mapping = [(gt, res) for (gt, res) in new_mapping if gt.obligatory()]
# find links and make pairs of cells in results with the same unique_id in GT
number_change = [(last[1], new[1], last[0], new[0]) for last in last_mapping for new in new_mapping if
last[0].unique_id == new[0].unique_id]
correct_links = [(TrackingLink(l_res, n_res), TrackingLink(l_gt, n_gt)) for (l_res, n_res, l_gt, n_gt) in
number_change
if l_res.unique_id == n_res.unique_id]
    # build the lists of existing (ground-truth) links and detected (result) links
real_links = [TrackingLink(last, new) for last in last_gt for new in new_gt if last.unique_id == new.unique_id]
found_links = [TrackingLink(last, new) for last in last_res for new in new_res if last.unique_id == new.unique_id]
correct_results = [TrackingResult(link_gt, link_res) for (link_res, link_gt) in correct_links]
false_negatives = [TrackingResult(gt, None) for gt in real_links if
correct_links == [] or gt not in list(zip(*correct_links))[1]]
false_positives = [TrackingResult(None, res) for res in found_links if
correct_links == [] or res not in list(zip(*correct_links))[0]]
return found_links, real_links, correct_results, false_positives, false_negatives # evaluation_details
def load_general_ini(path):
global cutoff, cutoff_iou, draw_evaluation_details, ignored_frame_size, \
loaded_ini, fill_markers, markersize, all_data_evaluated
if read_ini(path, 'evaluation', 'maxmatchdistance') != '':
cutoff = float(read_ini(path, 'evaluation', 'maxmatchdistance'))
if read_ini(path, 'evaluation', 'miniousimilarity') != '':
cutoff_iou = float(read_ini(path, 'evaluation', 'miniousimilarity'))
if read_ini(path, 'evaluation', 'drawevaluationdetails') != '':
draw_evaluation_details = float(read_ini(path, 'evaluation', 'drawevaluationdetails'))
if read_ini(path, 'evaluation', 'ignoredframesize') != '':
ignored_frame_size = float(read_ini(path, 'evaluation', 'ignoredframesize'))
if read_ini(path, 'evaluation', 'alldataevaluated') != '':
all_data_evaluated = bool(int(read_ini(path, 'evaluation', 'alldataevaluated')))
if read_ini(path, 'details', 'fill_markers') != '':
fill_markers = bool(int(read_ini(path, 'details', 'fill_markers')))
if read_ini(path, 'details', 'markersize') != '':
markersize = int(read_ini(path, 'details', 'markersize'))
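# Example evaluation.ini covering the keys read above and in run() below
# (all values shown are illustrative, matching the in-code defaults where one
# exists):
#
#   [evaluation]
#   maxmatchdistance = 30
#   miniousimilarity = 0.3
#   outputevaluationdetails = 0
#   drawevaluationdetails = 0
#   ignoredframesize = 0
#   alldataevaluated = 0
#
#   [details]
#   fill_markers = 0
#   markersize = 7
#
#   [plot]
#   terminal = png
#   wideplots = 0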
def run(ground_truth_csv_file,
algorithm_results_csv_file, algorithm_results_type, algorithm_name=None,
ground_truth_seg_csv_file=None, ground_truth_special_parser=None,
input_directory=None, input_file_part=None,
evaluate_tracking=True, output_summary_stdout=False):
global ground_truth_parser, output_evaluation_details, wide_plots
input_file_part = input_file_part or ""
algorithm_name = algorithm_name or "Algorithm"
if ground_truth_special_parser is not None:
ground_truth_parser = input_type[ground_truth_special_parser]
ground_truth_seg_csv_file = ground_truth_seg_csv_file or ground_truth_csv_file
load_general_ini(CONFIG_FILE)
if read_ini(CONFIG_FILE, 'evaluation', 'outputevaluationdetails') != '':
output_evaluation_details = float(read_ini(CONFIG_FILE, 'evaluation', 'outputevaluationdetails'))
if read_ini(CONFIG_FILE, 'plot', 'terminal') != '':
terminal_type = read_ini(CONFIG_FILE, 'plot', 'terminal').strip()
if read_ini(CONFIG_FILE, 'plot', 'wideplots') != '':
wide_plots = bool(int(read_ini(CONFIG_FILE, 'plot', 'wideplots')))
debug_center.configure(CONFIG_FILE)
if algorithm_results_type not in input_type:
debug_center.show_in_console(None, "Error",
"ERROR: " + algorithm_results_type + " is not supported. There are supported types: " + str(
input_type))
sys.exit()
else:
parser = input_type[algorithm_results_type]
debug_center.show_in_console(None, "Info", "".join(["Algorithm name: ", algorithm_name]))
filtered_algorithm_name = ''.join([c for c in algorithm_name if c.isalnum()])
results_data = read_results(algorithm_results_csv_file, parser, algorithm_name)
def read_GT(ground_truth_csv_file, tracking=False):
ground_truth_data = read_ground_truth(ground_truth_csv_file)
        # drop ground-truth entries that have no tracking data
if tracking:
ground_truth_data = [(f, cell) for (f, cell) in ground_truth_data if cell.has_tracking_data()]
        # use all frames with data, or only the frames where both GT and algorithm results exist
gt_set = set([item[0] for item in ground_truth_data])
res_set = set([item[0] for item in results_data[1]])
list_of_frames = sorted(gt_set | res_set if all_data_evaluated else gt_set & res_set)
if list_of_frames == []:
debug_center.show_in_console(None, "Error",
"ERROR: No ground truth data! Intersection of ground truth and results is empty!")
sys.exit()
data_per_frame = dict([(frame, ([g[1] for g in ground_truth_data if g[0] == frame],
[r[1] for r in results_data[1] if
r[0] == frame and not (tracking and not r[1].has_tracking_data())]))
for frame in list_of_frames])
return ground_truth_data, list_of_frames, data_per_frame
ground_truth_data, list_of_frames, data_per_frame = read_GT(ground_truth_seg_csv_file)
debug_center.show_in_console(None, "Progress", "Evaluating segmentation...")
stats = []
segmentation_details = []
image_sizes = {}
if output_evaluation_details and draw_evaluation_details:
overlord = draw_details.EvaluationDetails(SEGDETAILS_SUFFIX, input_file_part)
image_sizes = draw_details.get_images_sizes(overlord, input_directory)
for frame in list_of_frames:
image_size = image_sizes.get(frame, (100000, 100000))
(cr, cg, corr, fp, fn) = calculate_stats_segmentation(data_per_frame[frame][0], data_per_frame[frame][1],
image_size)
segmentation_details += (corr, fp, fn)
stats.append((frame, (cr, cg, len(corr), len(fp), len(fn))))
(crs, cgs, corrs) = (0, 0, 0)
for (f, (cr, cg, corr, fp, fn)) in stats:
crs += cr
cgs += cg
corrs += corr
results_seg_summary = calculate_metrics_segmentation((crs, cgs, corrs, 0, 0))
debug_center.show_in_console(None, "Progress", "Done evaluating segmentation...")
summary_path = algorithm_results_csv_file + "." + filtered_algorithm_name + SUMMARY_SUFFIX
tmp_path = algorithm_results_csv_file + "." + filtered_algorithm_name + SEGPLOTDATA_SUFFIX
plot_path = algorithm_results_csv_file + "." + filtered_algorithm_name + SEGPLOT_SUFFIX
details_path = algorithm_results_csv_file + "." + filtered_algorithm_name + SEGDETAILS_SUFFIX
debug_center.show_in_console(None, "Progress", "Ploting segmentation results...")
write_to_file_segmentation([(stat[0], calculate_metrics_segmentation(stat[1])) for stat in stats], tmp_path)
plot_file = package_path(SEGMENTATION_GNUPLOT_FILE)
with Plotter(terminal_type, plot_file, algorithm_name) as plotter:
plotter.setup_ploting_area(wide_plots, stats)
plotter.plot_it(tmp_path, plot_path)
debug_center.show_in_console(None, "Progress", "Done ploting segmentation results...")
if output_evaluation_details:
debug_center.show_in_console(None, "Progress", "Printing detailed segmentation results...")
write_to_file_printable(reduce_plus(segmentation_details), details_path)
debug_center.show_in_console(None, "Progress", "Done printing detailed segmentation results...")
if draw_evaluation_details:
if not (input_directory is None or input_file_part is None):
debug_center.show_in_console(None, "Progress", "Drawing detailed segmentation results...")
output_file_prefix = "SegDetails_"
overlord = draw_details.EvaluationDetails(details_path,
required_substring=input_file_part,
fill_markers=fill_markers,
markersize=markersize)
output_drawings_directory = ensure_directory_in(details_path, SEG_DRAWING_FOLDER)
draw_details.run(overlord, input_directory, output_drawings_directory, output_file_prefix)
debug_center.show_in_console(None, "Progress", "Done drawing detailed segmentation results...")
else:
debug_center.show_in_console(None, "Info",
"Skipping evaluation details drawing despite parameters as no input images were provided.")
else:
debug_center.show_in_console(None, "Info", "Skipping evaluation details printing as desired by parameters.")
if evaluate_tracking == 1:
ground_truth_data, list_of_frames, data_per_frame = read_GT(ground_truth_csv_file, True)
debug_center.show_in_console(None, "Progress", "Evaluating tracking...")
stats_tracking = []
tracking_details = []
data = data_per_frame[list_of_frames[0]]
last_data = data
last_correspondence = find_correspondence(data[0], data[1])
# last_frame_id = list_of_frames[0]
        # collect all evaluation details
for frame in list_of_frames[1:]:
data = data_per_frame[frame]
new_correspondence = find_correspondence(data[0], data[1])
(tcr, tcg, tcorr, tfp, tfn) = calculate_stats_tracking(last_data, last_correspondence, data,
new_correspondence)
tracking_details += (tcorr, tfp, tfn)
stats_tracking.append((frame, (len(tcr), len(tcg), len(tcorr))))
last_correspondence = new_correspondence
last_data = data
(tcrs, tcgs, tcorrs) = (0, 0, 0)
for (f, (tcr, tcg, tcorr)) in stats_tracking:
tcrs += tcr
tcgs += tcg
tcorrs += tcorr
results_track_summary = calculate_precision_recall_F_metrics(tcrs, tcgs, tcorrs)
debug_center.show_in_console(None, "Progress", "Done evaluating tracking...")
tmp_path = algorithm_results_csv_file + "." + filtered_algorithm_name + TRACKPLOTDATA_SUFFIX
plot_path = algorithm_results_csv_file + "." + filtered_algorithm_name + TRACKPLOT_SUFFIX
details_path = algorithm_results_csv_file + "." + filtered_algorithm_name + TRACKDETAILS_SUFFIX
debug_center.show_in_console(None, "Progress", "Ploting tracking results...")
write_to_file_tracking(
[(stat[0], calculate_precision_recall_F_metrics(*stat[1])) for stat in stats_tracking], tmp_path)
plot_file = package_path(TRACKING_GNUPLOT_FILE)
with Plotter(terminal_type, plot_file, algorithm_name) as plotter:
plotter.setup_ploting_area(wide_plots, stats_tracking)
plotter.plot_it(tmp_path, plot_path)
debug_center.show_in_console(None, "Progress", "Done ploting tracking results...")
if output_evaluation_details:
debug_center.show_in_console(None, "Progress", "Printing detailed tracking results...")
write_to_file_printable(reduce_plus(tracking_details), details_path)
debug_center.show_in_console(None, "Progress", "Done printing detailed tracking results...")
if draw_evaluation_details:
if not (input_directory is None or input_file_part is None):
debug_center.show_in_console(None, "Progress", "Drawing detailed tracking results...")
output_file_prefix = "TrackDetails_"
overlord = draw_details.EvaluationDetails(details_path,
required_substring=input_file_part,
fill_markers=fill_markers,
markersize=markersize)
output_drawings_directory = ensure_directory_in(details_path, TRACK_DRAWING_FOLDER)
draw_details.run(overlord, input_directory, output_drawings_directory, output_file_prefix)
debug_center.show_in_console(None, "Progress", "Done drawing detailed tracking results...")
else:
debug_center.show_in_console(None, "Info",
"Skipping evaluation details drawing despite parameters as no input images were provided.")
else:
debug_center.show_in_console(None, "Info",
"Skipping evaluation details printing as desired by parameters.")
# Calculate additional long-time tracking measure
if len(data_per_frame) > 2:
debug_center.show_in_console(None, "Progress", "Evaluating long-time tracking...")
long_tracking_details = []
first_data = data_per_frame[list_of_frames[0]]
first_correspondence = find_correspondence(first_data[0], first_data[1])
last_data = data_per_frame[list_of_frames[-1]]
last_correspondence = find_correspondence(last_data[0], last_data[1])
(lcr, lcg, lcorr, lfp, lfn) = calculate_stats_tracking(first_data, first_correspondence, last_data,
last_correspondence)
results_long_track_summary = calculate_precision_recall_F_metrics(len(lcr), len(lcg), len(lcorr))
long_tracking_details += (lcorr, lfp, lfn)
details_path = algorithm_results_csv_file + "." + filtered_algorithm_name + LONGTRACKDETAILS_SUFFIX
if output_evaluation_details:
debug_center.show_in_console(None, "Progress", "Printing detailed long-time tracking results...")
write_to_file_printable(reduce_plus(long_tracking_details), details_path)
debug_center.show_in_console(None, "Progress",
"Done printing detailed long-time tracking results...")
if draw_evaluation_details:
if not (input_directory is None or input_file_part is None):
debug_center.show_in_console(None, "Progress",
"Drawing detailed long-time tracking results...")
output_file_prefix = "LongTrackDetails_"
overlord = draw_details.EvaluationDetails(details_path,
required_substring=input_file_part,
fill_markers=fill_markers,
markersize=markersize)
output_drawings_directory = ensure_directory_in(details_path, LONG_DRAWING_FOLDER)
draw_details.run(overlord, input_directory, output_drawings_directory, output_file_prefix)
debug_center.show_in_console(None, "Progress",
"Done drawing detailed long-time tracking results...")
else:
debug_center.show_in_console(None, "Info",
"Skipping evaluation details drawing despite parameters as no input images were provided.")
else:
debug_center.show_in_console(None, "Info",
"Skipping evaluation details printing as desired by parameters.")
debug_center.show_in_console(None, "Progress", "Done evaluating long-time tracking...")
else:
debug_center.show_in_console(None, "Info",
"Skipping long-time tracking evaluation because there are too few frames.")
results_long_track_summary = []
else:
debug_center.show_in_console(None, "Info", "Skipping tracking evaluation as desired by parameters.")
results_track_summary = []
results_long_track_summary = []
# save all the evaluation details if chosen to do so
# plot the results if /PLOT directory + name
write_summary(algorithm_name, results_seg_summary, results_track_summary, results_long_track_summary,
summary_path)
debug_center.show_in_console(None, "Progress", "Done evaluating...")
if output_summary_stdout:
debug_center.show_in_console(None, "Result",
format_summary(algorithm_name, results_seg_summary, results_track_summary,
results_long_track_summary))
if __name__ == '__main__':
args = sys.argv
if any(["/" in a for a in args]) or args[1] == 'legacy':
if args[1] == 'legacy':
args = args[:1] + args[2:]
params = plot_comparison_legacy_parse(args)
run(**params)
else:
fire.Fire(run)
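# Example invocations (the script name "evaluate.py" is assumed; fire exposes
# run()'s parameters as positional arguments and --flags, and the third
# argument must be one of the parser symbols registered in input_type):
#
#   python evaluate.py ground_truth.csv results.csv <parser_symbol> \
#       --algorithm_name=MyAlgo --evaluate_tracking=True
#   python evaluate.py legacy <old-style arguments>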
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DscpConfigurationOperations:
"""DscpConfigurationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
dscp_configuration_name: str,
parameters: "_models.DscpConfiguration",
**kwargs: Any
) -> "_models.DscpConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DscpConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
dscp_configuration_name: str,
parameters: "_models.DscpConfiguration",
**kwargs: Any
) -> AsyncLROPoller["_models.DscpConfiguration"]:
"""Creates or updates a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_name: The name of the resource.
:type dscp_configuration_name: str
:param parameters: Parameters supplied to the create or update dscp configuration operation.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.DscpConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DscpConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.DscpConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
dscp_configuration_name=dscp_configuration_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
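    # Usage sketch (not part of the generated client; the attribute name
    # "dscp_configuration" on the network management client is an assumption):
    #
    #   poller = await network_client.dscp_configuration.begin_create_or_update(
    #       "my-resource-group", "my-dscp-config", dscp_parameters)
    #   result = await poller.result()   # waits for the long-running operation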
async def _delete_initial(
self,
resource_group_name: str,
dscp_configuration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
dscp_configuration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_name: The name of the resource.
:type dscp_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
dscp_configuration_name=dscp_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
dscp_configuration_name: str,
**kwargs: Any
) -> "_models.DscpConfiguration":
"""Gets a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_name: The name of the resource.
:type dscp_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DscpConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.DscpConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DscpConfigurationListResult"]:
"""Gets a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DscpConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.DscpConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DscpConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations'} # type: ignore
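# Hedged usage sketch (editorial): the AsyncItemPaged returned here yields the
# individual DscpConfiguration items (extract_data unwraps each
# DscpConfigurationListResult page), so callers simply iterate:
#
#     async for dscp in client.dscp_configuration.list("my-rg"):
#         print(dscp.id)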
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.DscpConfigurationListResult"]:
"""Gets all dscp configurations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DscpConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.DscpConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DscpConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/dscpConfigurations'} # type: ignore
|
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.typedispatch import *
from language.chameleon import ast, cfg
from . dfs import CFGDFS
from . import simplify
def memoizeMethod(getter):
def memodecorator(func):
def memowrap(self, *args):
cache = getter(self)
if args not in cache:
result = func(self, *args)
cache[args] = result
else:
result = cache[args]
return result
return memowrap
return memodecorator
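# Editorial illustration (hedged, not part of the original module): the
# decorator caches per instance, keyed by the positional arguments, in the
# dict returned by `getter`. The class below is a minimal hypothetical sketch.
class _MemoizeMethodExample(object):
    def __init__(self):
        self._cache = {}

    @memoizeMethod(lambda self: self._cache)
    def double(self, x):
        return x * 2

# _MemoizeMethodExample().double(3) computes 6 and stores it under the key
# (3,); calling double(3) again on the same instance returns the cached value.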
class ASTCloner(TypeDispatcher):
def __init__(self, origin):
self.origin = origin
self.cache = {}
def adjustOrigin(self, node):
origin = node.annotation.origin
if origin is None:
origin = [None]
node.rewriteAnnotation(origin=self.origin + origin)
return node
@dispatch(str, type(None))
def visitLeaf(self, node):
return node
@dispatch(ast.Local)
@memoizeMethod(lambda self: self.cache)
def visitLocal(self, node):
result = ast.Local(self(node.type), node.name)
result.annotation = node.annotation
return self.adjustOrigin(result)
@dispatch(ast.Existing)
def visitExisting(self, node):
return node.clone()
@dispatch(ast.Assign, ast.Discard, ast.AugmentedAssign,
ast.Special, ast.BinaryOp, ast.Call, ast.Phi)
def visitOK(self, node):
return self.adjustOrigin(node.rewriteChildren(self))
class CFGClonerPre(TypeDispatcher):
def __init__(self, astcloner):
self.astcloner = astcloner
self.cache = {}
@dispatch(cfg.Entry, cfg.Exit, cfg.Yield)
def visitEntry(self, node):
return type(node)()
@dispatch(cfg.Merge)
def visitMerge(self, node):
merge = cfg.Merge()
merge.phi = [self.astcloner(phi) for phi in node.phi]
return merge
@dispatch(cfg.Switch)
def visitSwitch(self, node):
return cfg.Switch(self.astcloner(node.condition))
@dispatch(cfg.Suite)
def visitSuite(self, node):
suite = cfg.Suite()
for op in node.ops:
suite.ops.append(self.astcloner(op))
return suite
def __call__(self, node):
self.cache[node] = TypeDispatcher.__call__(self, node)
class CFGClonerPost(TypeDispatcher):
@defaultdispatch
def visitEntry(self, node):
replace = self.cache[node]
for name, nxt in node.next.items():
replace.clonedExit(name, self.cache[nxt])
for prev, name in node.iterprev():
replace.clonedPrev(self.cache[prev], name)
class CFGCloner(object):
def __init__(self, origin):
self.cloner = CFGDFS(CFGClonerPre(ASTCloner(origin)), CFGClonerPost())
self.cloner.post.cache = self.cloner.pre.cache
self.cfgCache = self.cloner.pre.cache
self.lcl = self.cloner.pre.astcloner
def process(self, g):
self.cloner.process(g.entryTerminal)
newG = cfg.Code()
# HACK create an empty behavior?
newG.code = ast.BehaviorDecl(g.code.name+'_clone', [self.lcl(p) for p in g.code.params], g.code.returnType, ast.Suite([]))
newG.returnParam = self.lcl(g.returnParam)
newG.entryTerminal = self.cfgCache[g.entryTerminal]
newG.normalTerminal = self.cfgCache.get(g.normalTerminal, newG.normalTerminal)
newG.failTerminal = self.cfgCache.get(g.failTerminal, newG.failTerminal)
newG.errorTerminal = self.cfgCache.get(g.errorTerminal, newG.errorTerminal)
return newG
class InlineTransform(TypeDispatcher):
def __init__(self, compiler, g, lut):
self.compiler = compiler
self.g = g
self.lut = lut
@dispatch(cfg.Entry, cfg.Exit, cfg.Merge, cfg.Yield)
def visitOK(self, node):
pass
@dispatch(cfg.Switch)
def visitSwitch(self, node):
pass
@dispatch(cfg.Suite)
def visitSuite(self, node):
failTerminal = cfg.Merge() if node.getExit('fail') else None
errorTerminal = cfg.Merge() if node.getExit('error') else None
def makeSuite():
suite = cfg.Suite()
suite.setExit('fail', failTerminal)
suite.setExit('error', errorTerminal)
return suite
head = makeSuite()
current = head
inlined = False
for op in node.ops:
invokes = self.getInline(op)
if invokes is not None:
inlined = True
call = op.expr
cloner = CFGCloner(call.annotation.origin)
cloned = cloner.process(invokes)
print("\t", invokes.code.name)
# PREAMBLE, evaluate arguments
for p, a in zip(cloned.code.params, call.arguments):
current.ops.append(ast.Assign(p, a))
# Connect into the cloned code
current.transferExit('normal', cloned.entryTerminal, 'entry')
current.simplify()
cloned.failTerminal.redirectEntries(failTerminal)
cloned.errorTerminal.redirectEntries(errorTerminal)
# Connect the normal output
if cloned.normalTerminal.prev:
current = makeSuite()
cloned.normalTerminal.redirectEntries(current)
else:
current = None
break
# POSTAMBLE transfer the return value
if isinstance(op, ast.Assign):
current.ops.append(ast.Assign(op.target, cloned.returnParam))
elif isinstance(op, ast.AugmentedAssign):
current.ops.append(ast.AugmentedAssign(op.target, op.op, cloned.returnParam))
else:
current.ops.append(op)
if inlined:
# Inlining was performed, commit changes
node.redirectEntries(head)
# Redirect the outputs
if current:
if node.getExit('normal'):
current.transferExit('normal', node, 'normal')
current.simplify()
if node.getExit('fail'):
failTerminal.transferExit('normal', node, 'fail')
failTerminal.simplify()
if node.getExit('error'):
errorTerminal.transferExit('normal', node, 'error')
errorTerminal.simplify()
def getInline(self, stmt):
if isinstance(stmt, (ast.Assign, ast.Discard, ast.AugmentedAssign)):
expr = stmt.expr
if isinstance(expr, ast.Call):
expr = expr.expr
if isinstance(expr, ast.Existing):
if expr.object.data in self.lut:
return self.lut[expr.object.data]
return None
def evaluate(compiler, g, lut):
transform = CFGDFS(post=InlineTransform(compiler, g, lut))
transform.process(g.entryTerminal)
simplify.evaluate(compiler, g)
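# Editorial note (hedged): `lut` is assumed to map the objects referenced by
# ast.Existing call targets (expr.object.data) to their cfg.Code graphs; each
# direct call whose target is found in `lut` has that graph cloned and spliced
# into `g` in place of the call, after which simplify.evaluate cleans up. A
# call therefore looks roughly like:
#
#     evaluate(compiler, callerGraph, {calleeObject: calleeGraph})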
|
|
import logging
import operator
import os
import pickle
import threading
from collections import deque, namedtuple
from functools import reduce
from itertools import chain
from typing import Optional
import numpy as np
from django.conf import settings
from django.core.cache import cache
from django.utils.functional import cached_property
from scipy.sparse.csgraph._shortest_path import shortest_path
from shapely import prepared
from shapely.geometry import LineString, Point
from shapely.ops import unary_union
from c3nav.mapdata.models import AltitudeArea, Area, GraphEdge, Level, LocationGroup, MapUpdate, Space, WayType
from c3nav.mapdata.models.geometry.space import POI, CrossDescription, LeaveDescription
from c3nav.mapdata.models.locations import CustomLocationProxyMixin
from c3nav.mapdata.utils.geometry import assert_multipolygon, get_rings, good_representative_point
from c3nav.mapdata.utils.locations import CustomLocation
from c3nav.routing.exceptions import LocationUnreachable, NoRouteFound, NotYetRoutable
from c3nav.routing.route import Route
logger = logging.getLogger('c3nav')
class Router:
filename = os.path.join(settings.CACHE_ROOT, 'router')
def __init__(self, levels, spaces, areas, pois, groups, restrictions, nodes, edges, waytypes, graph):
self.levels = levels
self.spaces = spaces
self.areas = areas
self.pois = pois
self.groups = groups
self.restrictions = restrictions
self.nodes = nodes
self.edges = edges
self.waytypes = waytypes
self.graph = graph
@staticmethod
def get_altitude_in_areas(areas, point):
return max(area.get_altitudes(point)[0] for area in areas if area.geometry_prep.intersects(point))
@classmethod
def rebuild(cls, update):
levels_query = Level.objects.prefetch_related('buildings', 'spaces', 'altitudeareas', 'groups',
'spaces__holes', 'spaces__columns', 'spaces__groups',
'spaces__obstacles', 'spaces__lineobstacles',
'spaces__graphnodes', 'spaces__areas', 'spaces__areas__groups',
'spaces__pois', 'spaces__pois__groups')
levels = {}
spaces = {}
areas = {}
pois = {}
groups = {}
restrictions = {}
nodes = deque()
for level in levels_query:
buildings_geom = unary_union(tuple(building.geometry for building in level.buildings.all()))
nodes_before_count = len(nodes)
for group in level.groups.all():
groups.setdefault(group.pk, {}).setdefault('levels', set()).add(level.pk)
if level.access_restriction_id:
restrictions.setdefault(level.access_restriction_id, RouterRestriction()).spaces.update(
space.pk for space in level.spaces.all()
)
for space in level.spaces.all():
# create space geometries
accessible_geom = space.geometry.difference(unary_union(
tuple(column.geometry for column in space.columns.all() if column.access_restriction_id is None) +
tuple(hole.geometry for hole in space.holes.all()) +
((buildings_geom, ) if space.outside else ())
))
obstacles_geom = unary_union(
tuple(obstacle.geometry for obstacle in space.obstacles.all()) +
tuple(lineobstacle.buffered_geometry for lineobstacle in space.lineobstacles.all())
)
clear_geom = unary_union(tuple(get_rings(accessible_geom.difference(obstacles_geom))))
clear_geom_prep = prepared.prep(clear_geom)
for group in space.groups.all():
groups.setdefault(group.pk, {}).setdefault('spaces', set()).add(space.pk)
if space.access_restriction_id:
restrictions.setdefault(space.access_restriction_id, RouterRestriction()).spaces.add(space.pk)
space_nodes = tuple(RouterNode.from_graph_node(node, i)
for i, node in enumerate(space.graphnodes.all()))
for i, node in enumerate(space_nodes, start=len(nodes)):
node.i = i
nodes.extend(space_nodes)
space_obj = space
space = RouterSpace(space)
space.nodes = set(node.i for node in space_nodes)
for area in space_obj.areas.all():
for group in area.groups.all():
groups.setdefault(group.pk, {}).setdefault('areas', set()).add(area.pk)
area._prefetched_objects_cache = {}
area = RouterArea(area)
area_nodes = tuple(node for node in space_nodes if area.geometry_prep.intersects(node.point))
area.nodes = set(node.i for node in area_nodes)
for node in area_nodes:
node.areas.add(area.pk)
if not area.nodes and space_nodes:
nearest_node = min(space_nodes, key=lambda node: area.geometry.distance(node.point))
area.nodes.add(nearest_node.i)
areas[area.pk] = area
space.areas.add(area.pk)
for area in level.altitudeareas.all():
if not space.geometry_prep.intersects(area.geometry):
continue
for subgeom in assert_multipolygon(accessible_geom.intersection(area.geometry)):
if subgeom.is_empty:
continue
area_clear_geom = unary_union(tuple(get_rings(subgeom.difference(obstacles_geom))))
if area_clear_geom.is_empty:
continue
area = RouterAltitudeArea(subgeom, area_clear_geom,
area.altitude, area.altitude2, area.point1, area.point2)
area_nodes = tuple(node for node in space_nodes if area.geometry_prep.intersects(node.point))
area.nodes = set(node.i for node in area_nodes)
for node in area_nodes:
altitude = area.get_altitude(node)
if node.altitude is None or node.altitude < altitude:
node.altitude = altitude
space.altitudeareas.append(area)
for node in space_nodes:
if node.altitude is not None:
continue
logger.warning('Node %d in space %d is not inside an altitude area' % (node.pk, space.pk))
node_altitudearea = min(space.altitudeareas,
key=lambda a: a.geometry.distance(node.point), default=None)
if node_altitudearea:
node.altitude = node_altitudearea.get_altitude(node)
else:
node.altitude = float(level.base_altitude)
logger.info('Space %d has no altitude areas' % space.pk)
for area in space.altitudeareas:
# create fallback nodes
if not area.nodes and space_nodes:
fallback_point = good_representative_point(area.clear_geometry)
fallback_node = RouterNode(None, None, fallback_point.x, fallback_point.y,
space.pk, area.get_altitude(fallback_point))
# todo: check waytypes here
for node in space_nodes:
line = LineString([(node.x, node.y), (fallback_node.x, fallback_node.y)])
if line.length < 5 and not clear_geom_prep.intersects(line):
area.fallback_nodes[node.i] = (
fallback_node,
RouterEdge(fallback_node, node, 0)
)
if not area.fallback_nodes:
nearest_node = min(space_nodes, key=lambda node: fallback_point.distance(node.point))
area.fallback_nodes[nearest_node.i] = (
fallback_node,
RouterEdge(fallback_node, nearest_node, 0)
)
for poi in space_obj.pois.all():
for group in poi.groups.all():
groups.setdefault(group.pk, {}).setdefault('pois', set()).add(poi.pk)
poi._prefetched_objects_cache = {}
poi = RouterPoint(poi)
try:
altitudearea = space.altitudearea_for_point(poi.geometry)
poi.altitude = altitudearea.get_altitude(poi.geometry)
poi_nodes = altitudearea.nodes_for_point(poi.geometry, all_nodes=nodes)
except LocationUnreachable:
poi_nodes = {}
poi.nodes = set(i for i in poi_nodes.keys())
poi.nodes_addition = poi_nodes
pois[poi.pk] = poi
space.pois.add(poi.pk)
for column in space_obj.columns.all():
if column.access_restriction_id is None:
continue
column.geometry_prep = prepared.prep(column.geometry)
column_nodes = tuple(node for node in space_nodes if column.geometry_prep.intersects(node.point))
column_nodes = set(node.i for node in column_nodes)
restrictions.setdefault(column.access_restriction_id,
RouterRestriction()).additional_nodes.update(column_nodes)
space_obj._prefetched_objects_cache = {}
space.src.geometry = accessible_geom
spaces[space.pk] = space
level_spaces = set(space.pk for space in level.spaces.all())
level._prefetched_objects_cache = {}
level = RouterLevel(level, spaces=level_spaces)
level.nodes = set(range(nodes_before_count, len(nodes)))
levels[level.pk] = level
# add graph descriptions
for description in LeaveDescription.objects.all():
spaces[description.space_id].leave_descriptions[description.target_space_id] = description.description
for description in CrossDescription.objects.all():
spaces[description.space_id].cross_descriptions[(description.origin_space_id,
description.target_space_id)] = description.description
# waytypes
waytypes = deque([RouterWayType(None)])
waytypes_lookup = {None: 0}
for i, waytype in enumerate(WayType.objects.all(), start=1):
waytypes.append(RouterWayType(waytype))
waytypes_lookup[waytype.pk] = i
waytypes = tuple(waytypes)
# collect nodes
nodes = tuple(nodes)
nodes_lookup = {node.pk: node.i for node in nodes}
# collect edges
edges = tuple(RouterEdge(from_node=nodes[nodes_lookup[edge.from_node_id]],
to_node=nodes[nodes_lookup[edge.to_node_id]],
waytype=waytypes_lookup[edge.waytype_id],
access_restriction=edge.access_restriction_id) for edge in GraphEdge.objects.all())
edges = {(edge.from_node, edge.to_node): edge for edge in edges}
# build graph matrix
graph = np.full(shape=(len(nodes), len(nodes)), fill_value=np.inf, dtype=np.float32)
for edge in edges.values():
index = (edge.from_node, edge.to_node)
graph[index] = edge.distance
waytype = waytypes[edge.waytype]
(waytype.upwards_indices if edge.rise > 0 else waytype.nonupwards_indices).append(index)
if edge.access_restriction:
restrictions.setdefault(edge.access_restriction, RouterRestriction()).edges.append(index)
# respect slow_down_factor
for area in areas.values():
if area.slow_down_factor != 1:
area_nodes = np.array(tuple(area.nodes), dtype=np.uint32)
graph[area_nodes.reshape((-1, 1)), area_nodes] *= float(area.slow_down_factor)
# finalize waytype matrices
for waytype in waytypes:
waytype.upwards_indices = np.array(waytype.upwards_indices, dtype=np.uint32).reshape((-1, 2))
waytype.nonupwards_indices = np.array(waytype.nonupwards_indices, dtype=np.uint32).reshape((-1, 2))
# finalize restriction edge matrices
for restriction in restrictions.values():
restriction.edges = np.array(restriction.edges, dtype=np.uint32).reshape((-1, 2))
router = cls(levels, spaces, areas, pois, groups, restrictions, nodes, edges, waytypes, graph)
pickle.dump(router, open(cls.build_filename(update), 'wb'))
return router
@classmethod
def build_filename(cls, update):
return os.path.join(settings.CACHE_ROOT, 'router_%s.pickle' % MapUpdate.build_cache_key(*update))
@classmethod
def load_nocache(cls, update):
return pickle.load(open(cls.build_filename(update), 'rb'))
cached = None
cache_update = None
cache_lock = threading.Lock()
@classmethod
def load(cls):
from c3nav.mapdata.models import MapUpdate
update = MapUpdate.last_processed_update()
if cls.cache_update != update:
with cls.cache_lock:
cls.cache_update = update
cls.cached = cls.load_nocache(update)
return cls.cached
def get_locations(self, location, restrictions):
locations = ()
if isinstance(location, Level):
if location.access_restriction_id not in restrictions:
if location.pk not in self.levels:
raise NotYetRoutable
locations = (self.levels[location.pk], )
elif isinstance(location, Space):
if location.pk not in restrictions.spaces:
if location.pk not in self.spaces:
raise NotYetRoutable
locations = (self.spaces[location.pk], )
elif isinstance(location, Area):
if location.space_id not in restrictions.spaces and location.access_restriction_id not in restrictions:
if location.pk not in self.areas:
raise NotYetRoutable
locations = (self.areas[location.pk], )
elif isinstance(location, POI):
if location.space_id not in restrictions.spaces and location.access_restriction_id not in restrictions:
if location.pk not in self.pois:
raise NotYetRoutable
locations = (self.pois[location.pk], )
elif isinstance(location, LocationGroup):
if location.pk not in self.groups:
raise NotYetRoutable
group = self.groups[location.pk]
locations = tuple(chain(
(level for level in (self.levels[pk] for pk in group.get('levels', ()))
if level.access_restriction_id not in restrictions),
(space for space in (self.spaces[pk] for pk in group.get('spaces', ()))
if space.pk not in restrictions.spaces),
(area for area in (self.areas[pk] for pk in group.get('areas', ()))
if area.space_id not in restrictions.spaces and area.access_restriction_id not in restrictions),
(poi for poi in (self.pois[pk] for pk in group.get('pois', ()))
if poi.space_id not in restrictions.spaces and poi.access_restriction_id not in restrictions),
))
elif isinstance(location, (CustomLocation, CustomLocationProxyMixin)):
if isinstance(location, CustomLocationProxyMixin) and not location.available:
raise LocationUnreachable
point = Point(location.x, location.y)
location = RouterPoint(location)
space = self.space_for_point(location.level.pk, point, restrictions)
if space is None:
raise LocationUnreachable
altitudearea = space.altitudearea_for_point(point)
location.altitude = altitudearea.get_altitude(point)
location_nodes = altitudearea.nodes_for_point(point, all_nodes=self.nodes)
location.nodes = set(i for i in location_nodes.keys())
location.nodes_addition = location_nodes
locations = tuple((location, ))
result = RouterLocation(locations)
if not result.nodes:
raise LocationUnreachable
return result
def space_for_point(self, level, point, restrictions) -> Optional['RouterSpace']:
point = Point(point.x, point.y)
level = self.levels[level]
excluded_spaces = restrictions.spaces if restrictions else ()
for space in level.spaces:
if space in excluded_spaces:
continue
if self.spaces[space].geometry_prep.contains(point):
return self.spaces[space]
spaces = (self.spaces[space] for space in level.spaces if space not in excluded_spaces)
spaces = ((space, space.geometry.distance(point)) for space in spaces)
spaces = tuple((space, distance) for space, distance in spaces if distance < 20)
if not spaces:
return None
return min(spaces, key=operator.itemgetter(1))[0]
def describe_custom_location(self, location):
restrictions = self.get_restrictions(location.permissions)
space = self.space_for_point(level=location.level.pk, point=location, restrictions=restrictions)
if not space:
return CustomLocationDescription(space=space, altitude=None, areas=(), near_area=None, near_poi=None,
nearby=())
try:
altitude = space.altitudearea_for_point(location).get_altitude(location)
except LocationUnreachable:
altitude = None
areas, near_area, nearby_areas = space.areas_for_point(
areas=self.areas, point=location, restrictions=restrictions
)
near_poi, nearby_pois = space.poi_for_point(
pois=self.pois, point=location, restrictions=restrictions
)
nearby = tuple(sorted(
tuple(l for l in nearby_areas+nearby_pois if l[0].can_search),
key=operator.itemgetter(1)
))
# show all locations within 5 meters, but at least 20
min_i = len(nearby)+1
for i, (location, distance) in enumerate(nearby):
if distance > 5:
min_i = i
break
nearby = tuple(location for location, distance in nearby[:max(20, min_i)])
return CustomLocationDescription(space=space, altitude=altitude,
areas=areas, near_area=near_area, near_poi=near_poi, nearby=nearby)
def shortest_path(self, restrictions, options):
options_key = options.serialize_string()
cache_key = 'router:shortest_path:%s:%s:%s' % (MapUpdate.current_processed_cache_key(),
restrictions.cache_key,
options_key)
result = cache.get(cache_key)
if result:
distances, predecessors = result
return (np.frombuffer(distances, dtype=np.float64).reshape(self.graph.shape),
np.frombuffer(predecessors, dtype=np.int32).reshape(self.graph.shape))
graph = self.graph.copy()
# speeds of waytypes, if relevant
if options['mode'] == 'fastest':
self.waytypes[0].speed = 1
self.waytypes[0].speed_up = 1
self.waytypes[0].extra_seconds = 0
self.waytypes[0].walk = True
for waytype in self.waytypes:
speed = float(waytype.speed)
speed_up = float(waytype.speed_up)
if waytype.walk:
speed *= options.walk_factor
speed_up *= options.walk_factor
for indices, dir_speed in ((waytype.nonupwards_indices, speed), (waytype.upwards_indices, speed_up)):
indices = indices.transpose().tolist()
values = graph[indices]
values /= dir_speed
if waytype.extra_seconds:
values += int(waytype.extra_seconds)
graph[indices] = values
# avoid waytypes as specified in the route options
for waytype in self.waytypes[1:]:
value = options.get('waytype_%s' % waytype.pk, 'allow')
if value in ('avoid', 'avoid_up'):
graph[waytype.upwards_indices.transpose().tolist()] *= 100000
if value in ('avoid', 'avoid_down'):
graph[waytype.nonupwards_indices.transpose().tolist()] *= 100000
# exclude spaces and edges
space_nodes = tuple(reduce(operator.or_, (self.spaces[space].nodes for space in restrictions.spaces), set()))
graph[space_nodes, :] = np.inf
graph[:, space_nodes] = np.inf
if restrictions.additional_nodes:
graph[tuple(restrictions.additional_nodes), :] = np.inf
graph[:, tuple(restrictions.additional_nodes)] = np.inf
graph[restrictions.edges.transpose().tolist()] = np.inf
distances, predecessors = shortest_path(graph, directed=True, return_predecessors=True)
cache.set(cache_key, (distances.astype(np.float64).tobytes(),
predecessors.astype(np.int32).tobytes()), 600)
return distances, predecessors
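# Editorial worked example (hedged): in 'fastest' mode the edge weights become
# approximate travel times. For a 12 m edge on a walkable waytype with
# speed 1.0, walk_factor 1.5 and extra_seconds 0, the weight is rescaled to
#     12 / (1.0 * 1.5) = 8.0
# i.e. about 8 seconds; the 'avoid' options then multiply such weights by
# 100000 so those edges are only taken when no alternative exists.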
def get_restrictions(self, permissions):
return RouterRestrictionSet({
pk: restriction for pk, restriction in self.restrictions.items() if pk not in permissions
})
def get_route(self, origin, destination, permissions, options):
restrictions = self.get_restrictions(permissions)
# get possible origins and destinations
origins = self.get_locations(origin, restrictions)
destinations = self.get_locations(destination, restrictions)
# calculate shortest path matrix
distances, predecessors = self.shortest_path(restrictions, options=options)
# find shortest path for our origins and destinations
origin_nodes = np.array(tuple(origins.nodes))
destination_nodes = np.array(tuple(destinations.nodes))
origin_node, destination_node = np.unravel_index(
distances[origin_nodes.reshape((-1, 1)), destination_nodes].argmin(),
(len(origin_nodes), len(destination_nodes))
)
origin_node = origin_nodes[origin_node]
destination_node = destination_nodes[destination_node]
if distances[origin_node, destination_node] == np.inf:
raise NoRouteFound
# get best origin and destination
origin = origins.get_location_for_node(origin_node)
destination = destinations.get_location_for_node(destination_node)
# recreate path
path_nodes = deque((destination_node, ))
last_node = destination_node
while last_node != origin_node:
last_node = predecessors[origin_node, last_node]
path_nodes.appendleft(last_node)
path_nodes = tuple(path_nodes)
origin_addition = origin.nodes_addition.get(origin_node)
destination_addition = destination.nodes_addition.get(destination_node)
# get additional distance at origin and destination
origin_xyz = origin.xyz if isinstance(origin, RouterPoint) else None
destination_xyz = destination.xyz if isinstance(destination, RouterPoint) else None
return Route(self, origin, destination, path_nodes, options,
origin_addition, destination_addition, origin_xyz, destination_xyz)
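# Editorial note (hedged): get_route ties the pieces above together. It
# resolves both endpoints to candidate graph nodes (get_locations), computes
# the all-pairs distance/predecessor matrices once per restriction/options
# combination (shortest_path, cached), picks the origin/destination node pair
# with the smallest distance, and then walks the predecessor matrix backwards
# to rebuild the node path handed to Route.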
CustomLocationDescription = namedtuple('CustomLocationDescription', ('space', 'altitude',
'areas', 'near_area', 'near_poi', 'nearby'))
class BaseRouterProxy:
def __init__(self, src):
self.src = src
self.nodes = set()
self.nodes_addition = {}
@cached_property
def geometry_prep(self):
return prepared.prep(self.src.geometry)
def __getstate__(self):
result = self.__dict__.copy()
result.pop('geometry_prep', None)
return result
def __getattr__(self, name):
if name == '__setstate__':
raise AttributeError
return getattr(self.src, name)
class RouterLevel(BaseRouterProxy):
def __init__(self, level, spaces=None):
super().__init__(level)
self.spaces = spaces if spaces else set()
class RouterSpace(BaseRouterProxy):
def __init__(self, space, altitudeareas=None):
super().__init__(space)
self.areas = set()
self.pois = set()
self.altitudeareas = altitudeareas if altitudeareas else []
self.leave_descriptions = {}
self.cross_descriptions = {}
def altitudearea_for_point(self, point):
point = Point(point.x, point.y)
if not self.altitudeareas:
raise LocationUnreachable
for area in self.altitudeareas:
if area.geometry_prep.intersects(point):
return area
return min(self.altitudeareas, key=lambda area: area.geometry.distance(point))
def areas_for_point(self, areas, point, restrictions):
point = Point(point.x, point.y)
areas = {pk: area for pk, area in areas.items()
if pk in self.areas and area.can_describe and area.access_restriction_id not in restrictions}
nearby = ((area, area.geometry.distance(point)) for area in areas.values())
nearby = tuple((area, distance) for area, distance in nearby if distance < 20)
contained = tuple(area for area in areas.values() if area.geometry_prep.contains(point))
if contained:
return tuple(sorted(contained, key=lambda area: area.geometry.area)), None, nearby
near = tuple((area, distance) for area, distance in nearby if distance < 5)
if not near:
return (), None, nearby
return (), min(near, key=operator.itemgetter(1))[0], nearby
def poi_for_point(self, pois, point, restrictions):
point = Point(point.x, point.y)
pois = {pk: poi for pk, poi in pois.items()
if pk in self.pois and poi.can_describe and poi.access_restriction_id not in restrictions}
nearby = ((poi, poi.geometry.distance(point)) for poi in pois.values())
nearby = tuple((poi, distance) for poi, distance in nearby if distance < 20)
near = tuple((poi, distance) for poi, distance in nearby if distance < 5)
if not near:
return None, nearby
return min(near, key=operator.itemgetter(1))[0], nearby
class RouterArea(BaseRouterProxy):
pass
class RouterPoint(BaseRouterProxy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.altitude = None
@cached_property
def xyz(self):
return np.array((self.x, self.y, self.altitude))
class RouterAltitudeArea:
def __init__(self, geometry, clear_geometry, altitude, altitude2, point1, point2):
self.geometry = geometry
self.clear_geometry = clear_geometry
self.altitude = altitude
self.altitude2 = altitude2
self.point1 = point1
self.point2 = point2
self.nodes = frozenset()
self.fallback_nodes = {}
@cached_property
def geometry_prep(self):
return prepared.prep(self.geometry)
@cached_property
def clear_geometry_prep(self):
return prepared.prep(self.clear_geometry)
def get_altitude(self, point):
# noinspection PyTypeChecker,PyCallByClass
return AltitudeArea.get_altitudes(self, (point.x, point.y))[0]
def nodes_for_point(self, point, all_nodes):
point = Point(point.x, point.y)
nodes = {}
if self.nodes:
for node in self.nodes:
node = all_nodes[node]
line = LineString([(node.x, node.y), (point.x, point.y)])
if line.length < 10 and not self.clear_geometry_prep.intersects(line):
nodes[node.i] = (None, None)
if not nodes:
nearest_node = min(tuple(all_nodes[node] for node in self.nodes),
key=lambda node: point.distance(node.point))
nodes[nearest_node.i] = (None, None)
else:
nodes = self.fallback_nodes
return nodes
def __getstate__(self):
result = self.__dict__.copy()
result.pop('geometry_prep', None)
result.pop('clear_geometry_prep', None)
return result
class RouterNode:
def __init__(self, i, pk, x, y, space, altitude=None, areas=None):
self.i = i
self.pk = pk
self.x = x
self.y = y
self.space = space
self.altitude = altitude
self.areas = areas if areas else set()
@classmethod
def from_graph_node(cls, node, i):
return cls(i, node.pk, node.geometry.x, node.geometry.y, node.space_id)
@cached_property
def point(self):
return Point(self.x, self.y)
@cached_property
def xyz(self):
return np.array((self.x, self.y, self.altitude))
class RouterEdge:
def __init__(self, from_node, to_node, waytype, access_restriction=None, rise=None, distance=None):
self.from_node = from_node.i
self.to_node = to_node.i
self.waytype = waytype
self.access_restriction = access_restriction
if rise is not None:
self.rise = rise
elif to_node.altitude is None or from_node.altitude is None:
self.rise = None
else:
self.rise = (to_node.altitude - from_node.altitude)
self.distance = distance if distance is not None else np.linalg.norm(to_node.xyz - from_node.xyz)
class RouterWayType:
def __init__(self, waytype):
self.src = waytype
self.upwards_indices = deque()
self.nonupwards_indices = deque()
def __getattr__(self, name):
if name in ('__getstate__', '__setstate__'):
raise AttributeError
return getattr(self.src, name)
def get_duration(self, edge, walk_factor):
if edge.rise > 0:
duration = edge.distance / (float(self.speed_up if self.src else 1) * walk_factor)
else:
duration = edge.distance / (float(self.speed if self.src else 1) * walk_factor)
duration += self.extra_seconds if self.src else 0
return duration
class RouterLocation:
def __init__(self, locations=()):
self.locations = locations
@cached_property
def nodes(self):
return reduce(operator.or_, (location.nodes for location in self.locations), frozenset())
def get_location_for_node(self, node):
for location in self.locations:
if node in location.nodes:
return location
return None
class RouterRestriction:
def __init__(self, spaces=None):
self.spaces = spaces if spaces else set()
self.additional_nodes = set()
self.edges = deque()
class RouterRestrictionSet:
def __init__(self, restrictions):
self.restrictions = restrictions
@cached_property
def spaces(self):
return reduce(operator.or_, (restriction.spaces for restriction in self.restrictions.values()), frozenset())
@cached_property
def additional_nodes(self):
return reduce(operator.or_, (restriction.additional_nodes
for restriction in self.restrictions.values()), frozenset())
@cached_property
def edges(self):
if not self.restrictions:
return np.array((), dtype=np.uint32).reshape((-1, 2))
return np.vstack(tuple(restriction.edges for restriction in self.restrictions.values()))
@cached_property
def cache_key(self):
return '%s_%s' % ('-'.join(str(i) for i in self.spaces),
'-'.join(str(i) for i in self.edges.flatten().tolist()))
def __contains__(self, pk):
return pk in self.restrictions
|
|
"""Testing for K-means"""
import re
import sys
import numpy as np
from scipy import sparse as sp
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import threadpool_limits
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics import pairwise_distances
from sklearn.metrics import pairwise_distances_argmin
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means, kmeans_plusplus
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster._kmeans import _labels_inertia
from sklearn.cluster._kmeans import _mini_batch_step
from sklearn.cluster._k_means_common import _relocate_empty_clusters_dense
from sklearn.cluster._k_means_common import _relocate_empty_clusters_sparse
from sklearn.cluster._k_means_common import _euclidean_dense_dense_wrapper
from sklearn.cluster._k_means_common import _euclidean_sparse_dense_wrapper
from sklearn.cluster._k_means_common import _inertia_dense
from sklearn.cluster._k_means_common import _inertia_sparse
from sklearn.cluster._k_means_common import _is_same_clustering
from sklearn.datasets import make_blobs
from io import StringIO
# non centered, sparse centers to check the behaviour of KMeans on such data
centers = np.array(
[
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
]
)
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(
n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42
)
X_csr = sp.csr_matrix(X)
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
@pytest.mark.parametrize("algo", ["lloyd", "elkan"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_results(array_constr, algo, dtype):
# Checks that KMeans works as intended on a toy dataset by comparing with
# expected results computed by hand.
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype)
sample_weight = [3, 1, 1, 3]
init_centers = np.array([[0, 0], [1, 1]], dtype=dtype)
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.375
expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype)
expected_n_iter = 2
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X, sample_weight=sample_weight)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
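# Editorial worked check: with sample weights [3, 1, 1, 3] and the labels
# above, the updated centers are weighted means, e.g.
#     c0 = (3*[0, 0] + 1*[0.5, 0]) / 4 = [0.125, 0]
#     c1 = (1*[0.5, 1] + 3*[1, 1]) / 4 = [0.875, 1]
# and the inertia is the weighted sum of squared distances to these centers:
#     3*0.125**2 + 1*0.375**2 + 1*0.375**2 + 3*0.125**2 = 0.375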
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
@pytest.mark.parametrize("algo", ["lloyd", "elkan"])
def test_kmeans_relocated_clusters(array_constr, algo):
# check that empty clusters are relocated as expected
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])
# second center too far from others points will be empty at first iter
init_centers = np.array([[0.5, 0.5], [3, 3]])
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.25
expected_centers = [[0.25, 0], [0.75, 1]]
expected_n_iter = 3
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
def test_relocate_empty_clusters(array_constr):
# test for the _relocate_empty_clusters_(dense/sparse) helpers
# Synthetic dataset with 3 obvious clusters of different sizes
X = np.array([-10.0, -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1)
X = array_constr(X)
sample_weight = np.ones(10)
# centers all initialized to the first point of X
centers_old = np.array([-10.0, -10, -10]).reshape(-1, 1)
# With this initialization, all points will be assigned to the first center
# At this point a center in centers_new is the weighted sum of the points
# it contains if it's not empty, otherwise it is the same as before.
centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1)
weight_in_clusters = np.array([10.0, 0, 0])
labels = np.zeros(10, dtype=np.int32)
if array_constr is np.array:
_relocate_empty_clusters_dense(
X, sample_weight, centers_old, centers_new, weight_in_clusters, labels
)
else:
_relocate_empty_clusters_sparse(
X.data,
X.indices,
X.indptr,
sample_weight,
centers_old,
centers_new,
weight_in_clusters,
labels,
)
# The relocation scheme will take the 2 points farthest from the center and
# assign them to the 2 empty clusters, i.e. the points at 10 and at 9.5. The
# first center will be updated to contain the other 8 points.
assert_array_equal(weight_in_clusters, [8, 1, 1])
assert_allclose(centers_new, [[-36], [10], [9.5]])
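# Editorial worked check (hedged): before relocation, centers_new holds
# weighted sums, so moving the two farthest points (10 and 9.5, weight 1 each)
# out of the first cluster leaves
#     centers_new[0] = -16.5 - 10 - 9.5 = -36,   weights = [8, 1, 1]
# which is exactly what the assertions above verify.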
@pytest.mark.parametrize("distribution", ["normal", "blobs"])
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0])
def test_kmeans_elkan_results(distribution, array_constr, tol):
# Check that results are identical between lloyd and elkan algorithms
rnd = np.random.RandomState(0)
if distribution == "normal":
X = rnd.normal(size=(5000, 10))
else:
X, _ = make_blobs(random_state=rnd)
X[X < 0] = 0
X = array_constr(X)
km_lloyd = KMeans(n_clusters=5, random_state=0, n_init=1, tol=tol)
km_elkan = KMeans(
algorithm="elkan", n_clusters=5, random_state=0, n_init=1, tol=tol
)
km_lloyd.fit(X)
km_elkan.fit(X)
assert_allclose(km_elkan.cluster_centers_, km_lloyd.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_lloyd.labels_)
assert km_elkan.n_iter_ == km_lloyd.n_iter_
assert km_elkan.inertia_ == pytest.approx(km_lloyd.inertia_, rel=1e-6)
@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"])
def test_kmeans_convergence(algorithm):
# Check that KMeans stops when convergence is reached when tol=0. (#16075)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(5000, 10))
max_iter = 300
km = KMeans(
algorithm=algorithm,
n_clusters=5,
random_state=0,
n_init=1,
tol=0,
max_iter=max_iter,
).fit(X)
assert km.n_iter_ < max_iter
@pytest.mark.parametrize("algorithm", ["auto", "full"])
def test_algorithm_auto_full_deprecation_warning(algorithm):
X = np.random.rand(100, 2)
kmeans = KMeans(algorithm=algorithm)
with pytest.warns(
FutureWarning,
match=(
f"algorithm='{algorithm}' is deprecated, it will "
"be removed in 1.3. Using 'lloyd' instead."
),
):
kmeans.fit(X)
assert kmeans._algorithm == "lloyd"
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
centers_old = centers + rng.normal(size=centers.shape)
centers_old_csr = centers_old.copy()
centers_new = np.zeros_like(centers_old)
centers_new_csr = np.zeros_like(centers_old_csr)
weight_sums = np.zeros(centers_old.shape[0], dtype=X.dtype)
weight_sums_csr = np.zeros(centers_old.shape[0], dtype=X.dtype)
x_squared_norms = (X**2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
sample_weight_mb = sample_weight[:10]
# step 1: compute the dense minibatch update
old_inertia = _mini_batch_step(
X_mb,
x_mb_squared_norms,
sample_weight_mb,
centers_old,
centers_new,
weight_sums,
np.random.RandomState(0),
random_reassign=False,
)
assert old_inertia > 0.0
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, sample_weight_mb, x_mb_squared_norms, centers_new
)
assert new_inertia > 0.0
assert new_inertia < old_inertia
# step 2: compute the sparse minibatch update
old_inertia_csr = _mini_batch_step(
X_mb_csr,
x_mb_squared_norms_csr,
sample_weight_mb,
centers_old_csr,
centers_new_csr,
weight_sums_csr,
np.random.RandomState(0),
random_reassign=False,
)
assert old_inertia_csr > 0.0
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, centers_new_csr
)
assert new_inertia_csr > 0.0
assert new_inertia_csr < old_inertia_csr
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_allclose(centers_new, centers_new_csr)
assert_allclose(old_inertia, old_inertia_csr)
assert_allclose(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert centers.shape == (n_clusters, n_features)
labels = km.labels_
assert np.unique(labels).shape[0] == n_clusters
# check that the label assignment is perfect (up to a permutation)
assert_allclose(v_measure_score(true_labels, labels), 1.0)
assert km.inertia_ > 0.0
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize(
"init",
["random", "k-means++", centers, lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"],
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_all_init(Estimator, data, init):
# Check KMeans and MiniBatchKMeans with all possible init.
n_init = 10 if isinstance(init, str) else 1
km = Estimator(
init=init, n_clusters=n_clusters, random_state=42, n_init=n_init
).fit(data)
_check_fitted_model(km)
@pytest.mark.parametrize(
"init",
["random", "k-means++", centers, lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"],
)
def test_minibatch_kmeans_partial_fit_init(init):
# Check MiniBatchKMeans init with partial_fit
n_init = 10 if isinstance(init, str) else 1
km = MiniBatchKMeans(
init=init, n_clusters=n_clusters, random_state=0, n_init=n_init
)
for i in range(100):
# "random" init requires many batches to recover the true labels.
km.partial_fit(X)
_check_fitted_model(km)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fortran_aligned_data(Estimator):
# Check that KMeans works with fortran-aligned data.
X_fortran = np.asfortranarray(X)
centers_fortran = np.asfortranarray(centers)
km_c = Estimator(
n_clusters=n_clusters, init=centers, n_init=1, random_state=42
).fit(X)
km_f = Estimator(
n_clusters=n_clusters, init=centers_fortran, n_init=1, random_state=42
).fit(X_fortran)
assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_)
assert_array_equal(km_c.labels_, km_f.labels_)
@pytest.mark.parametrize("algo", ["lloyd", "elkan"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("constructor", [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize(
"seed, max_iter, tol",
[
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
],
)
def test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol):
# check that fit.predict gives the same result as fit_predict
rng = np.random.RandomState(seed)
X = make_blobs(n_samples=1000, n_features=10, centers=10, random_state=rng)[
0
].astype(dtype, copy=False)
X = constructor(X)
kmeans = KMeans(
algorithm=algo, n_clusters=10, random_state=seed, tol=tol, max_iter=max_iter
)
labels_1 = kmeans.fit(X).predict(X)
labels_2 = kmeans.fit_predict(X)
assert_array_equal(labels_1, labels_2)
def test_minibatch_kmeans_verbose():
# Check verbose mode of MiniBatchKMeans for better coverage.
km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
km.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"])
@pytest.mark.parametrize("tol", [1e-2, 0])
def test_kmeans_verbose(algorithm, tol, capsys):
# Check verbose mode of KMeans for better coverage.
X = np.random.RandomState(0).normal(size=(5000, 10))
KMeans(
algorithm=algorithm,
n_clusters=n_clusters,
random_state=42,
init="random",
n_init=1,
tol=tol,
verbose=1,
).fit(X)
captured = capsys.readouterr()
assert re.search(r"Initialization complete", captured.out)
assert re.search(r"Iteration [0-9]+, inertia", captured.out)
if tol == 0:
assert re.search(r"strict convergence", captured.out)
else:
assert re.search(r"center shift .* within tolerance", captured.out)
def test_minibatch_kmeans_warning_init_size():
# Check that a warning is raised when init_size is smaller than n_clusters
with pytest.warns(
RuntimeWarning, match=r"init_size.* should be larger than n_clusters"
):
MiniBatchKMeans(init_size=10, n_clusters=20).fit(X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_warning_n_init_precomputed_centers(Estimator):
# Check that a warning is raised when n_init > 1 and an array is passed for
# the init parameter.
with pytest.warns(
RuntimeWarning,
match="Explicit initial center position passed: performing only one init",
):
Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X)
def test_minibatch_sensible_reassign():
# check that identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5, random_state=42)
zeroed_X[::2, :] = 0
km = MiniBatchKMeans(
n_clusters=20, batch_size=10, random_state=42, init="random"
).fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
# do the same with batch-size > X.shape[0] (regression test)
km = MiniBatchKMeans(
n_clusters=20, batch_size=200, random_state=42, init="random"
).fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
# do the same with partial_fit API
km = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
km.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
def test_minibatch_reassign(data):
# Check the reassignment part of the minibatch step with very high or very
# low reassignment ratio.
perfect_centers = np.empty((n_clusters, n_features))
for i in range(n_clusters):
perfect_centers[i] = X[true_labels == i].mean(axis=0)
x_squared_norms = row_norms(data, squared=True)
sample_weight = np.ones(n_samples)
centers_new = np.empty_like(perfect_centers)
# Give a perfect initialization, but a large reassignment_ratio; as a result,
# many centers should be reassigned and the model should no longer
# be good
score_before = -_labels_inertia(
data, sample_weight, x_squared_norms, perfect_centers, 1
)[1]
_mini_batch_step(
data,
x_squared_norms,
sample_weight,
perfect_centers,
centers_new,
np.zeros(n_clusters),
np.random.RandomState(0),
random_reassign=True,
reassignment_ratio=1,
)
score_after = -_labels_inertia(
data, sample_weight, x_squared_norms, centers_new, 1
)[1]
assert score_before > score_after
# Give a perfect initialization; with a small reassignment_ratio,
# no center should be reassigned.
_mini_batch_step(
data,
x_squared_norms,
sample_weight,
perfect_centers,
centers_new,
np.zeros(n_clusters),
np.random.RandomState(0),
random_reassign=True,
reassignment_ratio=1e-15,
)
assert_allclose(centers_new, perfect_centers)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size. Run the test with 100 clusters and a batch_size of
# 10 because it turned out that these values ensure that the number of
# clusters to reassign is always bigger than the batch_size.
MiniBatchKMeans(
n_clusters=100,
batch_size=10,
init_size=n_samples,
random_state=42,
verbose=True,
).fit(X)
def test_minibatch_kmeans_init_size():
# Check the internal _init_size attribute of MiniBatchKMeans
# default init size should be 3 * batch_size
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X)
assert km._init_size == 15
# if 3 * batch size < n_clusters, it should then be 3 * n_clusters
km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X)
assert km._init_size == 30
# it should not be larger than n_samples
km = MiniBatchKMeans(
n_clusters=10, batch_size=5, n_init=1, init_size=n_samples + 1
).fit(X)
assert km._init_size == n_samples
@pytest.mark.parametrize("tol, max_no_improvement", [(1e-4, None), (0, 10)])
def test_minibatch_declared_convergence(capsys, tol, max_no_improvement):
# Check convergence detection based on EWA (exponentially weighted average)
# batch inertia or on small center change.
X, _, centers = make_blobs(centers=3, random_state=0, return_centers=True)
km = MiniBatchKMeans(
n_clusters=3,
init=centers,
batch_size=20,
tol=tol,
random_state=0,
max_iter=10,
n_init=1,
verbose=1,
max_no_improvement=max_no_improvement,
)
km.fit(X)
assert 1 < km.n_iter_ < 10
captured = capsys.readouterr()
if max_no_improvement is None:
assert "Converged (small centers change)" in captured.out
if tol == 0:
assert "Converged (lack of improvement in inertia)" in captured.out
def test_minibatch_iter_steps():
# Check consistency of n_iter_ and n_steps_ attributes.
batch_size = 30
n_samples = X.shape[0]
km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size, random_state=0).fit(X)
# n_iter_ is the number of started epochs
assert km.n_iter_ == np.ceil((km.n_steps_ * batch_size) / n_samples)
assert isinstance(km.n_iter_, int)
# without stopping condition, max_iter should be reached
km = MiniBatchKMeans(
n_clusters=3,
batch_size=batch_size,
random_state=0,
tol=0,
max_no_improvement=None,
max_iter=10,
).fit(X)
assert km.n_iter_ == 10
assert km.n_steps_ == (10 * n_samples) // batch_size
assert isinstance(km.n_steps_, int)
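# Editorial worked check: with n_samples == 100, batch_size == 30 and
# max_iter == 10, the second fit performs
#     n_steps_ = (10 * 100) // 30 = 33
# minibatch steps, and the number of started epochs is
#     n_iter_ = ceil(33 * 30 / 100) = ceil(9.9) = 10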
def test_kmeans_copyx():
# Check that copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check that my_X is de-centered
assert_allclose(my_X, X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_score_max_iter(Estimator):
# Check that fitting KMeans or MiniBatchKMeans with more iterations gives
# better score
X = np.random.RandomState(0).randn(100, 10)
km1 = Estimator(n_init=1, random_state=42, max_iter=1)
s1 = km1.fit(X).score(X)
km2 = Estimator(n_init=1, random_state=42, max_iter=10)
s2 = km2.fit(X).score(X)
assert s2 > s1
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("init", ["random", "k-means++"])
@pytest.mark.parametrize(
"Estimator, algorithm",
[(KMeans, "lloyd"), (KMeans, "elkan"), (MiniBatchKMeans, None)],
)
def test_predict(Estimator, algorithm, init, dtype, array_constr):
# Check the predict method and the equivalence between fit.predict and
# fit_predict.
X, _ = make_blobs(n_samples=500, n_features=10, centers=10, random_state=0)
X = array_constr(X)
km = Estimator(n_clusters=10, init=init, n_init=10, random_state=0)
if algorithm is not None:
km.set_params(algorithm=algorithm)
km.fit(X)
labels = km.labels_
# re-predict labels for training set using predict
pred = km.predict(X)
assert_array_equal(pred, labels)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, labels)
# predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(10))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_dense_sparse(Estimator):
# Check that the results are the same for dense and sparse input.
sample_weight = np.random.RandomState(0).random_sample((n_samples,))
km_dense = Estimator(n_clusters=n_clusters, random_state=0, n_init=1)
km_dense.fit(X, sample_weight=sample_weight)
km_sparse = Estimator(n_clusters=n_clusters, random_state=0, n_init=1)
km_sparse.fit(X_csr, sample_weight=sample_weight)
assert_array_equal(km_dense.labels_, km_sparse.labels_)
assert_allclose(km_dense.cluster_centers_, km_sparse.cluster_centers_)
@pytest.mark.parametrize(
"init", ["random", "k-means++", centers], ids=["random", "k-means++", "ndarray"]
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_predict_dense_sparse(Estimator, init):
# check that models trained on sparse input also work for dense input at
# predict time and vice versa.
n_init = 10 if isinstance(init, str) else 1
km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init, random_state=0)
km.fit(X_csr)
assert_array_equal(km.predict(X), km.labels_)
km.fit(X)
assert_array_equal(km.predict(X_csr), km.labels_)
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("init", ["k-means++", "ndarray"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_integer_input(Estimator, array_constr, dtype, init):
# Check that KMeans and MiniBatchKMeans work with integer input.
X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]])
X = array_constr(X_dense, dtype=dtype)
n_init = 1 if init == "ndarray" else 10
init = X_dense[:2] if init == "ndarray" else init
km = Estimator(n_clusters=2, init=init, n_init=n_init, random_state=0)
if Estimator is MiniBatchKMeans:
km.set_params(batch_size=2)
km.fit(X)
# Internally integer input should be converted to float64
assert km.cluster_centers_.dtype == np.float64
expected_labels = [0, 1, 1, 0, 0, 1]
assert_array_equal(km.labels_, expected_labels)
# Same with partial_fit (#14314)
if Estimator is MiniBatchKMeans:
km = clone(km).partial_fit(X)
assert km.cluster_centers_.dtype == np.float64
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_transform(Estimator):
# Check the transform method
km = Estimator(n_clusters=n_clusters).fit(X)
# Transforming cluster_centers_ should return the pairwise distances
# between centers
Xt = km.transform(km.cluster_centers_)
assert_allclose(Xt, pairwise_distances(km.cluster_centers_))
# In particular, diagonal must be 0
assert_array_equal(Xt.diagonal(), np.zeros(n_clusters))
# Transforming X should return the pairwise distances between X and the
# centers
Xt = km.transform(X)
assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fit_transform(Estimator):
# Check equivalence between fit.transform and fit_transform
X1 = Estimator(random_state=0, n_init=1).fit(X).transform(X)
X2 = Estimator(random_state=0, n_init=1).fit_transform(X)
assert_allclose(X1, X2)
def test_n_init():
# Check that increasing the number of initializations does not worsen the inertia
previous_inertia = np.inf
for n_init in [1, 5, 10]:
# set max_iter=1 to avoid finding the global minimum and get the same
# inertia each time
km = KMeans(
n_clusters=n_clusters,
init="random",
n_init=n_init,
random_state=0,
max_iter=1,
).fit(X)
assert km.inertia_ <= previous_inertia
def test_k_means_function():
# test calling the k_means function directly
cluster_centers, labels, inertia = k_means(
X, n_clusters=n_clusters, sample_weight=None
)
assert cluster_centers.shape == (n_clusters, n_features)
assert np.unique(labels).shape[0] == n_clusters
# check that the label assignment is perfect (up to a permutation)
assert_allclose(v_measure_score(true_labels, labels), 1.0)
assert inertia > 0.0
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_float_precision(Estimator, data):
# Check that the results are the same for single and double precision.
km = Estimator(n_init=1, random_state=0)
inertia = {}
Xt = {}
centers = {}
labels = {}
for dtype in [np.float64, np.float32]:
X = data.astype(dtype, copy=False)
km.fit(X)
inertia[dtype] = km.inertia_
Xt[dtype] = km.transform(X)
centers[dtype] = km.cluster_centers_
labels[dtype] = km.labels_
# dtype of cluster centers has to be the dtype of the input data
assert km.cluster_centers_.dtype == dtype
# same with partial_fit
if Estimator is MiniBatchKMeans:
km.partial_fit(X[0:3])
assert km.cluster_centers_.dtype == dtype
# compare arrays with low precision since the difference between 32 and
# 64 bit comes from an accumulation of rounding errors.
assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-5)
assert_allclose(Xt[np.float32], Xt[np.float64], rtol=1e-5)
assert_allclose(centers[np.float32], centers[np.float64], rtol=1e-5)
assert_array_equal(labels[np.float32], labels[np.float64])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_centers_not_mutated(Estimator, dtype):
# Check that KMeans and MiniBatchKMeans won't mutate the user provided
# init centers silently even if input data and init centers have the same
# type.
X_new_type = X.astype(dtype, copy=False)
centers_new_type = centers.astype(dtype, copy=False)
km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1)
km.fit(X_new_type)
assert not np.may_share_memory(km.cluster_centers_, centers_new_type)
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
def test_kmeans_init_fitted_centers(data):
# Check that starting the fit from a local optimum does not change the
# solution.
km1 = KMeans(n_clusters=n_clusters).fit(data)
km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_, n_init=1).fit(data)
assert_allclose(km1.cluster_centers_, km2.cluster_centers_)
def test_kmeans_warns_less_centers_than_unique_points():
# Check KMeans when the number of found clusters is smaller than expected
X = np.asarray([[0, 0], [0, 1], [1, 0], [1, 0]]) # last point is duplicated
km = KMeans(n_clusters=4)
# KMeans should warn that fewer labels than cluster centers have been used
msg = (
r"Number of distinct clusters \(3\) found smaller than "
r"n_clusters \(4\). Possibly due to duplicate points in X."
)
with pytest.warns(ConvergenceWarning, match=msg):
km.fit(X)
# only three distinct points, so only three clusters
# can have points assigned to them
assert set(km.labels_) == set(range(3))
def _sort_centers(centers):
return np.sort(centers, axis=0)
def test_weighted_vs_repeated():
# Check that a sample weight of N yields the same result as an N-fold
# repetition of the sample. This only holds if init is precomputed;
# otherwise the rng produces different results. It does not hold for
# MiniBatchKMeans, because the rng is also used to extract the minibatches.
sample_weight = np.random.RandomState(0).randint(1, 5, size=n_samples)
X_repeat = np.repeat(X, sample_weight, axis=0)
km = KMeans(init=centers, n_init=1, n_clusters=n_clusters, random_state=0)
km_weighted = clone(km).fit(X, sample_weight=sample_weight)
repeated_labels = np.repeat(km_weighted.labels_, sample_weight)
km_repeated = clone(km).fit(X_repeat)
assert_array_equal(km_repeated.labels_, repeated_labels)
assert_allclose(km_weighted.inertia_, km_repeated.inertia_)
assert_allclose(
_sort_centers(km_weighted.cluster_centers_),
_sort_centers(km_repeated.cluster_centers_),
)
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_unit_weights_vs_no_weights(Estimator, data):
# Check that not passing sample weights is equivalent to passing sample
# weights all equal to one.
sample_weight = np.ones(n_samples)
km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)
km_none = clone(km).fit(data, sample_weight=None)
km_ones = clone(km).fit(data, sample_weight=sample_weight)
assert_array_equal(km_none.labels_, km_ones.labels_)
assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_)
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_scaled_weights(Estimator, data):
# Check that scaling all sample weights by a common factor does not
# change the result.
sample_weight = np.random.RandomState(0).uniform(size=n_samples)
km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)
km_orig = clone(km).fit(data, sample_weight=sample_weight)
km_scaled = clone(km).fit(data, sample_weight=0.5 * sample_weight)
assert_array_equal(km_orig.labels_, km_scaled.labels_)
assert_allclose(km_orig.cluster_centers_, km_scaled.cluster_centers_)
def test_kmeans_elkan_iter_attribute():
# Regression test on bad n_iter_ value. Previously n_iter_ was off by one
# from its correct value (#11340).
km = KMeans(algorithm="elkan", max_iter=1).fit(X)
assert km.n_iter_ == 1
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
def test_kmeans_empty_cluster_relocated(array_constr):
# check that empty clusters are correctly relocated when using sample
# weights (#13486)
X = array_constr([[-1], [1]])
sample_weight = [1.9, 0.1]
init = np.array([[-1], [10]])
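# both points are closer to the first initial center, so the second cluster
# starts empty and has to be relocated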
km = KMeans(n_clusters=2, init=init, n_init=1)
km.fit(X, sample_weight=sample_weight)
assert len(set(km.labels_)) == 2
assert_allclose(km.cluster_centers_, [[-1], [1]])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_result_equal_in_diff_n_threads(Estimator):
# Check that KMeans/MiniBatchKMeans give the same results in parallel mode
# as in sequential mode.
rnd = np.random.RandomState(0)
X = rnd.normal(size=(50, 10))
with threadpool_limits(limits=1, user_api="openmp"):
result_1 = Estimator(n_clusters=n_clusters, random_state=0).fit(X).labels_
with threadpool_limits(limits=2, user_api="openmp"):
result_2 = Estimator(n_clusters=n_clusters, random_state=0).fit(X).labels_
assert_array_equal(result_1, result_2)
def test_warning_elkan_1_cluster():
# Check warning messages specific to KMeans
with pytest.warns(
RuntimeWarning,
match="algorithm='elkan' doesn't make sense for a single cluster",
):
KMeans(n_clusters=1, algorithm="elkan").fit(X)
@pytest.mark.parametrize(
"array_constr", [np.array, sp.csr_matrix], ids=["dense", "sparse"]
)
@pytest.mark.parametrize("algo", ["lloyd", "elkan"])
def test_k_means_1_iteration(array_constr, algo):
# check the results after a single iteration (E-step M-step E-step) by
# comparing against a pure python implementation.
X = np.random.RandomState(0).uniform(size=(100, 5))
init_centers = X[:5]
X = array_constr(X)
def py_kmeans(X, init):
new_centers = init.copy()
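# E-step: assign each sample to its nearest initial center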
labels = pairwise_distances_argmin(X, init)
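# M-step: recompute each center as the mean of the samples assigned to it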
for label in range(init.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
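# second E-step: reassign samples to the updated centers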
labels = pairwise_distances_argmin(X, new_centers)
return labels, new_centers
py_labels, py_centers = py_kmeans(X, init_centers)
cy_kmeans = KMeans(
n_clusters=5, n_init=1, init=init_centers, algorithm=algo, max_iter=1
).fit(X)
cy_labels = cy_kmeans.labels_
cy_centers = cy_kmeans.cluster_centers_
assert_array_equal(py_labels, cy_labels)
assert_allclose(py_centers, cy_centers)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("squared", [True, False])
def test_euclidean_distance(dtype, squared):
# Check that the _euclidean_(dense/sparse)_dense helpers produce correct
# results
rng = np.random.RandomState(0)
a_sparse = sp.random(
1, 100, density=0.5, format="csr", random_state=rng, dtype=dtype
)
a_dense = a_sparse.toarray().reshape(-1)
b = rng.randn(100).astype(dtype, copy=False)
b_squared_norm = (b**2).sum()
expected = ((a_dense - b) ** 2).sum()
expected = expected if squared else np.sqrt(expected)
distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared)
distance_sparse_dense = _euclidean_sparse_dense_wrapper(
a_sparse.data, a_sparse.indices, b, b_squared_norm, squared
)
assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6)
assert_allclose(distance_dense_dense, expected, rtol=1e-6)
assert_allclose(distance_sparse_dense, expected, rtol=1e-6)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_inertia(dtype):
# Check that the _inertia_(dense/sparse) helpers produce correct results.
rng = np.random.RandomState(0)
X_sparse = sp.random(
100, 10, density=0.5, format="csr", random_state=rng, dtype=dtype
)
X_dense = X_sparse.toarray()
sample_weight = rng.randn(100).astype(dtype, copy=False)
centers = rng.randn(5, 10).astype(dtype, copy=False)
labels = rng.randint(5, size=100, dtype=np.int32)
distances = ((X_dense - centers[labels]) ** 2).sum(axis=1)
expected = np.sum(distances * sample_weight)
inertia_dense = _inertia_dense(X_dense, sample_weight, centers, labels, n_threads=1)
inertia_sparse = _inertia_sparse(
X_sparse, sample_weight, centers, labels, n_threads=1
)
assert_allclose(inertia_dense, inertia_sparse, rtol=1e-6)
assert_allclose(inertia_dense, expected, rtol=1e-6)
assert_allclose(inertia_sparse, expected, rtol=1e-6)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_sample_weight_unchanged(Estimator):
# Check that sample_weight is not modified in place by KMeans (#17204)
X = np.array([[1], [2], [4]])
sample_weight = np.array([0.5, 0.2, 0.3])
Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight)
assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3]))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
@pytest.mark.parametrize(
"param, match",
[
({"n_init": 0}, r"n_init should be > 0"),
({"max_iter": 0}, r"max_iter should be > 0"),
({"n_clusters": n_samples + 1}, r"n_samples.* should be >= n_clusters"),
(
{"init": X[:2]},
r"The shape of the initial centers .* does not match "
r"the number of clusters",
),
(
{"init": lambda X_, k, random_state: X_[:2]},
r"The shape of the initial centers .* does not match "
r"the number of clusters",
),
(
{"init": X[:8, :2]},
r"The shape of the initial centers .* does not match "
r"the number of features of the data",
),
(
{"init": lambda X_, k, random_state: X_[:8, :2]},
r"The shape of the initial centers .* does not match "
r"the number of features of the data",
),
(
{"init": "wrong"},
r"init should be either 'k-means\+\+', 'random', "
r"an array-like or a callable",
),
],
)
def test_wrong_params(Estimator, param, match):
# Check that errors are raised with clear error messages when wrong values
# are passed for the parameters
# Set n_init=1 by default to avoid warning with precomputed init
km = Estimator(n_init=1)
with pytest.raises(ValueError, match=match):
km.set_params(**param).fit(X)
@pytest.mark.parametrize(
"param, match",
[({"algorithm": "wrong"}, r"Algorithm must be either 'lloyd' or 'elkan'")],
)
def test_kmeans_wrong_params(param, match):
# Check that errors are raised with clear error messages when wrong values
# are passed for the KMeans-specific parameters
with pytest.raises(ValueError, match=match):
KMeans(**param).fit(X)
@pytest.mark.parametrize(
"param, match",
[
({"max_no_improvement": -1}, r"max_no_improvement should be >= 0"),
({"batch_size": -1}, r"batch_size should be > 0"),
({"init_size": -1}, r"init_size should be > 0"),
({"reassignment_ratio": -1}, r"reassignment_ratio should be >= 0"),
],
)
def test_minibatch_kmeans_wrong_params(param, match):
# Check that errors are raised with clear error messages when wrong values
# are passed for the MiniBatchKMeans-specific parameters
with pytest.raises(ValueError, match=match):
MiniBatchKMeans(**param).fit(X)
@pytest.mark.parametrize(
"param, match",
[
(
{"n_local_trials": 0},
r"n_local_trials is set to 0 but should be an "
r"integer value greater than zero",
),
(
{"x_squared_norms": X[:2]},
r"The length of x_squared_norms .* should "
r"be equal to the length of n_samples",
),
],
)
def test_kmeans_plusplus_wrong_params(param, match):
with pytest.raises(ValueError, match=match):
kmeans_plusplus(X, n_clusters, **param)
@pytest.mark.parametrize("data", [X, X_csr])
@pytest.mark.parametrize("dtype", [np.float64, np.float32])
def test_kmeans_plusplus_output(data, dtype):
# Check for the correct number of seeds and all positive values
data = data.astype(dtype)
centers, indices = kmeans_plusplus(data, n_clusters)
# Check there are the correct number of indices and that all indices are
# positive and within the number of samples
assert indices.shape[0] == n_clusters
assert (indices >= 0).all()
assert (indices <= data.shape[0]).all()
# Check for the correct number of seeds and that they are bound by the data
assert centers.shape[0] == n_clusters
assert (centers.max(axis=0) <= data.max(axis=0)).all()
assert (centers.min(axis=0) >= data.min(axis=0)).all()
# Check that indices correspond to reported centers
# Use X for comparison rather than data; the test still works against
# centers calculated from sparse data.
assert_allclose(X[indices].astype(dtype), centers)
@pytest.mark.parametrize("x_squared_norms", [row_norms(X, squared=True), None])
def test_kmeans_plusplus_norms(x_squared_norms):
# Check that passing x_squared_norms gives the same result as the default None.
centers, indices = kmeans_plusplus(X, n_clusters, x_squared_norms=x_squared_norms)
assert_allclose(X[indices], centers)
def test_kmeans_plusplus_dataorder():
# Check that memory layout does not affect the result
centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=0)
X_fortran = np.asfortranarray(X)
centers_fortran, _ = kmeans_plusplus(X_fortran, n_clusters, random_state=0)
assert_allclose(centers_c, centers_fortran)
def test_is_same_clustering():
# Sanity check for the _is_same_clustering utility function
labels1 = np.array([1, 0, 0, 1, 2, 0, 2, 1], dtype=np.int32)
assert _is_same_clustering(labels1, labels1, 3)
# these other labels represent the same clustering since we can retrieve the
# first labels by simply renaming the labels: 0 -> 1, 1 -> 2, 2 -> 0.
labels2 = np.array([0, 2, 2, 0, 1, 2, 1, 0], dtype=np.int32)
assert _is_same_clustering(labels1, labels2, 3)
# these other labels do not represent the same clustering since the points
# labeled 1 in labels1 are not all mapped to the same value
labels3 = np.array([1, 0, 0, 2, 2, 0, 2, 1], dtype=np.int32)
assert not _is_same_clustering(labels1, labels3, 3)
@pytest.mark.parametrize(
"kwargs", ({"init": np.str_("k-means++")}, {"init": [[0, 0], [1, 1]], "n_init": 1})
)
def test_kmeans_with_array_like_or_np_scalar_init(kwargs):
"""Check that init works with numpy scalar strings.
Non-regression test for #21964.
"""
X = np.asarray([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=np.float64)
clustering = KMeans(n_clusters=2, **kwargs)
# Does not raise
clustering.fit(X)
@pytest.mark.parametrize(
"Klass, method",
[(KMeans, "fit"), (MiniBatchKMeans, "fit"), (MiniBatchKMeans, "partial_fit")],
)
def test_feature_names_out(Klass, method):
"""Check `feature_names_out` for `KMeans` and `MiniBatchKMeans`."""
class_name = Klass.__name__.lower()
kmeans = Klass()
getattr(kmeans, method)(X)
n_clusters = kmeans.cluster_centers_.shape[0]
names_out = kmeans.get_feature_names_out()
assert_array_equal([f"{class_name}{i}" for i in range(n_clusters)], names_out)
|
|
from __future__ import unicode_literals
import threading
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "'foo' is an invalid keyword argument for this function"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and assumed that the
compared objects were both Field instances, raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertFalse(isinstance('', EmptyQuerySet))
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors, check only one lookup
# per assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
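# with select_on_save, saving an existing row does an explicit SELECT
# followed by an UPDATE, hence the two queries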
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to test
# this properly otherwise. We patch Article's manager because proxy
# models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
with self.assertRaises(TypeError):
s.refresh_from_db(unknown_kwarg=10)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query is
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
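# an unsaved instance that only has its pk set can still be refreshed
# from the database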
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
|
|
"""
ARIMA model class.
Author: Chad Fulton
License: BSD-3
"""
from statsmodels.compat.pandas import Appender
import warnings
import numpy as np
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.statespace import sarimax
from statsmodels.tsa.statespace.kalman_filter import MEMORY_CONSERVE
from statsmodels.tsa.statespace.tools import diff
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.arima.estimators.yule_walker import yule_walker
from statsmodels.tsa.arima.estimators.burg import burg
from statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen
from statsmodels.tsa.arima.estimators.innovations import (
innovations, innovations_mle)
from statsmodels.tsa.arima.estimators.gls import gls as estimate_gls
from statsmodels.tsa.arima.specification import SARIMAXSpecification
class ARIMA(sarimax.SARIMAX):
"""
Autoregressive Integrated Moving Average (ARIMA) model, and extensions
This model is the basic interface for ARIMA-type models, including those
with exogenous regressors and those with seasonal components. The most
general form of the model is SARIMAX(p, d, q)x(P, D, Q, s). It also allows
all specialized cases, including
- autoregressive models: AR(p)
- moving average models: MA(q)
- mixed autoregressive moving average models: ARMA(p, q)
- integration models: ARIMA(p, d, q)
- seasonal models: SARIMA(P, D, Q, s)
- regression with errors that follow one of the above ARIMA-type models
Parameters
----------
endog : array_like, optional
The observed time-series process :math:`y`.
exog : array_like, optional
Array of exogenous regressors.
order : tuple, optional
The (p,d,q) order of the model for the autoregressive, differences, and
moving average components. d is always an integer, while p and q may
either be integers or lists of integers.
seasonal_order : tuple, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity. Default
is (0, 0, 0, 0). D and s are always integers, while P and Q
may either be integers or lists of positive integers.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend. Can be specified as a
string where 'c' indicates a constant term, 't' indicates a
linear trend in time, and 'ct' includes both. Can also be specified as
an iterable defining a polynomial, as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is 'c' for
models without integration, and no trend for models with integration.
enforce_stationarity : bool, optional
Whether or not to require the autoregressive parameters to correspond
to a stationary process.
enforce_invertibility : bool, optional
Whether or not to require the moving average parameters to correspond
to an invertible process.
concentrate_scale : bool, optional
Whether or not to concentrate the scale (variance of the error term)
out of the likelihood. This reduces the number of parameters by one.
This is only applicable when considering estimation by numerical
maximum likelihood.
trend_offset : int, optional
The offset at which to start time trend values. Default is 1, so that
if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically this is
only set when the model is created by extending a previous dataset.
dates : array_like of datetime, optional
If no index is given by `endog` or `exog`, an array-like object of
datetime objects can be provided.
freq : str, optional
If no index is given by `endog` or `exog`, the frequency of the
time-series may be specified here as a Pandas offset or offset string.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none'.
Notes
-----
This model incorporates both exogenous regressors and trend components
through "regression with ARIMA errors".
`enforce_stationarity` and `enforce_invertibility` are specified in the
constructor because they affect loglikelihood computations, and so should
not be changed on the fly. This is why they are not instead included as
arguments to the `fit` method.
TODO: should we use concentrate_scale=True by default?
Examples
--------
>>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))
>>> res = mod.fit()
>>> print(res.summary())
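A further, purely illustrative sketch (it assumes the same `endog` as
above and a monthly seasonal period of 12): an integrated seasonal
specification with a linear trend could be written as

>>> mod_seasonal = sm.tsa.arima.ARIMA(
...     endog, order=(1, 1, 1), seasonal_order=(1, 0, 0, 12), trend='t')
>>> res_seasonal = mod_seasonal.fit()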
"""
def __init__(self, endog, exog=None, order=(0, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
enforce_stationarity=True, enforce_invertibility=True,
concentrate_scale=False, trend_offset=1, dates=None,
freq=None, missing='none', validate_specification=True):
# Default for trend
# 'c' if there is no integration and 'n' otherwise
# TODO: if trend='c', then we could alternatively use `demean=True` in
# the estimation methods rather than setting up `exog` and using GLS.
# Not sure if it's worth the trouble though.
integrated = order[1] > 0 or seasonal_order[1] > 0
if trend is None and not integrated:
trend = 'c'
elif trend is None:
trend = 'n'
# Construct the specification
# (don't pass specific values of enforce stationarity/invertibility,
# because we don't actually want to restrict the estimators based on
# these criteria. Instead, we'll just make sure that the parameter
# estimates from those methods satisfy the criteria.)
self._spec_arima = SARIMAXSpecification(
endog, exog=exog, order=order, seasonal_order=seasonal_order,
trend=trend, enforce_stationarity=None, enforce_invertibility=None,
concentrate_scale=concentrate_scale, trend_offset=trend_offset,
dates=dates, freq=freq, missing=missing,
validate_specification=validate_specification)
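# Note: the specification machinery prepends any trend terms as columns of
# `exog`, so the `exog` retrieved below already includes them.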
exog = self._spec_arima._model.data.orig_exog
# Raise an error if we have a constant in an integrated model
has_trend = len(self._spec_arima.trend_terms) > 0
if has_trend:
lowest_trend = np.min(self._spec_arima.trend_terms)
if lowest_trend < order[1] + seasonal_order[1]:
raise ValueError(
'In models with integration (`d > 0`) or seasonal'
' integration (`D > 0`), trend terms of lower order than'
' `d + D` cannot be (as they would be eliminated due to'
' the differencing operation). For example, a constant'
' cannot be included in an ARIMA(1, 1, 1) model, but'
' including a linear trend, which would have the same'
' effect as fitting a constant to the differenced data,'
' is allowed.')
# Keep the given `exog` by removing the prepended trend variables
input_exog = None
if exog is not None:
if _is_using_pandas(exog, None):
input_exog = exog.iloc[:, self._spec_arima.k_trend:]
else:
input_exog = exog[:, self._spec_arima.k_trend:]
# Initialize the base SARIMAX class
# Note: we don't pass in a trend value to the base class, since ARIMA
# standardizes the trend to always be part of exog, while the base
# SARIMAX class puts it in the transition equation.
super(ARIMA, self).__init__(
endog, exog, trend=None, order=order,
seasonal_order=seasonal_order,
enforce_stationarity=enforce_stationarity,
enforce_invertibility=enforce_invertibility,
concentrate_scale=concentrate_scale, dates=dates, freq=freq,
missing=missing, validate_specification=validate_specification)
self.trend = trend
# Save the input exog and input exog names, so that we can refer to
# them later (see especially `ARIMAResults.append`)
self._input_exog = input_exog
if exog is not None:
self._input_exog_names = self.exog_names[self._spec_arima.k_trend:]
else:
self._input_exog_names = None
# Override the public attributes for k_exog and k_trend to reflect the
# distinction here (for the purpose of the superclass, these are both
# combined as `k_exog`)
self.k_exog = self._spec_arima.k_exog
self.k_trend = self._spec_arima.k_trend
# Remove some init kwargs that aren't used in this model
unused = ['measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'hamilton_representation']
self._init_keys = [key for key in self._init_keys if key not in unused]
@property
def _res_classes(self):
return {'fit': (ARIMAResults, ARIMAResultsWrapper)}
def fit(self, start_params=None, transformed=True, includes_fixed=False,
method=None, method_kwargs=None, gls=None, gls_kwargs=None,
cov_type=None, cov_kwds=None, return_params=False,
low_memory=False):
"""
Fit (estimate) the parameters of the model.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `start_params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
method : str, optional
The method used for estimating the parameters of the model. Valid
options include 'statespace', 'innovations_mle', 'hannan_rissanen',
'burg', 'innovations', and 'yule_walker'. Not all options are
available for every specification (for example 'yule_walker' can
only be used with AR(p) models).
method_kwargs : dict, optional
Arguments to pass to the fit function for the parameter estimator
described by the `method` argument.
gls : bool, optional
Whether or not to use generalized least squares (GLS) to estimate
regression effects. The default is False if `method='statespace'`
and is True otherwise.
gls_kwargs : dict, optional
Arguments to pass to the GLS estimation fit method. Only applicable
if GLS estimation is used (see `gls` argument for details).
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'opg' unless memory conservation is used to avoid
computing the loglikelihood values for each observation, in which
case the default is 'oim'.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including smoothed results and in-sample
prediction), although out-of-sample forecasting is possible.
Default is False.
Returns
-------
ARIMAResults
Examples
--------
>>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))
>>> res = mod.fit()
>>> print(res.summary())
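As an additional, illustrative sketch (assuming a purely autoregressive
specification for the same `endog`, with no exogenous regressors), a
non-default estimator can be selected through the `method` argument:

>>> mod_ar = sm.tsa.arima.ARIMA(endog, order=(2, 0, 0))
>>> res_burg = mod_ar.fit(method='burg')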
"""
# Determine which method to use
# 1. If method is specified, make sure it is valid
if method is not None:
self._spec_arima.validate_estimator(method)
# 2. Otherwise, use state space
# TODO: may want to consider using innovations (MLE) if possible here,
# (since in some cases it may be faster than state space), but it is
# less tested.
else:
method = 'statespace'
# Can only use fixed parameters with method='statespace'
if self._has_fixed_params and method != 'statespace':
raise ValueError('When parameters have been fixed, only the method'
' "statespace" can be used; got "%s".' % method)
# Handle kwargs related to the fit method
if method_kwargs is None:
method_kwargs = {}
required_kwargs = []
if method == 'statespace':
required_kwargs = ['enforce_stationarity', 'enforce_invertibility',
'concentrate_scale']
elif method == 'innovations_mle':
required_kwargs = ['enforce_invertibility']
for name in required_kwargs:
if name in method_kwargs:
raise ValueError('Cannot override model level value for "%s"'
' when method="%s".' % (name, method))
method_kwargs[name] = getattr(self, name)
# Handle kwargs related to GLS estimation
if gls_kwargs is None:
gls_kwargs = {}
# Handle starting parameters
# TODO: maybe should have standard way of computing starting
# parameters in this class?
if start_params is not None:
if method not in ['statespace', 'innovations_mle']:
raise ValueError('Estimation method "%s" does not use starting'
' parameters, but `start_params` argument was'
' given.' % method)
method_kwargs['start_params'] = start_params
method_kwargs['transformed'] = transformed
method_kwargs['includes_fixed'] = includes_fixed
# Perform estimation, depending on whether we have exog or not
p = None
fit_details = None
has_exog = self._spec_arima.exog is not None
if has_exog or method == 'statespace':
# Use GLS if it was explicitly requested (`gls = True`) or if it
# was left at the default (`gls = None`) and the ARMA estimator is
# anything but statespace.
# Note: both GLS and statespace are able to handle models with
# integration, so we don't need to difference endog or exog here.
if has_exog and (gls or (gls is None and method != 'statespace')):
p, fit_details = estimate_gls(
self.endog, exog=self.exog, order=self.order,
seasonal_order=self.seasonal_order, include_constant=False,
arma_estimator=method, arma_estimator_kwargs=method_kwargs,
**gls_kwargs)
elif method != 'statespace':
raise ValueError('If `exog` is given and GLS is disabled'
' (`gls=False`), then the only valid'
" method is 'statespace'. Got '%s'."
% method)
else:
method_kwargs.setdefault('disp', 0)
res = super(ARIMA, self).fit(
return_params=return_params, low_memory=low_memory,
cov_type=cov_type, cov_kwds=cov_kwds, **method_kwargs)
if not return_params:
res.fit_details = res.mlefit
else:
# Handle differencing if we have an integrated model
# (these methods do not support handling integration internally,
# so we need to manually do the differencing)
endog = self.endog
order = self._spec_arima.order
seasonal_order = self._spec_arima.seasonal_order
if self._spec_arima.is_integrated:
warnings.warn('Provided `endog` series has been differenced'
' to eliminate integration prior to parameter'
' estimation by method "%s".' % method)
endog = diff(
endog, k_diff=self._spec_arima.diff,
k_seasonal_diff=self._spec_arima.seasonal_diff,
seasonal_periods=self._spec_arima.seasonal_periods)
if order[1] > 0:
order = (order[0], 0, order[2])
if seasonal_order[1] > 0:
seasonal_order = (seasonal_order[0], 0, seasonal_order[2],
seasonal_order[3])
# Now, estimate parameters
if method == 'yule_walker':
p, fit_details = yule_walker(
endog, ar_order=order[0], demean=False,
**method_kwargs)
elif method == 'burg':
p, fit_details = burg(endog, ar_order=order[0],
demean=False, **method_kwargs)
elif method == 'hannan_rissanen':
p, fit_details = hannan_rissanen(
endog, ar_order=order[0],
ma_order=order[2], demean=False, **method_kwargs)
elif method == 'innovations':
p, fit_details = innovations(
endog, ma_order=order[2], demean=False,
**method_kwargs)
# innovations computes estimates through the given order, so
# we want to take the estimate associated with the given order
p = p[-1]
elif method == 'innovations_mle':
p, fit_details = innovations_mle(
endog, order=order,
seasonal_order=seasonal_order,
demean=False, **method_kwargs)
# In all cases except method='statespace', we now need to extract the
# parameters and, optionally, create a new results object
if p is not None:
# Need to check that fitted parameters satisfy given restrictions
if (self.enforce_stationarity
and self._spec_arima.max_reduced_ar_order > 0
and not p.is_stationary):
raise ValueError('Non-stationary autoregressive parameters'
' found with `enforce_stationarity=True`.'
' Consider setting it to False or using a'
' different estimation method, such as'
' method="statespace".')
if (self.enforce_invertibility
and self._spec_arima.max_reduced_ma_order > 0
and not p.is_invertible):
raise ValueError('Non-invertible moving average parameters'
' found with `enforce_invertibility=True`.'
' Consider setting it to False or using a'
' different estimation method, such as'
' method="statespace".')
# Build the requested results
if return_params:
res = p.params
else:
# Handle memory conservation option
if low_memory:
conserve_memory = self.ssm.conserve_memory
self.ssm.set_conserve_memory(MEMORY_CONSERVE)
# Perform filtering / smoothing
if (self.ssm.memory_no_predicted or self.ssm.memory_no_gain
or self.ssm.memory_no_smoothing):
func = self.filter
else:
func = self.smooth
res = func(p.params, transformed=True, includes_fixed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
# Save any details from the fit method
res.fit_details = fit_details
# Reset memory conservation
if low_memory:
self.ssm.set_conserve_memory(conserve_memory)
return res
@Appender(sarimax.SARIMAXResults.__doc__)
class ARIMAResults(sarimax.SARIMAXResults):
@Appender(sarimax.SARIMAXResults.append.__doc__)
def append(self, endog, exog=None, refit=False, fit_kwargs=None, **kwargs):
# MLEResults.append will concatenate the given `exog` here with
# `data.orig_exog`. However, `data.orig_exog` already has had any
# trend variables prepended to it, while the `exog` given here should
# not. Instead, we need to temporarily replace `orig_exog` and
# `exog_names` with the ones that correspond to those that were input
# by the user.
if exog is not None:
orig_exog = self.model.data.orig_exog
exog_names = self.model.exog_names
self.model.data.orig_exog = self.model._input_exog
self.model.exog_names = self.model._input_exog_names
# Perform the appending procedure
out = super().append(endog, exog=exog, refit=refit,
fit_kwargs=fit_kwargs, **kwargs)
# Now we reverse the temporary change made above
if exog is not None:
self.model.data.orig_exog = orig_exog
self.model.exog_names = exog_names
return out
class ARIMAResultsWrapper(sarimax.SARIMAXResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(
sarimax.SARIMAXResultsWrapper._wrap_attrs, _attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
sarimax.SARIMAXResultsWrapper._wrap_methods, _methods)
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults) # noqa:E305
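# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the statsmodels source
# above). It shows the two dispatch paths of ``fit``: the default
# state-space estimator and a non-state-space estimator, which triggers the
# differencing branch for an integrated model. Assumes NumPy >= 1.17 and a
# statsmodels version exposing ``statsmodels.tsa.arima.model.ARIMA``.
if __name__ == '__main__':
    import numpy as np
    from statsmodels.tsa.arima.model import ARIMA

    rng = np.random.default_rng(12345)
    y = rng.standard_normal(300).cumsum()  # integrated (d=1) toy series

    # Default path: method='statespace' (handles integration internally).
    res_ss = ARIMA(y, order=(1, 1, 1)).fit()

    # Non-state-space path: endog is differenced before estimation and a
    # warning is emitted, as implemented in ``fit`` above.
    res_hr = ARIMA(y, order=(1, 1, 1)).fit(method='hannan_rissanen')

    print(res_ss.params)
    print(res_hr.params)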
|
|
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
 - LeNetConvPool doesn't implement pooling by average; it implements
   max-pooling.
 - Digit classification is implemented with a logistic regression rather than
   an RBF network.
 - Unlike the original LeNet5, the second convolutional layer here uses
   complete (fully-connected) filter connections rather than the sparse
   connection table described in the paper.
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
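        # Worked example (illustrative) for the first layer with the defaults
        # used below, filter_shape=(20, 1, 7, 7) and poolsize=(2, 2):
        #   fan_in  = 1 * 7 * 7            = 49
        #   fan_out = 20 * 7 * 7 / (2 * 2) = 245
        #   W_bound = sqrt(6 / (49 + 245)) ~= 0.143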
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
def plot_validation_data(error_rates):
import matplotlib.pyplot as plt
    plt.plot(range(len(error_rates)), error_rates)
    plt.title('Validation error over CNN training')
    plt.xlabel('validation check number')
    plt.ylabel('validation error rate (%)')
plt.show()
def evaluate_lenet5(learning_rate=0.1, n_epochs=250,
dataset='mnist.pkl.gz',
nkerns=[20, 50], batch_size=500, plot_results=False):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    :type batch_size: int
    :param batch_size: number of examples in each minibatch
    :type plot_results: bool
    :param plot_results: if True, plot the validation error rates after
                         training completes
    """
rng = numpy.random.RandomState(23455)
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
    # Reshape matrix of rasterized images of shape (batch_size, 40 * 40)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer.
    # (40, 40) is the input image size assumed here (note that standard
    # MNIST images are 28x28; this variant expects 40x40 character images).
layer0_input = x.reshape((batch_size, 1, 40, 40))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (40-7+1 , 40-7+1) = (34, 34)
# maxpooling reduces this further to (34/2, 34/2) = (17, 17)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 17, 17)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 40, 40),
filter_shape=(nkerns[0], 1, 7, 7),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (17-7+1, 17-7+1) = (11, 11)
# maxpooling reduces this further to (11/2, 11/2) = (5, 5)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 5, 5)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 17, 17),
filter_shape=(nkerns[1], nkerns[0], 7, 7),
poolsize=(2, 2)
)
    # Because the HiddenLayer is fully connected, it operates on 2D matrices
    # of shape (batch_size, num_pixels), i.e. a matrix of rasterized images.
# This will generate a matrix of shape (batch_size, nkerns[1] * 5 * 5),
# or (500, 50 * 5 * 5) = (500, 1250) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 5 * 5,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=62)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
    # train_model is a function that updates the model parameters by SGD.
    # Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
    patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
                                  # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
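    # Worked example (illustrative): with, e.g., the standard 50,000-example
    # MNIST training split and batch_size=500, n_train_batches = 100, so
    # validation_frequency = min(100, 10000 / 2) = 100, i.e. the network is
    # validated once per epoch.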
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
error_rates = []
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
print 'epoch is', epoch
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
print 'iter is', iter
if iter % 100 == 0:
print 'training @ iter = ', iter
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
error_rates.append(this_validation_loss * 100.)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if plot_results:
plot_validation_data(error_rates)
if __name__ == '__main__':
evaluate_lenet5(dataset='EnglishNatural.gz',plot_results=True)
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
|
|
"""Test case implementation"""
import sys
import difflib
import pprint
import re
import unittest
import warnings
from django.utils.unittest import result
from django.utils.unittest.util import\
safe_repr, safe_str, strclass,\
unorderable_list_difference
from django.utils.unittest.compatibility import wraps
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestResult.skip() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
# can't use super because Python 2.4 exceptions are old style
Exception.__init__(self)
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
@wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, basestring):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
class TestCase(unittest.TestCase):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = True
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" % \
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = _TypeEqualityDict(self)
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("Use of a TestResult without an addSkip method is deprecated",
DeprecationWarning, 2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
try:
testMethod()
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure, e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
DeprecationWarning)
result.addFailure(self, sys.exc_info())
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except Exception:
result.addError(self, sys.exc_info())
success = False
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except Exception:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"Fail the test if the expression is true."
if expr:
msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
if callableObj is None:
return _AssertRaisesContext(excClass, self)
try:
callableObj(*args, **kwargs)
except excClass:
return
if hasattr(excClass,'__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
           (default 7) and comparing to zero, or by comparing that the
           difference between the two objects is more than the given delta.
           Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
           (default 7) and comparing to zero, or by comparing that the
           difference between the two objects is less than the given delta.
           Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2,
msg=None, seq_type=None, max_diff=80*8):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
            max_diff: Maximum size of the diff; larger diffs are not shown
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = repr(seq1)
seq2_repr = repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertTrue(isinstance(d1, dict), 'First argument is not a dictionary')
self.assertTrue(isinstance(d2, dict), 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertTrue(isinstance(first, basestring), (
'First argument is not a string'))
self.assertTrue(isinstance(second, basestring), (
'Second argument is not a string'))
if first != second:
standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
second.splitlines(True)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
if callable_obj is None:
return _AssertRaisesContext(expected_exception, self, expected_regexp)
try:
callable_obj(*args, **kwargs)
except expected_exception, exc_value:
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s testFunc=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
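# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the backport itself). It
# exercises the context-manager form of assertRaises defined above and runs
# the test against a plain TestResult. Requires a Python version with the
# ``with`` statement (2.6+).
if __name__ == '__main__':
    class _DemoTest(TestCase):
        def runTest(self):
            with self.assertRaises(ZeroDivisionError) as cm:
                1 / 0
            # the context manager stores the exception for later inspection
            self.assertIsInstance(cm.exception, ZeroDivisionError)

    demo_result = result.TestResult()
    _DemoTest('runTest').run(demo_result)
    print(demo_result.wasSuccessful())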
|