# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to collect and publish performance samples to various sinks."""
import abc
import io
import itertools
import json
import logging
import operator
import sys
import time
import uuid
from perfkitbenchmarker import disk
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.sample import Sample
FLAGS = flags.FLAGS
flags.DEFINE_string(
'product_name',
'PerfKitBenchmarker',
'The product name to use when publishing results.')
flags.DEFINE_boolean(
'official',
False,
'A boolean indicating whether results are official or not. The '
'default is False. Official test results are treated and queried '
'differently from non-official test results.')
flags.DEFINE_string(
'json_path',
None,
'A path to write newline-delimited JSON results. '
'Default: write to a run-specific temporary directory.')
flags.DEFINE_boolean(
'collapse_labels',
True,
'Collapse entries in labels.')
flags.DEFINE_string(
'bigquery_table',
None,
'The BigQuery table to publish results to. This should be of the form '
'"[project_id:]dataset_name.table_name".')
flags.DEFINE_string(
'bq_path', 'bq', 'Path to the "bq" executable.')
flags.DEFINE_string(
'bq_project', None, 'Project to use for authenticating with BigQuery.')
flags.DEFINE_string(
'service_account', None, 'Service account to use to authenticate with BQ.')
flags.DEFINE_string(
'service_account_private_key', None,
'Service private key for authenticating with BQ.')
flags.DEFINE_string(
'gsutil_path', 'gsutil', 'Path to the "gsutil" executable.')
flags.DEFINE_string(
'cloud_storage_bucket',
None,
'GCS bucket to upload records to. Bucket must exist.')
flags.DEFINE_list(
'metadata',
[],
'A list of key-value pairs that will be added to the labels field of all '
'samples as metadata. Each key-value pair in the list should be colon '
'separated.')
DEFAULT_JSON_OUTPUT_NAME = 'perfkitbenchmarker_results.json'
DEFAULT_CREDENTIALS_JSON = 'credentials.json'
GCS_OBJECT_NAME_LENGTH = 20
def GetLabelsFromDict(metadata):
"""Converts a metadata dictionary to a string of labels.
Args:
metadata: a dictionary of string key value pairs.
Returns:
A string of labels in the format that Perfkit uses.
"""
labels = []
for k, v in metadata.iteritems():
labels.append('|%s:%s|' % (k, v))
return ','.join(labels)
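# Illustrative example (not part of the original module): GetLabelsFromDict
# flattens metadata into the pipe-delimited label string used by the
# publishers below. Key order follows dict iteration order, so it may vary:
#
#   >>> GetLabelsFromDict({'num_cpus': '4', 'ip_type': 'external'})
#   '|num_cpus:4|,|ip_type:external|'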
class MetadataProvider(object):
"""A provider of sample metadata."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def AddMetadata(self, metadata, benchmark_spec):
"""Add metadata to a dictionary.
Existing values will be overwritten.
Args:
metadata: dict. Dictionary of metadata to update.
benchmark_spec: BenchmarkSpec. The benchmark specification.
Returns:
Updated 'metadata'.
"""
raise NotImplementedError()
class DefaultMetadataProvider(MetadataProvider):
"""Adds default metadata to samples."""
def AddMetadata(self, metadata, benchmark_spec):
metadata = metadata.copy()
metadata['perfkitbenchmarker_version'] = version.VERSION
metadata['cloud'] = benchmark_spec.cloud
# Get the unique zone names from the VMs.
metadata['zones'] = ','.join(set([vm.zone for vm in benchmark_spec.vms]))
# Get a representative VM so that we can publish the machine type and
# image. If we support different machine types/images in the same benchmark
# this will need to be updated.
vm = benchmark_spec.vms[0]
metadata['machine_type'] = vm.machine_type
metadata['image'] = vm.image
# Scratch disk is not defined when a benchmark config is provided.
if getattr(benchmark_spec, 'scratch_disk', None):
metadata.update(scratch_disk_type=benchmark_spec.scratch_disk_type,
scratch_disk_size=benchmark_spec.scratch_disk_size,
num_striped_disks=FLAGS.num_striped_disks)
if benchmark_spec.scratch_disk_type == disk.PIOPS:
metadata['scratch_disk_iops'] = benchmark_spec.scratch_disk_iops
# User specified metadata
for pair in FLAGS.metadata:
try:
key, value = pair.split(':')
metadata[key] = value
except ValueError:
logging.error('Bad metadata flag format. Skipping "%s".', pair)
continue
return metadata
DEFAULT_METADATA_PROVIDERS = [DefaultMetadataProvider()]
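# Illustrative example (hypothetical flag values): entries passed via
# --metadata are colon-separated key:value pairs. For instance,
# --metadata=owner:alice,purpose:smoke-test parses to
# ['owner:alice', 'purpose:smoke-test'], and DefaultMetadataProvider adds
# {'owner': 'alice', 'purpose': 'smoke-test'} to every sample's metadata;
# an entry without a colon is logged and skipped.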
class SamplePublisher(object):
"""An object that can publish performance samples."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def PublishSamples(self, samples):
"""Publishes 'samples'.
PublishSamples will be called exactly once. Calling
SamplePublisher.PublishSamples multiple times may result in data being
overwritten.
Args:
samples: list of dicts to publish.
"""
raise NotImplementedError()
class PrettyPrintStreamPublisher(SamplePublisher):
"""Writes samples to an output stream, defaulting to stdout.
Samples are pretty-printed and summarized. Example output (truncated):
-------------------------PerfKitBenchmarker Results Summary--------------
COREMARK:
num_cpus="4"
Coremark Score 44145.237832
End to End Runtime 289.477677 seconds
NETPERF:
client_machine_type="n1-standard-4" client_zone="us-central1-a" ....
TCP_RR_Transaction_Rate 1354.04 transactions_per_second (ip_type="ext ...
TCP_RR_Transaction_Rate 3972.70 transactions_per_second (ip_type="int ...
TCP_CRR_Transaction_Rate 449.69 transactions_per_second (ip_type="ext ...
TCP_CRR_Transaction_Rate 1271.68 transactions_per_second (ip_type="int ...
TCP_STREAM_Throughput 1171.04 Mbits/sec (ip_type="ext ...
TCP_STREAM_Throughput 6253.24 Mbits/sec (ip_type="int ...
UDP_RR_Transaction_Rate 1380.37 transactions_per_second (ip_type="ext ...
UDP_RR_Transaction_Rate 4336.37 transactions_per_second (ip_type="int ...
End to End Runtime 444.33 seconds
-------------------------
For all tests: cloud="GCP" image="ubuntu-14-04" machine_type="n1-standa ...
Attributes:
stream: File-like object. Output stream to print samples.
"""
def __init__(self, stream=None):
self.stream = stream or sys.stdout
def __repr__(self):
return '<{0} stream={1}>'.format(type(self).__name__, self.stream)
def _FindConstantMetadataKeys(self, samples):
"""Finds metadata keys which are constant across a collection of samples.
Args:
samples: List of dicts, as passed to SamplePublisher.PublishSamples.
Returns:
The set of metadata keys for which all samples in 'samples' have the same
value.
"""
unique_values = {}
for sample in samples:
for k, v in sample['metadata'].iteritems():
if len(unique_values.setdefault(k, set())) < 2:
unique_values[k].add(v)
# Find keys which are not present in all samples
for sample in samples:
for k in frozenset(unique_values) - frozenset(sample['metadata']):
unique_values[k].add(None)
return frozenset(k for k, v in unique_values.iteritems() if len(v) == 1)
def _FormatMetadata(self, metadata):
"""Format 'metadata' as space-delimited key="value" pairs."""
return ' '.join('{0}="{1}"'.format(k, v)
for k, v in sorted(metadata.iteritems()))
def PublishSamples(self, samples):
# result will store the formatted text, then be emitted to self.stream and
# logged.
result = io.BytesIO()
dashes = '-' * 25
result.write('\n' + dashes +
'PerfKitBenchmarker Results Summary' +
dashes + '\n')
if not samples:
logging.debug('Pretty-printing results to %s:\n%s', self.stream,
result.getvalue())
self.stream.write(result.getvalue())
return
key = operator.itemgetter('test')
samples = sorted(samples, key=key)
globally_constant_keys = self._FindConstantMetadataKeys(samples)
for benchmark, test_samples in itertools.groupby(samples, key):
test_samples = list(test_samples)
# Drop end-to-end runtime: it never has metadata.
non_endtoend_samples = [i for i in test_samples
if i['metric'] != 'End to End Runtime']
locally_constant_keys = (
self._FindConstantMetadataKeys(non_endtoend_samples) -
globally_constant_keys)
all_constant_meta = globally_constant_keys.union(locally_constant_keys)
benchmark_meta = {k: v for k, v in test_samples[0]['metadata'].iteritems()
if k in locally_constant_keys}
result.write('{0}:\n'.format(benchmark.upper()))
if benchmark_meta:
result.write(' {0}\n'.format(
self._FormatMetadata(benchmark_meta)))
for sample in test_samples:
meta = {k: v for k, v in sample['metadata'].iteritems()
if k not in all_constant_meta}
result.write(' {0:<30s} {1:>15f} {2:<30s}'.format(
sample['metric'], sample['value'], sample['unit']))
if meta:
result.write(' ({0})'.format(self._FormatMetadata(meta)))
result.write('\n')
global_meta = {k: v for k, v in samples[0]['metadata'].iteritems()
if k in globally_constant_keys}
result.write('\n' + dashes + '\n')
result.write('For all tests: {0}\n'.format(
self._FormatMetadata(global_meta)))
value = result.getvalue()
logging.debug('Pretty-printing results to %s:\n%s', self.stream, value)
self.stream.write(value)
class LogPublisher(SamplePublisher):
"""Writes samples to a Python Logger.
Attributes:
level: Logging level. Defaults to logging.INFO.
logger: Logger to publish to. Defaults to the root logger.
"""
def __init__(self, level=logging.INFO, logger=None):
self.level = level
self.logger = logger or logging.getLogger()
def __repr__(self):
return '<{0} logger={1} level={2}>'.format(type(self).__name__, self.logger,
self.level)
def PublishSamples(self, samples):
data = [
'\n' + '-' * 25 + 'PerfKitBenchmarker Complete Results' + '-' * 25 +
'\n']
for sample in samples:
data.append('%s\n' % sample)
self.logger.log(self.level, ''.join(data))
# TODO: Extract a function to write delimited JSON to a stream.
class NewlineDelimitedJSONPublisher(SamplePublisher):
"""Publishes samples to a file as newline delimited JSON.
The resulting output file is compatible with 'bq load' using
format NEWLINE_DELIMITED_JSON.
If 'collapse_labels' is True, metadata is converted to a flat string with key
'labels' via GetLabelsFromDict.
Attributes:
file_path: string. Destination path to write samples.
mode: Open mode for 'file_path'. Set to 'a' to append.
collapse_labels: boolean. If true, collapse sample metadata.
"""
def __init__(self, file_path, mode='wb', collapse_labels=True):
self.file_path = file_path
self.mode = mode
self.collapse_labels = collapse_labels
def __repr__(self):
return '<{0} file_path="{1}" mode="{2}">'.format(
type(self).__name__, self.file_path, self.mode)
def PublishSamples(self, samples):
logging.info('Publishing %d samples to %s', len(samples),
self.file_path)
with open(self.file_path, self.mode) as fp:
for sample in samples:
sample = sample.copy()
if self.collapse_labels:
sample['labels'] = GetLabelsFromDict(sample.pop('metadata', {}))
fp.write(json.dumps(sample) + '\n')
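# Illustrative example (sample values are hypothetical): with
# collapse_labels=True, a sample such as
#   {'metric': 'Coremark Score', 'value': 44145.2, 'unit': '',
#    'metadata': {'num_cpus': '4'}}
# is written as a single JSON line with the metadata flattened via
# GetLabelsFromDict:
#   {"metric": "Coremark Score", "value": 44145.2, "unit": "",
#    "labels": "|num_cpus:4|"}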
class BigQueryPublisher(SamplePublisher):
"""Publishes samples to BigQuery.
Attributes:
bigquery_table: string. The BigQuery table to publish to, of the form
'[project_id:]dataset_name.table_name'.
project_id: string. Project to use for authenticating with BigQuery.
bq_path: string. Path to the 'bq' executable.
service_account: string. Use this service account email address for
authorization. For example, 1234567890@developer.gserviceaccount.com
service_account_private_key: Filename that contains the service account
private key. Must be specified if service_account is specified.
"""
def __init__(self, bigquery_table, project_id=None, bq_path='bq',
service_account=None, service_account_private_key_file=None):
self.bigquery_table = bigquery_table
self.project_id = project_id
self.bq_path = bq_path
self.service_account = service_account
self.service_account_private_key_file = service_account_private_key_file
self._credentials_file = vm_util.PrependTempDir(DEFAULT_CREDENTIALS_JSON)
if ((self.service_account is None) !=
(self.service_account_private_key_file is None)):
raise ValueError('service_account and service_account_private_key '
'must be specified together.')
def __repr__(self):
return '<{0} table="{1}">'.format(type(self).__name__, self.bigquery_table)
def PublishSamples(self, samples):
if not samples:
logging.warn('No samples: not publishing to BigQuery')
return
with vm_util.NamedTemporaryFile(prefix='perfkit-bq-pub',
dir=vm_util.GetTempDir(),
suffix='.json') as tf:
json_publisher = NewlineDelimitedJSONPublisher(tf.name,
collapse_labels=True)
json_publisher.PublishSamples(samples)
tf.close()
logging.info('Publishing %d samples to %s', len(samples),
self.bigquery_table)
load_cmd = [self.bq_path]
if self.project_id:
load_cmd.append('--project_id=' + self.project_id)
if self.service_account:
assert self.service_account_private_key_file is not None
load_cmd.extend(['--service_account=' + self.service_account,
'--service_account_credential_file=' +
self._credentials_file,
'--service_account_private_key_file=' +
self.service_account_private_key_file])
load_cmd.extend(['load',
'--source_format=NEWLINE_DELIMITED_JSON',
self.bigquery_table,
tf.name])
vm_util.IssueRetryableCommand(load_cmd)
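# Illustrative example (table name and path are hypothetical): with
# bigquery_table='samples_mart.results' and no service account, the command
# issued above resembles:
#   bq load --source_format=NEWLINE_DELIMITED_JSON samples_mart.results \
#       /tmp/perfkit/perfkit-bq-pubXXXX.json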
class CloudStoragePublisher(SamplePublisher):
"""Publishes samples to a Google Cloud Storage bucket using gsutil.
Samples are formatted using a NewlineDelimitedJSONPublisher, and written to
the destination file within the specified bucket named:
<time>_<uri>
where <time> is derived from the current Unix time and <uri> is a random
UUID; the combined name is truncated to GCS_OBJECT_NAME_LENGTH characters.
Attributes:
bucket: string. The GCS bucket name to publish to.
gsutil_path: string. The path to the 'gsutil' tool.
"""
def __init__(self, bucket, gsutil_path='gsutil'):
self.bucket = bucket
self.gsutil_path = gsutil_path
def __repr__(self):
return '<{0} bucket="{1}">'.format(type(self).__name__, self.bucket)
def _GenerateObjectName(self):
object_name = str(int(time.time() * 100)) + '_' + str(uuid.uuid4())
return object_name[:GCS_OBJECT_NAME_LENGTH]
def PublishSamples(self, samples):
with vm_util.NamedTemporaryFile(prefix='perfkit-gcs-pub',
dir=vm_util.GetTempDir(),
suffix='.json') as tf:
json_publisher = NewlineDelimitedJSONPublisher(tf.name)
json_publisher.PublishSamples(samples)
tf.close()
object_name = self._GenerateObjectName()
storage_uri = 'gs://{0}/{1}'.format(self.bucket, object_name)
logging.info('Publishing %d samples to %s', len(samples), storage_uri)
copy_cmd = [self.gsutil_path, 'cp', tf.name, storage_uri]
vm_util.IssueRetryableCommand(copy_cmd)
class SampleCollector(object):
"""A performance sample collector.
Supports incorporating additional metadata into samples, and publishing
results via any number of SamplePublishers.
Attributes:
samples: A list of Sample objects.
metadata_providers: A list of MetadataProvider objects. Metadata providers
to use. Defaults to DEFAULT_METADATA_PROVIDERS.
publishers: A list of SamplePublisher objects. If not specified, defaults to
a LogPublisher, PrettyPrintStreamPublisher, NewlineDelimitedJSONPublisher,
a BigQueryPublisher if FLAGS.bigquery_table is specified, and a
CloudStoragePublisher if FLAGS.cloud_storage_bucket is specified. See
SampleCollector._DefaultPublishers.
run_uri: A unique tag for the run.
"""
def __init__(self, metadata_providers=None, publishers=None):
self.samples = []
if metadata_providers is not None:
self.metadata_providers = metadata_providers
else:
self.metadata_providers = DEFAULT_METADATA_PROVIDERS
if publishers is not None:
self.publishers = publishers
else:
self.publishers = SampleCollector._DefaultPublishers()
logging.debug('Using publishers: {0}'.format(self.publishers))
self.run_uri = str(uuid.uuid4())
@classmethod
def _DefaultPublishers(cls):
"""Gets a list of default publishers."""
publishers = [LogPublisher(), PrettyPrintStreamPublisher()]
default_json_path = vm_util.PrependTempDir(DEFAULT_JSON_OUTPUT_NAME)
publishers.append(NewlineDelimitedJSONPublisher(
FLAGS.json_path or default_json_path,
collapse_labels=FLAGS.collapse_labels))
if FLAGS.bigquery_table:
publishers.append(BigQueryPublisher(
FLAGS.bigquery_table,
project_id=FLAGS.bq_project,
bq_path=FLAGS.bq_path,
service_account=FLAGS.service_account,
service_account_private_key_file=FLAGS.service_account_private_key))
if FLAGS.cloud_storage_bucket:
publishers.append(CloudStoragePublisher(FLAGS.cloud_storage_bucket,
gsutil_path=FLAGS.gsutil_path))
return publishers
def AddSamples(self, samples, benchmark, benchmark_spec):
"""Adds data samples to the publisher.
Args:
samples: Either a list of Sample objects (preferred) or a list of 3 or
4-tuples (deprecated). The tuples contain the metric name (string), the
value (float), and unit (string) of each sample. If a 4th element is
included, it is a dictionary of metadata associated with the sample.
benchmark: string. The name of the benchmark.
benchmark_spec: BenchmarkSpec. Benchmark specification.
"""
for s in samples:
# Convert input in deprecated format to Sample objects.
if isinstance(s, (list, tuple)):
if len(s) not in (3, 4):
raise ValueError(
'Invalid sample "{0}": should be 3- or 4-tuple.'.format(s))
s = Sample(*s)
# Annotate the sample.
sample = dict(s.asdict())
sample['test'] = benchmark
for meta_provider in self.metadata_providers:
sample['metadata'] = meta_provider.AddMetadata(
sample['metadata'], benchmark_spec)
sample['product_name'] = FLAGS.product_name
sample['official'] = FLAGS.official
sample['owner'] = FLAGS.owner
sample['timestamp'] = time.time()
sample['run_uri'] = self.run_uri
sample['sample_uri'] = str(uuid.uuid4())
events.sample_created.send(benchmark_spec=benchmark_spec,
sample=sample)
self.samples.append(sample)
def PublishSamples(self):
"""Publish samples via all registered publishers."""
for publisher in self.publishers:
publisher.PublishSamples(self.samples)
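# Usage sketch (hypothetical; 'benchmark_spec' stands in for a real
# BenchmarkSpec and flags are assumed to be parsed already):
#
#   collector = SampleCollector()
#   collector.AddSamples(
#       [('Coremark Score', 44145.2, '', {'num_cpus': 4})],
#       'coremark', benchmark_spec)
#   collector.PublishSamples()
#
# AddSamples accepts Sample objects or the deprecated 3/4-tuple form shown
# here; PublishSamples then fans the annotated samples out to every
# registered publisher.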
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder common internal object model"""
import contextlib
import datetime
from oslo_log import log as logging
from oslo_utils import versionutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
LOG = logging.getLogger('object')
obj_make_list = base.obj_make_list
class CinderObjectVersionsHistory(dict):
"""Helper class that maintains objects version history.
Current state of object versions is aggregated in a single version number
that explicitly identifies a set of object versions. That way a service
is able to report what objects it supports using a single string, and all
the newer services will know exactly what that means for each object.
"""
def __init__(self):
super(CinderObjectVersionsHistory, self).__init__()
# NOTE(dulek): This is our pre-history and a starting point - Liberty.
# We want Mitaka to be able to talk to Liberty services, so we need to
# handle backporting to these objects versions (although I don't expect
# we've made a lot of incompatible changes inside the objects).
#
# If an object doesn't exist in Liberty, RPC API compatibility layer
# shouldn't send it or convert it to a dictionary.
#
# Please note that we do not need to add similar entries for each
# release. Liberty is here just for historical reasons.
self.versions = ['liberty']
self['liberty'] = {
'Backup': '1.1',
'BackupImport': '1.1',
'BackupList': '1.0',
'ConsistencyGroup': '1.1',
'ConsistencyGroupList': '1.0',
'Service': '1.0',
'ServiceList': '1.0',
'Snapshot': '1.0',
'SnapshotList': '1.0',
'Volume': '1.1',
'VolumeAttachment': '1.0',
'VolumeAttachmentList': '1.0',
'VolumeList': '1.1',
'VolumeType': '1.0',
'VolumeTypeList': '1.0',
}
def get_current(self):
return self.versions[-1]
def get_current_versions(self):
return self[self.get_current()]
def add(self, ver, updates):
if ver in self.versions:
msg = 'Version %s already exists in history.' % ver
raise exception.ProgrammingError(reason=msg)
self[ver] = self[self.get_current()].copy()
self.versions.append(ver)
self[ver].update(updates)
OBJ_VERSIONS = CinderObjectVersionsHistory()
# NOTE(dulek): You should add a new version here each time you bump a version
# of any object. As a second parameter you need to specify only what changed.
#
# When dropping backward compatibility with an OpenStack release we can rework
# this and remove some history while keeping the versions order.
OBJ_VERSIONS.add('1.0', {'Backup': '1.3', 'BackupImport': '1.3',
'CGSnapshot': '1.0', 'CGSnapshotList': '1.0',
'ConsistencyGroup': '1.2',
'ConsistencyGroupList': '1.1', 'Service': '1.1',
'Volume': '1.3', 'VolumeTypeList': '1.1'})
OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'})
OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'})
OBJ_VERSIONS.add('1.3', {'Service': '1.3'})
OBJ_VERSIONS.add('1.4', {'Snapshot': '1.1'})
OBJ_VERSIONS.add('1.5', {'VolumeType': '1.1'})
OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0',
'QualityOfServiceSpecsList': '1.0',
'VolumeType': '1.2'})
OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0',
'Service': '1.4', 'Volume': '1.4',
'ConsistencyGroup': '1.3'})
OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'})
OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'})
OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5',
'RequestSpec': '1.1', 'VolumeProperties': '1.1'})
OBJ_VERSIONS.add('1.11', {'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0',
'Group': '1.1'})
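# Illustrative example: given the history built above,
#   OBJ_VERSIONS.get_current()            -> '1.11'
#   OBJ_VERSIONS.get_current_versions()   -> the cumulative mapping for
#                                            '1.11' (e.g. 'Group': '1.1',
#                                            'Service': '1.4').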
class CinderObjectRegistry(base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
"""Hook called when registering a class.
This method takes care of adding the class to cinder.objects namespace.
If the registering class has a method called cinder_ovo_cls_init, it will
be called to support class initialization. This is convenient for all
persistent classes that need to register their models.
"""
setattr(objects, cls.obj_name(), cls)
# If registering class has a callable initialization method, call it.
if callable(getattr(cls, 'cinder_ovo_cls_init', None)):
cls.cinder_ovo_cls_init()
class CinderObject(base.VersionedObject):
# NOTE(thangp): OBJ_PROJECT_NAMESPACE needs to be set so that nova,
# cinder, and other objects can exist on the same bus and be distinguished
# from one another.
OBJ_PROJECT_NAMESPACE = 'cinder'
def cinder_obj_get_changes(self):
"""Returns a dict of changed fields with tz unaware datetimes.
Any timezone aware datetime field will be converted to UTC timezone
and returned as timezone unaware datetime.
This will allow us to pass these fields directly to a db update
method as they can't have timezone information.
"""
# Get dirtied/changed fields
changes = self.obj_get_changes()
# Look for datetime objects that contain timezone information
for k, v in changes.items():
if isinstance(v, datetime.datetime) and v.tzinfo:
# Remove timezone information and adjust the time according to
# the timezone information's offset.
changes[k] = v.replace(tzinfo=None) - v.utcoffset()
# Return modified dict
return changes
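# Illustrative example: a changed field holding
# datetime(2016, 1, 1, 12, 0, tzinfo=<UTC+02:00>) is returned by
# cinder_obj_get_changes() as the naive UTC value datetime(2016, 1, 1, 10, 0).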
def obj_make_compatible(self, primitive, target_version):
_log_backport(self, target_version)
super(CinderObject, self).obj_make_compatible(primitive,
target_version)
def __contains__(self, name):
# We're using obj_extra_fields to provide aliases for some fields while
# in transition period. This override is to make these aliases pass
# "'foo' in obj" tests.
return name in self.obj_extra_fields or super(CinderObject,
self).__contains__(name)
class CinderObjectDictCompat(base.VersionedObjectDictCompat):
"""Mix-in to provide dictionary key access compat.
If an object needs to support attribute access using
dictionary items instead of object attributes, inherit
from this class. This should only be used as a temporary
measure until all callers are converted to use modern
attribute access.
NOTE(berrange) This class will eventually be deleted.
"""
def get(self, key, value=base._NotSpecifiedSentinel):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
if key not in self.obj_fields:
# NOTE(jdg): There are a number of places where we rely on the
# old dictionary version and do a get(xxx, None).
# The following preserves that compatibility but in
# the future we'll remove this shim altogether so don't
# rely on it.
LOG.debug('Cinder object %(object_name)s has no '
'attribute named: %(attribute_name)s',
{'object_name': self.__class__.__name__,
'attribute_name': key})
return None
if (value != base._NotSpecifiedSentinel and
key not in self.obj_extra_fields and
not self.obj_attr_is_set(key)):
return value
else:
try:
return getattr(self, key)
except (exception.ObjectActionError, NotImplementedError):
# Raised when no value has been set for a non-lazy-loadable
# attribute; to mimic typical dict 'get' behavior we should
# still return None.
return None
class CinderPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for all persistent objects.
"""
OPTIONAL_FIELDS = []
Not = db.Not
Case = db.Case
fields = {
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'deleted': fields.BooleanField(default=False,
nullable=True),
}
@classmethod
def cinder_ovo_cls_init(cls):
"""This method is called on OVO registration and sets the DB model."""
# Persistent Versioned Objects Classes should have a DB model, and if
# they don't, then we have a problem and we must raise an exception on
# registration.
try:
cls.model = db.get_model_for_versioned_object(cls)
except (ImportError, AttributeError):
msg = _("Couldn't find ORM model for Persistent Versioned "
"Object %s.") % cls.obj_name()
raise exception.ProgrammingError(reason=msg)
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
@classmethod
def _get_expected_attrs(cls, context, *args, **kwargs):
return None
@classmethod
def get_by_id(cls, context, id, *args, **kwargs):
# To get by id we need to have a model and for the model to
# have an id field
if 'id' not in cls.fields:
msg = (_('VersionedObject %s cannot retrieve object by id.') %
(cls.obj_name()))
raise NotImplementedError(msg)
orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs)
# We pass parameters because fields to expect may depend on them
expected_attrs = cls._get_expected_attrs(context, *args, **kwargs)
kargs = {}
if expected_attrs:
kargs = {'expected_attrs': expected_attrs}
return cls._from_db_object(context, cls(context), orm_obj, **kargs)
def conditional_update(self, values, expected_values=None, filters=(),
save_all=False, session=None, reflect_changes=True,
order=None):
"""Compare-and-swap update.
A conditional object update that, unlike normal update, will SAVE the
contents of the update to the DB.
Update will only occur in the DB and the object if conditions are met.
If no expected_values are passed in we will default to make sure that
all fields have not been changed in the DB. Since we cannot know the
original value in the DB for dirty fields in the object those will be
excluded.
We have 4 different condition types we can use in expected_values:
- Equality: {'status': 'available'}
- Inequality: {'status': vol_obj.Not('deleting')}
- In range: {'status': ['available', 'error']}
- Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}
Method accepts additional filters, which are basically anything that
can be passed to a sqlalchemy query's filter method, for example:
.. code-block:: python
[~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]
We can select values based on conditions using Case objects in the
'values' argument. For example:
.. code-block:: python
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
volume.conditional_update({'status': case_values},
{'status': 'available'})
We can also reference DB fields through the model class attribute, for
example to store the previous status in another field even though we don't
know which of the allowed values is currently in the DB:
.. code-block:: python
volume.conditional_update({'status': 'deleting',
'previous_status': volume.model.status},
{'status': ('available', 'error')})
:param values: Dictionary of key-values to update in the DB.
:param expected_values: Dictionary of conditions that must be met for
the update to be executed.
:param filters: Iterable with additional filters
:param save_all: Object may have changes that are not in the DB, this
will say whether we want those changes saved as well.
:param session: Session to use for the update
:param reflect_changes: If we want changes made in the database to be
reflected in the versioned object. This may
mean in some cases that we have to reload the
object from the database.
:param order: Specific order of fields in which to update the values
:returns: number of db rows that were updated, which can be used as a
boolean, since it will be 0 if we couldn't update the DB and
1 if we could, because we are using unique index id.
"""
if 'id' not in self.fields:
msg = (_('VersionedObject %s does not support conditional update.')
% (self.obj_name()))
raise NotImplementedError(msg)
# If no conditions are set we will require object in DB to be unchanged
if expected_values is None:
changes = self.obj_what_changed()
expected = {key: getattr(self, key)
for key in self.fields.keys()
if self.obj_attr_is_set(key) and key not in changes and
key not in self.OPTIONAL_FIELDS}
else:
# Set the id in expected_values to limit conditional update to only
# change this object
expected = expected_values.copy()
expected['id'] = self.id
# If we want to save any additional changes the object has besides the
# ones referred in values
if save_all:
changes = self.cinder_obj_get_changes()
changes.update(values)
values = changes
result = db.conditional_update(self._context, self.model, values,
expected, filters, order=order)
# If we were able to update the DB then we need to update this object
# as well to reflect new DB contents and clear the object's dirty flags
# for those fields.
if result and reflect_changes:
# If we have used a Case, a db field or an expression in values we
# don't know which value was used, so we need to read the object
# back from the DB
if any(isinstance(v, self.Case) or db.is_orm_value(v)
for v in values.values()):
# Read back object from DB
obj = type(self).get_by_id(self._context, self.id)
db_values = obj.obj_to_primitive()['versioned_object.data']
# Only update fields where changes were requested
values = {field: db_values[field]
for field, value in values.items()}
# NOTE(geguileo): We don't use update method because our objects
# will eventually move away from VersionedObjectDictCompat
for key, value in values.items():
setattr(self, key, value)
self.obj_reset_changes(values.keys())
return result
def refresh(self):
# To refresh we need to have a model and for the model to have an id
# field
if 'id' not in self.fields:
msg = (_('VersionedObject %s cannot retrieve object by id.') %
(self.obj_name()))
raise NotImplementedError(msg)
current = self.get_by_id(self._context, self.id)
# Copy contents retrieved from the DB into self
my_data = vars(self)
my_data.clear()
my_data.update(vars(current))
@classmethod
def exists(cls, context, id_):
return db.resource_exists(context, cls.model, id_)
class CinderComparableObject(base.ComparableVersionedObject):
def __eq__(self, obj):
if hasattr(obj, 'obj_to_primitive'):
return self.obj_to_primitive() == obj.obj_to_primitive()
return False
def __ne__(self, other):
return not self.__eq__(other)
class ObjectListBase(base.ObjectListBase):
def obj_make_compatible(self, primitive, target_version):
_log_backport(self, target_version)
super(ObjectListBase, self).obj_make_compatible(primitive,
target_version)
class ClusteredObject(object):
@property
def service_topic_queue(self):
return self.cluster_name or self.host
class CinderObjectSerializer(base.VersionedObjectSerializer):
OBJ_BASE_CLASS = CinderObject
def __init__(self, version_cap=None):
super(CinderObjectSerializer, self).__init__()
self.version_cap = version_cap
# NOTE(geguileo): During upgrades we will use a manifest to ensure that
# all objects are properly backported. This allows us to properly
# backport child objects to the right version even if parent version
# has not been bumped.
if not version_cap or version_cap == OBJ_VERSIONS.get_current():
self.manifest = None
else:
if version_cap not in OBJ_VERSIONS:
raise exception.CappedVersionUnknown(version=version_cap)
self.manifest = OBJ_VERSIONS[version_cap]
def _get_capped_obj_version(self, obj):
objname = obj.obj_name()
version_dict = OBJ_VERSIONS.get(self.version_cap, {})
version_cap = version_dict.get(objname, None)
if version_cap:
cap_tuple = versionutils.convert_version_to_tuple(version_cap)
obj_tuple = versionutils.convert_version_to_tuple(obj.VERSION)
if cap_tuple > obj_tuple:
# NOTE(dulek): Do not set version cap to be higher than actual
# object version as we don't support "forwardporting" of
# objects. If a service receives an object that's too old it
# should handle it explicitly.
version_cap = None
return version_cap
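# Illustrative example: with version_cap='1.3', a Service object is capped at
# '1.3' (per OBJ_VERSIONS) even if its class VERSION is '1.4', so it gets
# backported; if the recorded cap were higher than the object's own VERSION,
# None is returned and no backport is attempted.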
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
# NOTE(dulek): Backport outgoing object to the capped version.
backport_ver = self._get_capped_obj_version(entity)
entity = entity.obj_to_primitive(backport_ver, self.manifest)
return entity
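# Usage sketch (hypothetical cap value): a service pinned to history version
# '1.3' would build its serializer as
#   serializer = CinderObjectSerializer(version_cap='1.3')
# serialize_entity() then backports outgoing objects (and, via the manifest,
# their child objects) to the versions recorded for '1.3' in OBJ_VERSIONS.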
def _log_backport(ovo, target_version):
"""Log backported versioned objects."""
if target_version and target_version != ovo.VERSION:
LOG.debug('Backporting %(obj_name)s from version %(src_vers)s '
'to version %(dst_vers)s',
{'obj_name': ovo.obj_name(),
'src_vers': ovo.VERSION,
'dst_vers': target_version})
from unittest.mock import MagicMock
from unittest.mock import ANY
import json
import sys
from botocore import exceptions
import pytest
from garcon import activity
from garcon import event
from garcon import runner
from garcon import task
from garcon import utils
from tests.fixtures import decider
def activity_run(
monkeypatch, boto_client, poll=None, complete=None, fail=None,
execute=None):
"""Create an activity.
"""
current_activity = activity.Activity(boto_client)
poll = poll or dict()
monkeypatch.setattr(
current_activity, 'execute_activity',
execute or MagicMock(return_value=dict()))
monkeypatch.setattr(
boto_client, 'poll_for_activity_task', MagicMock(return_value=poll))
monkeypatch.setattr(
boto_client, 'respond_activity_task_completed', complete or MagicMock())
monkeypatch.setattr(
boto_client, 'respond_activity_task_failed', fail or MagicMock())
return current_activity
@pytest.fixture(params=[0, 1, 2])
def generators(request):
generators = []
if request.param >= 1:
def igenerator(context):
for i in range(10):
yield {'i': i}
generators.append(igenerator)
if request.param == 2:
def dgenerator(context):
for i in range(10):
yield {'d': i * 2}
generators.append(dgenerator)
return generators
@pytest.fixture
def poll():
return dict(
activityId='something',
taskToken='taskToken')
def test_poll_for_activity(monkeypatch, poll, boto_client):
"""Test that poll_for_activity successfully polls.
"""
activity_task = poll
current_activity = activity_run(monkeypatch, boto_client, poll)
boto_client.poll_for_activity_task.return_value = activity_task
activity_execution = current_activity.poll_for_activity()
assert boto_client.poll_for_activity_task.called
assert activity_execution.task_token is poll.get('taskToken')
def test_poll_for_activity_throttle_retry(monkeypatch, poll, boto_client):
"""Test that SWF throttles are retried during polling.
"""
current_activity = activity_run(monkeypatch, boto_client, poll)
boto_client.poll_for_activity_task.side_effect = exceptions.ClientError(
{'Error': {'Code': 'ThrottlingException'}},
'operation name')
with pytest.raises(exceptions.ClientError):
current_activity.poll_for_activity()
assert boto_client.poll_for_activity_task.call_count == 5
def test_poll_for_activity_error(monkeypatch, poll, boto_client):
"""Test that non-throttle errors during poll are thrown.
"""
current_activity = activity_run(monkeypatch, boto_client, poll)
exception = Exception()
boto_client.poll_for_activity_task.side_effect = exception
with pytest.raises(Exception):
current_activity.poll_for_activity()
def test_poll_for_activity_identity(monkeypatch, poll, boto_client):
"""Test that identity is passed to poll_for_activity.
"""
current_activity = activity_run(monkeypatch, boto_client, poll)
current_activity.poll_for_activity(identity='foo')
boto_client.poll_for_activity_task.assert_called_with(
domain=ANY, taskList=ANY, identity='foo')
def test_poll_for_activity_no_identity(monkeypatch, poll, boto_client):
"""Test poll_for_activity works without identity passed as param.
"""
current_activity = activity_run(monkeypatch, boto_client, poll)
current_activity.poll_for_activity()
boto_client.poll_for_activity_task.assert_called_with(
domain=ANY, taskList=ANY)
def test_run_activity(monkeypatch, poll, boto_client):
"""Run an activity.
"""
current_activity = activity_run(monkeypatch, boto_client, poll=poll)
current_activity.run()
boto_client.poll_for_activity_task.assert_called_with(
domain=ANY, taskList=ANY)
assert current_activity.execute_activity.called
assert boto_client.respond_activity_task_completed.called
def test_run_activity_identity(monkeypatch, poll, boto_client):
"""Run an activity with identity as param.
"""
current_activity = activity_run(monkeypatch, boto_client, poll=poll)
current_activity.run(identity='foo')
boto_client.poll_for_activity_task.assert_called_with(
domain=ANY, taskList=ANY, identity='foo')
assert current_activity.execute_activity.called
assert boto_client.respond_activity_task_completed.called
def test_run_capture_exception(monkeypatch, poll, boto_client):
"""Run an activity with an exception raised during activity execution.
"""
current_activity = activity_run(monkeypatch, boto_client, poll)
current_activity.on_exception = MagicMock()
current_activity.execute_activity = MagicMock()
error_msg_long = "Error" * 100
actual_error_msg = error_msg_long[:255]
current_activity.execute_activity.side_effect = Exception(error_msg_long)
current_activity.run()
assert boto_client.poll_for_activity_task.called
assert current_activity.execute_activity.called
assert current_activity.on_exception.called
boto_client.respond_activity_task_failed.assert_called_with(
taskToken=poll.get('taskToken'),
reason=actual_error_msg)
assert not boto_client.respond_activity_task_completed.called
def test_run_capture_fail_exception(monkeypatch, poll, boto_client):
"""Run an activity with an exception raised during failing execution.
"""
current_activity = activity_run(monkeypatch, boto_client, poll)
current_activity.on_exception = MagicMock()
current_activity.execute_activity = MagicMock()
current_activity.complete = MagicMock()
current_activity.fail = MagicMock()
error_msg_long = "Error" * 100
current_activity.complete.side_effect = Exception(error_msg_long)
current_activity.fail.side_effect = Exception(error_msg_long)
current_activity.run()
assert boto_client.poll_for_activity_task.called
assert current_activity.execute_activity.called
assert not current_activity.complete.called
assert not current_activity.fail.called
assert current_activity.on_exception.called
def test_run_capture_poll_exception(monkeypatch, boto_client, poll):
"""Run an activity with an exception raised during poll.
"""
current_activity = activity_run(monkeypatch, boto_client, poll=poll)
current_activity.on_exception = MagicMock()
current_activity.execute_activity = MagicMock()
exception = Exception('poll exception')
boto_client.poll_for_activity_task.side_effect = exception
current_activity.run()
assert boto_client.poll_for_activity_task.called
assert current_activity.on_exception.called
assert not current_activity.execute_activity.called
assert not boto_client.respond_activity_task_completed.called
current_activity.on_exception = None
current_activity.logger.error = MagicMock()
current_activity.run()
current_activity.logger.error.assert_called_with(exception, exc_info=True)
def test_run_activity_without_id(monkeypatch, boto_client):
"""Run an activity without an activity id.
"""
current_activity = activity_run(monkeypatch, boto_client, poll=dict())
current_activity.run()
assert boto_client.poll_for_activity_task.called
assert not current_activity.execute_activity.called
assert not boto_client.respond_activity_task_completed.called
def test_run_activity_with_context(monkeypatch, boto_client, poll):
"""Run an activity with a context.
"""
context = dict(foo='bar')
poll.update(input=json.dumps(context))
current_activity = activity_run(monkeypatch, boto_client, poll=poll)
current_activity.run()
activity_execution = current_activity.execute_activity.call_args[0][0]
assert activity_execution.context == context
def test_run_activity_with_result(monkeypatch, boto_client, poll):
"""Run an activity with a result.
"""
result = dict(foo='bar')
mock = MagicMock(return_value=result)
current_activity = activity_run(monkeypatch, boto_client, poll=poll,
execute=mock)
current_activity.run()
boto_client.respond_activity_task_completed.assert_called_with(
result=json.dumps(result), taskToken=poll.get('taskToken'))
def test_task_failure(monkeypatch, boto_client, poll):
"""Run an activity that has a bad task.
"""
resp = dict(foo='bar')
mock = MagicMock(return_value=resp)
reason = 'fail'
current_activity = activity_run(monkeypatch, boto_client, poll=poll,
execute=mock)
current_activity.on_exception = MagicMock()
current_activity.execute_activity.side_effect = Exception(reason)
current_activity.run()
boto_client.respond_activity_task_failed.assert_called_with(
taskToken=poll.get('taskToken'),
reason=reason)
def test_task_failure_on_close_activity(monkeypatch, boto_client, poll):
"""Run an activity failure when the task is already closed.
"""
resp = dict(foo='bar')
mock = MagicMock(return_value=resp)
current_activity = activity_run(monkeypatch, boto_client, poll=poll,
execute=mock)
current_activity.on_exception = MagicMock()
current_activity.execute_activity.side_effect = Exception('fail')
boto_client.respond_activity_task_failed.side_effect = Exception('fail')
current_activity.unset_log_context = MagicMock()
current_activity.run()
assert current_activity.unset_log_context.called
def test_execute_activity(monkeypatch, boto_client):
"""Test the execution of an activity.
"""
monkeypatch.setattr(activity.ActivityExecution, 'heartbeat',
lambda self: None)
resp = dict(task_resp='something')
custom_task = MagicMock(return_value=resp)
current_activity = activity.Activity(boto_client)
current_activity.runner = runner.Sync(custom_task)
val = current_activity.execute_activity(activity.ActivityExecution(
boto_client, 'activityId', 'taskToken', '{"context": "value"}'))
assert custom_task.called
assert val == resp
def test_hydrate_activity(monkeypatch, boto_client):
"""Test the hydratation of an activity.
"""
current_activity = activity.Activity(boto_client)
current_activity.hydrate(dict(
name='activity',
domain='domain',
requires=[],
on_exception=lambda actor, exception: print(exception),
tasks=[lambda: dict('val')]))
def test_create_activity(monkeypatch, boto_client):
"""Test the creation of an activity via `create`.
"""
create = activity.create(boto_client, 'domain_name', 'flow_name')
current_activity = create(name='activity_name')
assert isinstance(current_activity, activity.Activity)
assert current_activity.name == 'flow_name_activity_name'
assert current_activity.task_list == 'flow_name_activity_name'
assert current_activity.domain == 'domain_name'
assert current_activity.client == boto_client
def test_create_external_activity(monkeypatch, boto_client):
"""Test the creation of an external activity via `create`.
"""
create = activity.create(boto_client, 'domain_name', 'flow_name')
current_activity = create(
name='activity_name',
timeout=60,
heartbeat=40,
external=True)
assert isinstance(current_activity, activity.ExternalActivity)
assert current_activity.name == 'flow_name_activity_name'
assert current_activity.task_list == 'flow_name_activity_name'
assert current_activity.domain == 'domain_name'
assert isinstance(current_activity.runner, runner.External)
assert current_activity.runner.heartbeat() == 40
assert current_activity.runner.timeout() == 60
def test_create_activity_worker(monkeypatch):
"""Test the creation of an activity worker.
"""
from tests.fixtures.flows import example
worker = activity.ActivityWorker(example)
assert len(worker.activities) == 4
assert worker.flow is example
assert not worker.worker_activities
def test_instances_creation(monkeypatch, boto_client, generators):
"""Test the creation of an activity instance id with the use of a local
context.
"""
local_activity = activity.Activity(boto_client)
external_activity = activity.ExternalActivity(timeout=60)
for current_activity in [local_activity, external_activity]:
current_activity.generators = generators
if len(current_activity.generators):
instances = list(current_activity.instances(dict()))
assert len(instances) == pow(10, len(generators))
for instance in instances:
assert isinstance(instance.local_context.get('i'), int)
if len(generators) == 2:
assert isinstance(instance.local_context.get('d'), int)
else:
instances = list(current_activity.instances(dict()))
assert len(instances) == 1
assert isinstance(instances[0].local_context, dict)
# Context is empty since no generator was used.
assert not instances[0].local_context
def test_activity_timeouts(monkeypatch, boto_client, generators):
"""Test the creation of an activity timeouts.
More details: the task timeout is 120s and the schedule_to_start is 1000s.
With two generators, 10 * 10 = 100 activity instances are scheduled, so the
schedule_to_start for each instance is 1000 * 100 = 100k. The
schedule_to_close is 100k plus the duration of an activity, which is
120s * 2 because the runner wraps two tasks.
"""
timeout = 120
start_timeout = 1000
@task.decorate(timeout=timeout)
def local_task():
return
current_activity = activity.Activity(boto_client)
current_activity.hydrate(dict(schedule_to_start=start_timeout))
current_activity.generators = generators
current_activity.runner = runner.Sync(
local_task.fill(),
local_task.fill())
total_generators = pow(10, len(current_activity.generators))
schedule_to_start = start_timeout * total_generators
for instance in current_activity.instances({}):
assert current_activity.pool_size == total_generators
assert instance.schedule_to_start == schedule_to_start
assert instance.timeout == timeout * 2
assert instance.schedule_to_close == (
schedule_to_start + instance.timeout)
def test_external_activity_timeouts(monkeypatch, boto_client, generators):
"""Test the creation of an external activity timeouts.
"""
timeout = 120
start_timeout = 1000
current_activity = activity.ExternalActivity(timeout=timeout)
current_activity.hydrate(dict(schedule_to_start=start_timeout))
current_activity.generators = generators
total_generators = pow(10, len(current_activity.generators))
schedule_to_start = start_timeout * total_generators
for instance in current_activity.instances({}):
assert current_activity.pool_size == total_generators
assert instance.schedule_to_start == schedule_to_start
assert instance.timeout == timeout
assert instance.schedule_to_close == (
schedule_to_start + instance.timeout)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
def test_worker_run(monkeypatch, boto_client):
"""Test running the worker.
"""
from tests.fixtures.flows import example
worker = activity.ActivityWorker(example)
assert len(worker.activities) == 4
for current_activity in worker.activities:
monkeypatch.setattr(
current_activity, 'run', MagicMock(return_value=False))
worker.run()
assert len(worker.activities) == 4
for current_activity in worker.activities:
assert current_activity.run.called
def test_worker_run_with_skipped_activities(monkeypatch):
"""Test running the worker with defined activities.
"""
monkeypatch.setattr(activity.Activity, 'run', MagicMock(return_value=False))
from tests.fixtures.flows import example
worker = activity.ActivityWorker(example, activities=['activity_1'])
assert len(worker.worker_activities) == 1
for current_activity in worker.activities:
monkeypatch.setattr(
current_activity, 'run', MagicMock(return_value=False))
worker.run()
for current_activity in worker.activities:
if current_activity.name == 'activity_1':
assert current_activity.run.called
else:
assert not current_activity.run.called
def test_worker_infinite_loop():
"""Test the worker runner.
"""
spy = MagicMock()
class Activity:
def __init__(self):
self.count = 0
def run(self, identity=None):
spy()
self.count = self.count + 1
if self.count < 5:
return True
return False
activity_worker = Activity()
activity_worker.name = 'activity_name'
activity_worker.logger = MagicMock()
activity.worker_runner(activity_worker)
assert spy.called
assert spy.call_count == 5
def test_worker_infinite_loop_on_external(monkeypatch):
"""There is no worker for external activities.
"""
external_activity = activity.ExternalActivity(timeout=10)
current_run = external_activity.run
spy = MagicMock()
def run():
spy()
return current_run()
monkeypatch.setattr(external_activity, 'run', run)
activity.worker_runner(external_activity)
# This test might not fail, but it will hang the test suite since it is
# going to trigger an infinite loop.
assert spy.call_count == 1
def test_activity_launch_sequence():
"""Test available activities.
"""
from tests.fixtures.flows import example
# First available activity is the activity_1.
context = dict()
history = event.activity_states_from_events(decider.history['events'][:1])
activities = list(
activity.find_available_activities(example, history, context))
uncomplete = list(
activity.find_uncomplete_activities(example, history, context))
assert len(activities) == 1
assert len(uncomplete) == 4
assert activities[0].activity_worker == example.activity_1
# In between activities should not launch activities.
history = event.activity_states_from_events(decider.history['events'][:5])
activities = list(
activity.find_available_activities(example, history, context))
uncomplete = list(
activity.find_uncomplete_activities(example, history, context))
assert len(activities) == 0
assert len(uncomplete) == 4
# Two activities are launched in parallel: 2 and 3.
history = event.activity_states_from_events(decider.history['events'][:7])
activities = list(
activity.find_available_activities(example, history, context))
uncomplete = list(
activity.find_uncomplete_activities(example, history, context))
assert len(activities) == 2
assert example.activity_1 not in uncomplete
# Activity 3 completes before activity 2. Activity 4 depends on 2 and 3 to
# complete.
history = event.activity_states_from_events(decider.history['events'][:14])
activities = list(
activity.find_available_activities(example, history, context))
uncomplete = list(
activity.find_uncomplete_activities(example, history, context))
assert len(activities) == 0
assert example.activity_3 not in uncomplete
# Activity 2 - 3 completed.
history = event.activity_states_from_events(decider.history['events'][:22])
activities = list(
activity.find_available_activities(example, history, context))
uncomplete = list(
activity.find_uncomplete_activities(example, history, context))
assert len(activities) == 1
assert activities[0].activity_worker == example.activity_4
assert example.activity_1 not in uncomplete
assert example.activity_2 not in uncomplete
assert example.activity_3 not in uncomplete
# Close
history = event.activity_states_from_events(decider.history['events'][:25])
activities = list(
activity.find_available_activities(example, history, context))
uncomplete = list(
activity.find_uncomplete_activities(example, history, context))
assert not activities
assert not uncomplete
def test_create_activity_instance():
"""Test the creation of an activity instance.
"""
activity_mock = MagicMock()
activity_mock.name = 'foobar'
activity_mock.retry = 20
instance = activity.ActivityInstance(activity_mock)
assert activity_mock.name == instance.activity_name
assert activity_mock.retry == instance.retry
def test_create_activity_instance_id(monkeypatch):
"""Test the creation of an activity instance id.
"""
monkeypatch.setattr(utils, 'create_dictionary_key', MagicMock())
activity_mock = MagicMock()
activity_mock.name = 'activity'
instance = activity.ActivityInstance(activity_mock)
# No context was passed, so create_dictionary_key didn't need to be
# called.
assert instance.id == activity_mock.name + '-1'
assert not utils.create_dictionary_key.called
def test_create_activity_instance_id_with_local_context(monkeypatch):
"""Test the creation of an activity instance id with the use of a local
context.
"""
monkeypatch.setattr(utils, 'create_dictionary_key', MagicMock())
activity_mock = MagicMock()
activity_mock.name = 'activity'
instance = activity.ActivityInstance(activity_mock, dict(foobar='yes'))
assert instance.id.startswith(activity_mock.name)
assert utils.create_dictionary_key.called
def test_create_activity_instance_input_without_runner(monkeypatch):
"""Test the creation of a context for an activity instance input without
specifying a runner.
"""
activity_mock = MagicMock()
activity_mock.name = 'activity'
activity_mock.runner = None
context = dict(context='yes')
instance = activity.ActivityInstance(activity_mock, context)
with pytest.raises(runner.RunnerMissing):
instance.create_execution_input()
def test_create_activity_instance_input(monkeypatch):
"""Test the creation of a context for an activity instance input.
"""
@task.decorate()
def task_a(value):
pass
activity_mock = MagicMock()
activity_mock.name = 'activity'
activity_mock.runner = runner.BaseRunner(task_a.fill(value='context'))
instance = activity.ActivityInstance(
activity_mock, local_context=dict(context='yes', unused='no'),
execution_context=dict(somemore='values'))
resp = instance.create_execution_input()
assert len(resp) == 4
assert resp.get('context') == 'yes'
assert 'somemore' not in resp
assert 'unused' not in resp
assert 'execution.domain' in resp
assert 'execution.run_id' in resp
assert 'execution.workflow_id' in resp
def test_create_activity_instance_input_without_decorate(monkeypatch):
"""Test the creation of a context input without the use of a decorator.
"""
def task_a(value):
pass
activity_mock = MagicMock()
activity_mock.name = 'activity'
context = dict(foo='bar')
local_context = dict(context='yes')
activity_mock.runner = runner.BaseRunner(task_a)
instance = activity.ActivityInstance(
activity_mock, local_context=local_context,
execution_context=context)
resp = instance.create_execution_input()
assert resp.get('foo') == 'bar'
assert resp.get('context') == 'yes'
def test_create_activity_instance_input_with_zero_or_empty_values(
monkeypatch):
"""Test the creation of a context for an activity instance input.
"""
@task.decorate()
def task_a(value1, value2, value3, value4):
pass
activity_mock = MagicMock()
activity_mock.name = 'activity'
activity_mock.runner = runner.BaseRunner(
task_a.fill(
value1='zero',
value2='empty_list',
value3='empty_dict',
value4='none'))
instance = activity.ActivityInstance(
activity_mock,
local_context=dict(
zero=0, empty_list=[], empty_dict={}, none=None))
resp = instance.create_execution_input()
assert len(resp) == 6
assert resp.get('zero') == 0
assert resp.get('empty_list') == []
assert resp.get('empty_dict') == {}
assert 'none' not in resp
def test_activity_state():
"""Test the creation of the activity state.
"""
activity_id = 'id'
state = activity.ActivityState(activity_id)
assert state.activity_id is activity_id
assert not state.get_last_state()
state.add_state(activity.ACTIVITY_FAILED)
state.add_state(activity.ACTIVITY_COMPLETED)
assert len(state.states) == 2
assert state.get_last_state() is activity.ACTIVITY_COMPLETED
result = 'foobar'
state.set_result(result)
assert state.result == result
with pytest.raises(Exception):
state.set_result('shouldnt reset')
assert state.result == result
|
|
import http
import tempfile
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.core.utils import AsyncIterator
from waterbutler.providers.dataverse import settings
from waterbutler.providers.dataverse.metadata import DataverseRevision
from waterbutler.providers.dataverse.metadata import DataverseDatasetMetadata
class DataverseProvider(provider.BaseProvider):
"""Provider for Dataverse
API Docs: http://guides.dataverse.org/en/4.5/api/
"""
NAME = 'dataverse'
def __init__(self, auth, credentials, settings):
"""
:param dict auth: Not used
:param dict credentials: Contains `token`
:param dict settings: Contains `host`, `doi`, `id`, and `name` of a dataset. Hosts::
- 'demo.dataverse.org': Harvard Demo Server
- 'dataverse.harvard.edu': Dataverse Production Server **(NO TEST DATA)**
- Other
"""
super().__init__(auth, credentials, settings)
self.BASE_URL = 'https://{0}'.format(self.settings['host'])
self.token = self.credentials['token']
self.doi = self.settings['doi']
self._id = self.settings['id']
self.name = self.settings['name']
self.metrics.add('host', {
'host': self.settings['host'],
'doi': self.doi,
'name': self.name,
'id': self._id,
})
self._metadata_cache = {}
def build_url(self, path, *segments, **query):
# Need to split up the dataverse subpaths and push them into segments
return super().build_url(*(tuple(path.split('/')) + segments), **query)
def can_duplicate_names(self):
return False
async def validate_v1_path(self, path, **kwargs):
if path != '/' and path.endswith('/'):
raise exceptions.NotFoundError(str(path))
return await self.validate_path(path, **kwargs)
async def validate_path(self, path, revision=None, **kwargs):
"""Ensure path is in configured dataset
:param str path: The path to a file
        :param str revision: Revision used to limit the metadata lookup
            ('latest', 'latest-published', or None for all data)
"""
self.metrics.add('validate_path.revision', revision)
if path == '/':
wbpath = WaterButlerPath('/')
wbpath.revision = revision
return wbpath
path = path.strip('/')
wbpath = None
for item in (await self._maybe_fetch_metadata(version=revision)):
if path == item.extra['fileId']:
wbpath = WaterButlerPath('/' + item.name, _ids=(None, item.extra['fileId']))
wbpath = wbpath or WaterButlerPath('/' + path)
wbpath.revision = revision
return wbpath
async def revalidate_path(self, base, path, folder=False, revision=None):
path = path.strip('/')
wbpath = None
for item in (await self._maybe_fetch_metadata(version=revision)):
if path == item.name:
# Dataverse cant have folders
wbpath = base.child(item.name, _id=item.extra['fileId'], folder=False)
wbpath = wbpath or base.child(path, _id=None, folder=False)
wbpath.revision = revision or base.revision
return wbpath
async def _maybe_fetch_metadata(self, version=None, refresh=False):
if refresh or self._metadata_cache.get(version) is None:
            for v in ((version, ) if version else ('latest', 'latest-published')):
self._metadata_cache[v] = await self._get_data(v)
if version:
return self._metadata_cache[version]
return sum(self._metadata_cache.values(), [])
async def download(self, path, revision=None, range=None, **kwargs):
"""Returns a ResponseWrapper (Stream) for the specified path
        raises DownloadError if the status from Dataverse is not 200 or 206
:param str path: Path to the file you want to download
:param str revision: Used to verify if file is in selected dataset
- 'latest' to check draft files
- 'latest-published' to check published files
- None to check all data
:param dict \*\*kwargs: Additional arguments that are ignored
:rtype: :class:`waterbutler.core.streams.ResponseStreamReader`
:raises: :class:`waterbutler.core.exceptions.DownloadError`
"""
if path.identifier is None:
raise exceptions.NotFoundError(str(path))
resp = await self.make_request(
'GET',
self.build_url(settings.DOWN_BASE_URL, path.identifier, key=self.token),
range=range,
expects=(200, 206),
throws=exceptions.DownloadError,
)
return streams.ResponseStreamReader(resp)
async def upload(self, stream, path, **kwargs):
"""Zips the given stream then uploads to Dataverse.
This will delete existing draft files with the same name.
:param waterbutler.core.streams.RequestWrapper stream: The stream to put to Dataverse
:param str path: The filename prepended with '/'
:rtype: dict, bool
"""
stream = streams.ZipStreamReader(AsyncIterator([(path.name, stream)]))
# Write stream to disk (Necessary to find zip file size)
f = tempfile.TemporaryFile()
chunk = await stream.read()
while chunk:
f.write(chunk)
chunk = await stream.read()
stream = streams.FileStreamReader(f)
dv_headers = {
"Content-Disposition": "filename=temp.zip",
"Content-Type": "application/zip",
"Packaging": "http://purl.org/net/sword/package/SimpleZip",
"Content-Length": str(stream.size),
}
# Delete old file if it exists
if path.identifier:
await self.delete(path)
resp = await self.make_request(
'POST',
self.build_url(settings.EDIT_MEDIA_BASE_URL, 'study', self.doi),
headers=dv_headers,
auth=(self.token, ),
data=stream,
expects=(201, ),
throws=exceptions.UploadError
)
await resp.release()
# Find appropriate version of file
metadata = await self._get_data('latest')
files = metadata if isinstance(metadata, list) else []
file_metadata = next(file for file in files if file.name == path.name)
return file_metadata, path.identifier is None
async def delete(self, path, **kwargs):
"""Deletes the key at the specified path
:param str path: The path of the key to delete
"""
# Can only delete files in draft
path = await self.validate_path('/' + path.identifier, version='latest', throw=True)
resp = await self.make_request(
'DELETE',
self.build_url(settings.EDIT_MEDIA_BASE_URL, 'file', path.identifier),
auth=(self.token, ),
expects=(204, ),
throws=exceptions.DeleteError,
)
await resp.release()
async def metadata(self, path, version=None, **kwargs):
"""
:param str version:
- 'latest' for draft files
- 'latest-published' for published files
- None for all data
"""
version = version or path.revision
if path.is_root:
return (await self._maybe_fetch_metadata(version=version))
try:
return next(
item
for item in
(await self._maybe_fetch_metadata(version=version))
if item.extra['fileId'] == path.identifier
)
except StopIteration:
raise exceptions.MetadataError(
"Could not retrieve file '{}'".format(path),
code=http.client.NOT_FOUND,
)
async def revisions(self, path, **kwargs):
"""Get past versions of the request file. Orders versions based on
`_get_all_data()`
:param str path: The path to a key
:rtype list:
"""
metadata = await self._get_data()
return [
DataverseRevision(item.extra['datasetVersion'])
for item in metadata if item.extra['fileId'] == path.identifier
]
async def _get_data(self, version=None):
"""Get list of file metadata for a given dataset version
:param str version:
- 'latest' for draft files
- 'latest-published' for published files
- None for all data
"""
if not version:
return (await self._get_all_data())
url = self.build_url(
settings.JSON_BASE_URL.format(self._id, version),
key=self.token,
)
resp = await self.make_request(
'GET',
url,
expects=(200, ),
throws=exceptions.MetadataError
)
data = await resp.json()
data = data['data']
dataset_metadata = DataverseDatasetMetadata(
data, self.name, self.doi, version,
)
return [item for item in dataset_metadata.contents]
async def _get_all_data(self):
"""Get list of file metadata for all dataset versions"""
try:
published_data = await self._get_data('latest-published')
except exceptions.MetadataError as e:
if e.code != 404:
raise
published_data = []
draft_data = await self._get_data('latest')
# Prefer published to guarantee users get published version by default
return published_data + draft_data
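# A minimal construction sketch (not part of WaterButler itself): the credential
# and settings keys mirror the __init__ docstring above, and every value below is
# a placeholder, so this will not talk to a real Dataverse installation.
def _example_build_provider():
    return DataverseProvider(
        auth={},  # unused by this provider
        credentials={'token': 'REPLACE-WITH-API-TOKEN'},
        settings={
            'host': 'demo.dataverse.org',      # Harvard demo server
            'doi': 'doi:10.5072/FK2/EXAMPLE',  # placeholder DOI
            'id': '12345',                     # placeholder dataset id
            'name': 'Example Dataset',
        },
    )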
|
|
import itertools
from ordereddict import OrderedDict
from django.conf import settings
from tower import ugettext_lazy as _lazy
# WARNING: When adding a new app feature here also include a migration.
#
# WARNING: Order matters here. Don't re-order these or alphabetize them. If you
# add new ones put them on the end.
#
# These are used to dynamically generate the field list for the AppFeatures
# django model in mkt.webapps.models.
APP_FEATURES = OrderedDict([
('APPS', {
'name': _lazy(u'App Management API'),
'description': _lazy(u'The app requires the `navigator.mozApps` API '
u'to install and manage other apps.'),
'apis': ('navigator.mozApps',),
}),
('PACKAGED_APPS', {
'name': _lazy(u'Packaged Apps Install API'),
'description': _lazy(
u'The app requires the `navigator.mozApps.installPackage` API '
u'to install other packaged apps.'),
'apis': ('navigator.mozApps.installPackage',),
}),
('PAY', {
'name': _lazy(u'Web Payment'),
        'description': _lazy(u'The app requires the `navigator.mozPay` API.'),
'apis': ('navigator.pay', 'navigator.mozPay',),
}),
('ACTIVITY', {
'name': _lazy(u'Web Activities'),
'description': _lazy(u'The app requires Web Activities '
u'(the `MozActivity` API).'),
'apis': ('MozActivity',),
}),
('LIGHT_EVENTS', {
'name': _lazy(u'Ambient Light Sensor'),
'description': _lazy(u'The app requires an ambient light sensor '
u'(the `ondevicelight` API).'),
'apis': ('window.ondevicelight',),
}),
('ARCHIVE', {
'name': _lazy(u'Archive'),
'description': _lazy(u'The app requires the `ArchiveReader` API.'),
'apis': ('ArchiveReader',),
}),
('BATTERY', {
'name': _lazy(u'Battery'),
'description': _lazy(u'The app requires the `navigator.battery` API.'),
'apis': ('navigator.battery',),
}),
('BLUETOOTH', {
'name': u'Bluetooth',
'description': _lazy(u'The app requires the `navigator.mozBluetooth` '
u'API.'),
'apis': ('navigator.bluetooth', 'navigator.mozBluetooth'),
}),
('CONTACTS', {
'name': _lazy(u'Contacts'),
'description': _lazy(u'The app requires the `navigator.mozContacts` '
u'API.'),
'apis': ('navigator.contacts', 'navigator.mozContacts'),
}),
('DEVICE_STORAGE', {
'name': _lazy(u'Device Storage'),
'description': _lazy(u'The app requires the Device Storage API to '
u'access files on the filesystem.'),
'apis': ('navigator.getDeviceStorage',),
}),
('INDEXEDDB', {
'name': u'IndexedDB',
'description': _lazy(u'The app requires the platform to support '
u'IndexedDB.'),
'apis': ('navigator.indexedDB', 'navigator.mozIndexedDB'),
}),
('GEOLOCATION', {
'name': _lazy(u'Geolocation'),
'description': _lazy(u'The app requires the platform to support the '
u'`navigator.geolocation` API.'),
'apis': ('navigator.geolocation',),
}),
('IDLE', {
'name': _lazy(u'Idle'),
'description': _lazy(u'The app requires the platform to support the '
u'`addIdleObserver` API.'),
'apis': ('addIdleObserver', 'removeIdleObserver'),
}),
('NETWORK_INFO', {
'name': _lazy(u'Network Information'),
'description': _lazy(u'The app requires the ability to get '
u'information about the network connection (the '
u'`navigator.mozConnection` API).'),
'apis': ('navigator.mozConnection', 'navigator.mozMobileConnection'),
}),
('NETWORK_STATS', {
'name': _lazy(u'Network Stats'),
'description': _lazy(u'The app requires the '
u'`navigator.mozNetworkStats` API.'),
'apis': ('navigator.networkStats', 'navigator.mozNetworkStats'),
}),
('PROXIMITY', {
'name': _lazy(u'Proximity'),
'description': _lazy(u'The app requires a proximity sensor (the '
u'`ondeviceproximity` API).'),
'apis': ('navigator.ondeviceproximity',),
}),
('PUSH', {
'name': _lazy(u'Simple Push'),
'description': _lazy(u'The app requires the `navigator.mozPush` API.'),
'apis': ('navigator.push', 'navigator.mozPush'),
}),
('ORIENTATION', {
'name': _lazy(u'Screen Orientation'),
'description': _lazy(u'The app requires the platform to support the '
u'`ondeviceorientation` API.'),
'apis': ('ondeviceorientation',),
}),
('TIME_CLOCK', {
'name': _lazy(u'Time/Clock'),
'description': _lazy(u'The app requires the `navigator.mozTime` API.'),
'apis': ('navigator.time', 'navigator.mozTime'),
}),
('VIBRATE', {
'name': _lazy(u'Vibration'),
'description': _lazy(u'The app requires the device to support '
u'vibration (the `navigator.vibrate` API).'),
'apis': ('navigator.vibrate',),
}),
('FM', {
'name': u'WebFM',
'description': _lazy(u'The app requires the `navigator.mozFM` or '
u'`navigator.mozFMRadio` APIs.'),
'apis': ('navigator.mozFM', 'navigator.mozFMRadio'),
}),
('SMS', {
'name': u'WebSMS',
'description': _lazy(u'The app requires the `navigator.mozSms` API.'),
'apis': ('navigator.mozSms', 'navigator.mozSMS'),
}),
('TOUCH', {
'name': _lazy(u'Touch'),
'description': _lazy(u'The app requires the platform to support touch '
u'events. This option indicates that the app '
u'will not function when used with a mouse.'),
'apis': ('window.ontouchstart',),
}),
('QHD', {
'name': _lazy(u'Smartphone-Sized Displays (qHD)'),
'description': _lazy(u'The app requires the platform to have a '
u'smartphone-sized display (having qHD '
u'resolution). This option indicates that the '
u'app will be unusable on larger displays '
u'(e.g., tablets, desktop, large or high-DPI '
u'phones).'),
'apis': (),
}),
('MP3', {
'name': u'MP3',
'description': _lazy(u'The app requires that the platform can decode '
u'and play MP3 files.'),
'apis': (),
}),
('AUDIO', {
'name': _lazy(u'Audio'),
'description': _lazy(u'The app requires that the platform supports '
u'the HTML5 audio API.'),
'apis': ('Audio',),
}),
('WEBAUDIO', {
'name': _lazy(u'Web Audio'),
'description': _lazy(u'The app requires that the platform supports '
u'the Web Audio API (`window.AudioContext`).'),
'apis': ('AudioContext', 'mozAudioContext', 'webkitAudioContext'),
}),
('VIDEO_H264', {
'name': u'H.264',
'description': _lazy(u'The app requires that the platform can decode '
u'and play H.264 video files.'),
'apis': (),
}),
('VIDEO_WEBM', {
'name': u'WebM',
'description': _lazy(u'The app requires that the platform can decode '
u'and play WebM video files (VP8).'),
'apis': (),
}),
('FULLSCREEN', {
'name': _lazy(u'Full Screen'),
'description': _lazy(u'The app requires the Full Screen API '
u'(`requestFullScreen` or '
u'`mozRequestFullScreen`).'),
'apis': ('document.documentElement.requestFullScreen',),
}),
('GAMEPAD', {
'name': _lazy(u'Gamepad'),
'description': _lazy(u'The app requires the platform to support the '
u'gamepad API (`navigator.getGamepads`).'),
'apis': ('navigator.getGamepad', 'navigator.mozGetGamepad'),
}),
('QUOTA', {
'name': _lazy(u'Quota Management'),
'description': _lazy(u'The app requires the platform to allow '
u'persistent storage limit increases above the '
u'normally allowed limits for an app '
u'(`window.StorageInfo` or '
u'`window.persistentStorage`).'),
'apis': ('navigator.persistentStorage', 'navigator.temporaryStorage'),
}),
('CAMERA', {
'name': _lazy(u'Camera'),
'description': _lazy(u'The app requires the platform to allow access '
u'to video from the device camera via a '
u'LocalMediaStream object.'),
'apis': ('navigator.getUserMedia({video: true, picture: true})',),
}),
('MIC', {
'name': _lazy(u'Microphone'),
'description': _lazy(u'The app requires the platform to allow access '
u'to audio from the device microphone.'),
'apis': ('navigator.getUserMedia({audio: true})',),
}),
('SCREEN_CAPTURE', {
'name': _lazy(u'Screen Capture'),
'description': _lazy(u'The app requires the platform to allow access '
u'to the device screen for capture.'),
'apis': ('navigator.getUserMedia({video: {mandatory: '
'{chromeMediaSource: "screen"}}})',),
}),
('WEBRTC_MEDIA', {
'name': _lazy(u'WebRTC MediaStream'),
'description': _lazy(u'The app requires the platform to allow web '
u'real-time communication browser-to-browser '
u'inbound media streams.'),
'apis': ('MediaStream',),
}),
('WEBRTC_DATA', {
'name': _lazy(u'WebRTC DataChannel'),
'description': _lazy(u'The app requires the platform to allow '
u'peer-to-peer exchange of data other than audio '
u'and video.'),
'apis': ('DataChannel',),
}),
('WEBRTC_PEER', {
'name': _lazy(u'WebRTC PeerConnection'),
'description': _lazy(u'The app requires the platform to allow '
u'communication of streaming data between '
u'peers.'),
'apis': ('RTCPeerConnection',),
}),
('SPEECH_SYN', {
'name': _lazy(u'Web Speech Synthesis'),
'description': _lazy(u'The app requires the platform to allow the use '
u'of text-to-speech.'),
'apis': ('SpeechSynthesis',)
}),
('SPEECH_REC', {
'name': _lazy(u'Web Speech Recognition'),
'description': _lazy(u'The app requires the platform to allow '
u'the use of speech-to-text.'),
'apis': ('SpeechRecognition',)
}),
('POINTER_LOCK', {
'name': _lazy(u'Pointer Lock'),
'description': _lazy(u'The app requires the platform to provide '
u'additional information and control about the '
u'pointer.'),
'apis': ('document.documentElement.requestPointerLock',)
}),
('NOTIFICATION', {
'name': _lazy(u'Notifications'),
        'description': _lazy(u'The app requires the platform to allow '
u'displaying phone and desktop notifications to '
u'the user.'),
'apis': ('Notification', 'navigator.mozNotification')
}),
('ALARM', {
'name': _lazy(u'Alarms'),
'description': _lazy(u'The app requires the platform to provide '
u'access to the device alarm settings to '
u'schedule notifications and events at specific '
                             u'times.'),
'apis': ('navigator.mozAlarms',)
}),
('SYSTEMXHR', {
'name': _lazy(u'SystemXHR'),
'description': _lazy(u'The app requires the platform to allow the '
u'sending of asynchronous HTTP requests without '
u'the restrictions of the same-origin policy.'),
'apis': ('XMLHttpRequest({mozSystem: true})',)
}),
('TCPSOCKET', {
'name': _lazy(u'TCP Sockets'),
'description': _lazy(u'The app requires the platform to allow opening '
u'raw TCP sockets.'),
'apis': ('TCPSocket', 'navigator.mozTCPSocket')
}),
('THIRDPARTY_KEYBOARD_SUPPORT', {
'name': _lazy(u'Third-Party Keyboard Support'),
'description': _lazy(u'The app requires the platform to support '
u'third-party keyboards.'),
'apis': ('navigator.mozInputMethod',),
}),
('NETWORK_INFO_MULTIPLE', {
'name': _lazy(u'Multiple Network Information'),
'description': _lazy(u'The app requires the ability to get '
u'information about multiple network '
u'connections.'),
'apis': ('navigator.mozMobileConnections',),
}),
('MOBILEID', {
'name': _lazy(u'Mobile ID'),
'description': _lazy(u'The app requires access to the '
u'`navigator.getMobileIdAssertion` API.'),
'apis': ('navigator.getMobileIdAssertion',),
}),
('PRECOMPILE_ASMJS', {
'name': _lazy(u'Asm.js Precompilation'),
'description': _lazy(u'The app requires the device to support '
u'precompilation of asm.js code.'),
'apis': (),
}),
('HARDWARE_512MB_RAM', {
'name': _lazy(u'512MB RAM Device'),
'description': _lazy(u'The app requires the device to have at least '
u'512MB RAM.'),
'apis': (),
}),
('HARDWARE_1GB_RAM', {
'name': _lazy(u'1GB RAM Device'),
'description': _lazy(u'The app requires the device to have at least '
u'1GB RAM.'),
'apis': (),
}),
('NFC', {
'name': _lazy(u'NFC'),
'description': _lazy(u'The app requires access to the Near Field '
u'Communication (NFC) API.'),
'apis': ('navigator.mozNfc',),
})
])
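# A rough sketch (an assumption for illustration, not the actual code in
# mkt.webapps.models) of how the keys above can be expanded into the boolean
# field names used by the AppFeatures model, e.g. 'has_apps', 'has_packaged_apps'.
def _example_feature_field_names(features=APP_FEATURES):
    return ['has_%s' % key.lower() for key in features]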
PRERELEASE_PERMISSIONS = [
'moz-attention',
'moz-firefox-accounts',
'moz-audio-channel-telephony',
'moz-audio-channel-ringer',
]
class FeatureProfile(OrderedDict):
"""
Convenience class for performing conversion operations on feature profile
representations.
"""
def __init__(self, _default=False, **kwargs):
"""
Creates a FeatureProfile object.
Takes kwargs to the features to enable or disable. Features not
specified but that are in APP_FEATURES will be False by default.
E.g.:
>>> FeatureProfile(sms=True).to_signature()
'400.32.1'
"""
super(FeatureProfile, self).__init__()
for af in APP_FEATURES:
key = af.lower()
self[key] = kwargs.get(key, _default)
@classmethod
def from_int(cls, features, limit=None):
"""
        Construct a FeatureProfile object from an integer bitfield.
>>> FeatureProfile.from_int(0x42)
FeatureProfile([('apps', False), ('packaged_apps', True), ...)
"""
instance = cls() # Defaults to everything set to False.
if limit is None:
limit = len(APP_FEATURES)
app_features_to_consider = OrderedDict(
itertools.islice(APP_FEATURES.iteritems(), limit))
for i, k in enumerate(reversed(app_features_to_consider)):
instance[k.lower()] = bool(features & 1 << i)
return instance
@classmethod
def from_signature(cls, signature):
"""
Construct a FeatureProfile object from a decimal signature.
>>> FeatureProfile.from_signature('40000000.32.1')
FeatureProfile([('apps', False), ('packaged_apps', True), ...)
"""
# If the signature is invalid, let the ValueError be raised, it's up to
# the caller to decide what to do with it.
number, limit, version = signature.split('.')
return cls.from_int(int(number, 16), limit=int(limit))
def to_int(self):
"""
Convert a FeatureProfile object to an integer bitfield.
>>> profile.to_int()
66
"""
features = 0
for i, v in enumerate(reversed(self.values())):
features |= bool(v) << i
return features
def to_signature(self):
"""
Convert a FeatureProfile object to its decimal signature.
>>> profile.to_signature()
'40000000.32.1'
"""
return '%x.%s.%s' % (self.to_int(), len(self),
settings.APP_FEATURES_VERSION)
def to_list(self):
"""
Returns a list representing the true values of this profile.
"""
return [k for k, v in self.iteritems() if v]
def to_kwargs(self, prefix=''):
"""
Returns a dict representing the false values of this profile.
Parameters:
- `prefix` - a string prepended to the key name. Helpful if being used
to traverse relations
This only includes keys for which the profile is False, which is useful
for querying apps where we want to filter by apps which do not require
a feature.
        >>> profile = FeatureProfile.from_signature(request.get('pro'))
>>> Webapp.objects.filter(**profile.to_kwargs())
"""
return dict((prefix + k, False) for k, v in self.iteritems() if not v)
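# A minimal usage sketch; the signature string comes from the class docstring
# above, and the 'features__has_' prefix is only a hypothetical relation path
# used for illustration.
def _example_profile_filter_kwargs(signature='40000000.32.1'):
    profile = FeatureProfile.from_signature(signature)
    # Keys look like 'features__has_<feature>' mapped to False, suitable for
    # filtering out apps that require features this profile lacks.
    return profile.to_kwargs(prefix='features__has_')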
|
|
"""Tools for creating and running the model."""
from __future__ import print_function, division
import os
import numpy as np
from scipy import integrate
from scipy import stats
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import brier_score_loss
from sklearn.neighbors import KernelDensity
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import NotFittedError
import preprocessing
import utilities
class WPModel(object):
"""The object that computes win probabilities.
    In addition to holding the model itself, it defines some column names likely to be
    used in the model as parameters, to allow other users to more easily figure out which
columns go into the model.
Parameters
----------
copy_data : boolean (default=``True``)
Whether or not to copy data when fitting and applying the model. Running the model
in-place (``copy_data=False``) will be faster and have a smaller memory footprint,
but if not done carefully can lead to data integrity issues.
Attributes
----------
model : A Scikit-learn pipeline (or equivalent)
The actual model used to compute WP. Upon initialization it will be set to
a default model, but can be overridden by the user.
column_descriptions : dictionary
A dictionary whose keys are the names of the columns used in the model, and the values are
        string descriptions of what the columns mean. Set at initialization to match the default model;
        if you create your own model you'll need to update this attribute manually.
training_seasons : A list of ints, or ``None`` (default=``None``)
If the model was trained using data downloaded from nfldb, a list of the seasons
used to train the model. If nfldb was **not** used, an empty list. If no model
has been trained yet, ``None``.
training_season_types : A list of strings or ``None`` (default=``None``)
Same as ``training_seasons``, except for the portions of the seasons used in training the
model ("Preseason", "Regular", and/or "Postseason").
validation_seasons : same as ``training_seasons``, but for validation data.
validation_season_types : same as ``training_season_types``, but for validation data.
sample_probabilities : A numpy array of floats or ``None`` (default=``None``)
After the model has been validated, contains the sampled predicted probabilities used to
compute the validation statistic.
predicted_win_percents : A numpy array of floats or ``None`` (default=``None``)
After the model has been validated, contains the actual probabilities in the test
set at each probability in ``sample_probabilities``.
num_plays_used : A numpy array of floats or ``None`` (default=``None``)
After the model has been validated, contains the number of plays used to compute each
element of ``predicted_win_percents``.
model_directory : string
The directory where all models will be saved to or loaded from.
"""
model_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
_default_model_filename = "default_model.nflwin"
def __init__(self,
copy_data=True
):
self.copy_data = copy_data
self.model = self.create_default_pipeline()
self._training_seasons = None
self._training_season_types = None
self._validation_seasons = None
self._validation_season_types = None
self._sample_probabilities = None
self._predicted_win_percents = None
self._num_plays_used = None
@property
def training_seasons(self):
return self._training_seasons
@property
def training_seasons_types(self):
return self._training_season_types
@property
def validation_seasons(self):
return self._validation_seasons
@property
def validation_seasons_types(self):
return self._validation_season_types
@property
def sample_probabilities(self):
return self._sample_probabilities
@property
def predicted_win_percents(self):
return self._predicted_win_percents
@property
def num_plays_used(self):
return self._num_plays_used
def train_model(self,
source_data="nfldb",
training_seasons=[2009, 2010, 2011, 2012, 2013, 2014],
training_season_types=["Regular", "Postseason"],
target_colname="offense_won"):
"""Train the model.
Once a modeling pipeline is set up (either the default or something
custom-generated), historical data needs to be fed into it in order to
"fit" the model so that it can then be used to predict future results.
This method implements a simple wrapper around the core Scikit-learn functionality
which does this.
The default is to use data from the nfldb database, however that can be changed
to a simple Pandas DataFrame if desired (for instance if you wish to use data
from another source).
There is no particular output from this function, rather the parameters governing
the fit of the model are saved inside the model object itself. If you want to get an
estimate of the quality of the fit, use the ``validate_model`` method after running
this method.
Notes
-----
If you are loading in the default model, **there is no need to re-run this method**.
In fact, doing so will likely result in weird errors and could corrupt the model if you
were to try to save it back to disk.
Parameters
----------
source_data : the string ``"nfldb"`` or a Pandas DataFrame (default=``"nfldb"``)
The data to be used to train the model. If ``"nfldb"``, will query the nfldb
database for the training data (note that this requires a correctly configured
installation of nfldb's database).
training_seasons : list of ints (default=``[2009, 2010, 2011, 2012, 2013, 2014]``)
What seasons to use to train the model if getting data from the nfldb database.
If ``source_data`` is not ``"nfldb"``, this argument will be ignored.
**NOTE:** it is critical not to use all possible data in order to train the
model - some will need to be reserved for a final validation (see the
``validate_model`` method). A good dataset to reserve
for validation is the most recent one or two NFL seasons.
training_season_types : list of strings (default=``["Regular", "Postseason"]``)
If querying from the nfldb database, what parts of the seasons to use.
Options are "Preseason", "Regular", and "Postseason". If ``source_data`` is not
``"nfldb"``, this argument will be ignored.
target_colname : string or integer (default=``"offense_won"``)
The name of the target variable column.
Returns
-------
``None``
"""
self._training_seasons = []
self._training_season_types = []
if isinstance(source_data, basestring):
if source_data == "nfldb":
source_data = utilities.get_nfldb_play_data(season_years=training_seasons,
season_types=training_season_types)
self._training_seasons = training_seasons
self._training_season_types = training_season_types
else:
raise ValueError("WPModel: if source_data is a string, it must be 'nfldb'")
target_col = source_data[target_colname]
feature_cols = source_data.drop(target_colname, axis=1)
self.model.fit(feature_cols, target_col)
def validate_model(self,
source_data="nfldb",
validation_seasons=[2015],
validation_season_types=["Regular", "Postseason"],
target_colname="offense_won"):
"""Validate the model.
Once a modeling pipeline is trained, a different dataset must be fed into the trained model
to validate the quality of the fit.
This method implements a simple wrapper around the core Scikit-learn functionality
which does this.
The default is to use data from the nfldb database, however that can be changed
to a simple Pandas DataFrame if desired (for instance if you wish to use data
from another source).
        The validation statistic is a p value which represents the confidence at which
        we can reject the null hypothesis that the model predicts the appropriate win
        probabilities. This number is computed by first smoothing the predicted win probabilities of all the test data,
        and separately of just the plays where the offense won, with a Gaussian `kernel density
        estimate <http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html#sklearn.neighbors.KernelDensity>`_
        with standard deviation = 0.01. Once the data is smoothed, ratios at each percentage point from 1% to 99% are computed (i.e.
what fraction of the time did the offense win when the model says they have a 1% chance of winning, 2% chance, etc.). Each of
these ratios should be well approximated by the binomial distribution, since they are essentially independent (not perfectly
but hopefully close enough) weighted coin flips, giving a p value. From there `Fisher's method <https://en.wikipedia.org/wiki/Fisher%27s_method>`_
is used to combine the p values into a global p value. A p value close to zero means that the model is unlikely to be
properly predicting the correct win probabilities. A p value close to one, **while not proof that the model is correct**,
means that the model is at least not inconsistent with the hypothesis that it predicts good win probabilities.
Parameters
----------
source_data : the string ``"nfldb"`` or a Pandas DataFrame (default=``"nfldb"``)
The data to be used to train the model. If ``"nfldb"``, will query the nfldb
database for the training data (note that this requires a correctly configured
installation of nfldb's database).
        validation_seasons : list of ints (default=``[2015]``)
What seasons to use to validate the model if getting data from the nfldb database.
If ``source_data`` is not ``"nfldb"``, this argument will be ignored.
**NOTE:** it is critical not to use the same data to validate the model as was used
in the fit. Generally a good data set to use for validation is one from a time
period more recent than was used to train the model. For instance, if the model was trained
on data from 2009-2014, data from the 2015 season would be a sensible choice to validate the model.
        validation_season_types : list of strings (default=``["Regular", "Postseason"]``)
If querying from the nfldb database, what parts of the seasons to use.
Options are "Preseason", "Regular", and "Postseason". If ``source_data`` is not
``"nfldb"``, this argument will be ignored.
target_colname : string or integer (default=``"offense_won"``)
The name of the target variable column.
Returns
-------
        tuple of two floats
            ``(max_deviation, residual_area)``: the largest discrepancy between the predicted and observed
            win percentages, and the total area under the curve of |predicted WP - observed WP|. (The combined
            p value described above is computed by ``_test_distribution`` but is not currently returned.)
Raises
------
NotFittedError
If the model hasn't been fit.
Notes
-----
Probabilities are computed between 1 and 99 percent because a single incorrect prediction at 100% or 0% automatically drives
the global p value to zero. Since the model is being smoothed this situation can occur even when there are no model predictions
at those extreme values, and therefore leads to erroneous p values.
While it seems reasonable (to me at least), I am not totally certain that this approach is entirely correct.
It's certainly sub-optimal in that you would ideally reject the null hypothesis that the model predictions
**aren't** appropriate, but that seems to be a much harder problem (and one that would need much more test
data to beat down the uncertainties involved). I'm also not sure if using Fisher's method is appropriate here,
and I wonder if it might be necessary to Monte Carlo this. I would welcome input from others on better ways to do this.
"""
if self.training_seasons is None:
raise NotFittedError("Must fit model before validating.")
self._validation_seasons = []
self._validation_season_types = []
if isinstance(source_data, basestring):
if source_data == "nfldb":
source_data = utilities.get_nfldb_play_data(season_years=validation_seasons,
season_types=validation_season_types)
self._validation_seasons = validation_seasons
self._validation_season_types = validation_season_types
else:
raise ValueError("WPModel: if source_data is a string, it must be 'nfldb'")
target_col = source_data[target_colname]
feature_cols = source_data.drop(target_colname, axis=1)
predicted_probabilities = self.model.predict_proba(feature_cols)[:,1]
self._sample_probabilities, self._predicted_win_percents, self._num_plays_used = (
WPModel._compute_predicted_percentages(target_col.values, predicted_probabilities))
#Compute the maximal deviation from a perfect prediction as well as the area under the
#curve of the residual between |predicted - perfect|:
max_deviation, residual_area = self._compute_prediction_statistics(self.sample_probabilities,
self.predicted_win_percents)
return max_deviation, residual_area
#Compute p-values for each where null hypothesis is that distributions are same, then combine
#them all to make sure data is not inconsistent with accurate predictions.
# combined_pvalue = self._test_distribution(self.sample_probabilities,
# self.predicted_win_percents,
# self.num_plays_used)
# return combined_pvalue
@staticmethod
def _compute_prediction_statistics(sample_probabilities, predicted_win_percents):
"""Take the KDE'd model estimates, then compute statistics.
Returns
-------
A tuple of (``max_deviation``, ``residual_area``), where ``max_deviation``
is the largest discrepancy between the model and expectation at any WP,
and ``residual_area`` is the total area under the curve of |predicted WP - expected WP|.
"""
abs_deviations = np.abs(predicted_win_percents - sample_probabilities)
max_deviation = np.max(abs_deviations)
residual_area = integrate.simps(abs_deviations,
sample_probabilities)
return (max_deviation, residual_area)
def predict_wp(self, plays):
"""Estimate the win probability for a set of plays.
Basically a simple wrapper around ``WPModel.model.predict_proba``,
takes in a DataFrame and then spits out an array of predicted
win probabilities.
Parameters
----------
plays : Pandas DataFrame
The input data to use to make the predictions.
Returns
-------
Numpy array, of length ``len(plays)``
Predicted probability that the offensive team in each play
will go on to win the game.
Raises
------
NotFittedError
If the model hasn't been fit.
"""
if self.training_seasons is None:
raise NotFittedError("Must fit model before predicting WP.")
return self.model.predict_proba(plays)[:,1]
def plot_validation(self, axis=None, **kwargs):
"""Plot the validation data.
Parameters
----------
axis : matplotlib.pyplot.axis object or ``None`` (default=``None``)
If provided, the validation line will be overlaid on ``axis``.
Otherwise, a new figure and axis will be generated and plotted on.
**kwargs
Arguments to ``axis.plot``.
Returns
-------
matplotlib.pylot.axis
The axis the plot was made on.
Raises
------
NotFittedError
If the model hasn't been fit **and** validated.
"""
if self.sample_probabilities is None:
raise NotFittedError("Must validate model before plotting.")
import matplotlib.pyplot as plt
if axis is None:
axis = plt.figure().add_subplot(111)
axis.plot([0, 100], [0, 100], ls="--", lw=2, color="black")
axis.set_xlabel("Predicted WP")
axis.set_ylabel("Actual WP")
axis.plot(self.sample_probabilities,
self.predicted_win_percents,
**kwargs)
return axis
@staticmethod
def _test_distribution(sample_probabilities, predicted_win_percents, num_plays_used):
"""Based off assuming the data at each probability is a Bernoulli distribution."""
#Get the p-values:
p_values = [stats.binom_test(np.round(predicted_win_percents[i] * num_plays_used[i]),
np.round(num_plays_used[i]),
p=sample_probabilities[i]) for i in range(len(sample_probabilities))]
combined_p_value = stats.combine_pvalues(p_values)[1]
return(combined_p_value)
@staticmethod
def _compute_predicted_percentages(actual_results, predicted_win_probabilities):
"""Compute the sample percentages from a validation data set.
"""
kde_offense_won = KernelDensity(kernel='gaussian', bandwidth=0.01).fit(
(predicted_win_probabilities[(actual_results == 1)])[:, np.newaxis])
kde_total = KernelDensity(kernel='gaussian', bandwidth=0.01).fit(
predicted_win_probabilities[:, np.newaxis])
sample_probabilities = np.linspace(0.01, 0.99, 99)
number_density_offense_won = np.exp(kde_offense_won.score_samples(sample_probabilities[:, np.newaxis])) * np.sum((actual_results))
number_density_total = np.exp(kde_total.score_samples(sample_probabilities[:, np.newaxis])) * len(actual_results)
number_offense_won = number_density_offense_won * np.sum(actual_results) / np.sum(number_density_offense_won)
number_total = number_density_total * len(actual_results) / np.sum(number_density_total)
predicted_win_percents = number_offense_won / number_total
return 100.*sample_probabilities, 100.*predicted_win_percents, number_total
def create_default_pipeline(self):
"""Create the default win probability estimation pipeline.
Returns
-------
Scikit-learn pipeline
The default pipeline, suitable for computing win probabilities
but by no means the best possible model.
This can be run any time a new default pipeline is required,
and either set to the ``model`` attribute or used independently.
"""
steps = []
offense_team_colname = "offense_team"
home_team_colname = "home_team"
home_score_colname = "curr_home_score"
away_score_colname = "curr_away_score"
down_colname = "down"
quarter_colname = "quarter"
time_colname = "seconds_elapsed"
yardline_colname = "yardline"
        yards_to_go_colname = "yards_to_go"
self.column_descriptions = {
offense_team_colname: "Abbreviation for the offensive team",
home_team_colname: "Abbreviation for the home team",
away_score_colname: "Abbreviation for the visiting team",
down_colname: "The current down",
yards_to_go_colname: "Yards to a first down (or the endzone)",
quarter_colname: "The quarter",
time_colname: "Seconds elapsed in the quarter",
yardline_colname: ("The yardline, given by (yards from own goalline - 50). "
"-49 is your own 1 while 49 is the opponent's 1.")
}
is_offense_home = preprocessing.ComputeIfOffenseIsHome(offense_team_colname,
home_team_colname,
copy=self.copy_data)
steps.append(("compute_offense_home", is_offense_home))
score_differential = preprocessing.CreateScoreDifferential(home_score_colname,
away_score_colname,
is_offense_home.offense_home_team_colname,
copy=self.copy_data)
steps.append(("create_score_differential", score_differential))
steps.append(("map_downs_to_int", preprocessing.MapToInt(down_colname, copy=self.copy_data)))
total_time_elapsed = preprocessing.ComputeElapsedTime(quarter_colname, time_colname, copy=self.copy_data)
steps.append(("compute_total_time_elapsed", total_time_elapsed))
steps.append(("remove_unnecessary_columns", preprocessing.CheckColumnNames(
column_names=[is_offense_home.offense_home_team_colname,
score_differential.score_differential_colname,
total_time_elapsed.total_time_colname,
yardline_colname,
yards_to_go_colname,
down_colname],
copy=self.copy_data)))
steps.append(("encode_categorical_columns", preprocessing.OneHotEncoderFromDataFrame(
categorical_feature_names=[down_colname],
copy=self.copy_data)))
search_grid = {'base_estimator__penalty': ['l1', 'l2'],
'base_estimator__C': [0.01, 0.1, 1, 10, 100]
}
base_model = LogisticRegression()
calibrated_model = CalibratedClassifierCV(base_model, cv=2, method="isotonic")
#grid_search_model = GridSearchCV(calibrated_model, search_grid,
# scoring=self._brier_loss_scorer)
steps.append(("compute_model", calibrated_model))
pipe = Pipeline(steps)
return pipe
def save_model(self, filename=None):
"""Save the WPModel instance to disk.
All models are saved to the same place, with the installed
NFLWin library (given by ``WPModel.model_directory``).
Parameters
----------
filename : string (default=None):
The filename to use for the saved model. If this parameter
is not specified, save to the default filename. Note that if a model
already lists with this filename, it will be overwritten. Note also that
this is a filename only, **not** a full path. If a full path is specified
it is likely (albeit not guaranteed) to cause errors.
Returns
-------
``None``
"""
if filename is None:
filename = self._default_model_filename
joblib.dump(self, os.path.join(self.model_directory, filename))
@classmethod
def load_model(cls, filename=None):
"""Load a saved WPModel.
Parameters
----------
Same as ``save_model``.
Returns
-------
``nflwin.WPModel`` instance.
"""
if filename is None:
filename = cls._default_model_filename
return joblib.load(os.path.join(cls.model_directory, filename))
@staticmethod
def _brier_loss_scorer(estimator, X, y):
"""Use the Brier loss to estimate model score.
For use in GridSearchCV, instead of accuracy.
"""
predicted_positive_probabilities = estimator.predict_proba(X)[:, 1]
return 1. - brier_score_loss(y, predicted_positive_probabilities)
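# A minimal end-to-end sketch (assumes a working nfldb installation and the
# default season choices documented above; not part of the library itself).
def _example_workflow():
    model = WPModel()
    model.train_model(training_seasons=[2009, 2010, 2011, 2012, 2013, 2014])
    max_deviation, residual_area = model.validate_model(validation_seasons=[2015])
    model.save_model()  # writes default_model.nflwin under WPModel.model_directory
    return max_deviation, residual_area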
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
def nearest_neighbor_interp_np(X,
out_h,
out_w,
out_size=None,
actual_shape=None,
align_corners=True,
data_layout='NCHW'):
"""nearest neighbor interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
n, c, in_h, in_w = X.shape
ratio_h = ratio_w = 0.0
if (out_h > 1):
if (align_corners):
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
ratio_h = 1.0 * in_h / out_h
if (out_w > 1):
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
ratio_w = 1.0 * in_w / out_w
out = np.zeros((n, c, out_h, out_w))
if align_corners:
for i in range(out_h):
in_i = int(ratio_h * i + 0.5)
for j in range(out_w):
in_j = int(ratio_w * j + 0.5)
out[:, :, i, j] = X[:, :, in_i, in_j]
else:
for i in range(out_h):
in_i = int(ratio_h * i)
for j in range(out_w):
in_j = int(ratio_w * j)
out[:, :, i, j] = X[:, :, in_i, in_j]
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(X.dtype)
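# An illustrative helper (not one of the original test cases): with
# align_corners=True, upsampling a 2x2 feature map to 4x4 repeats each source
# pixel in a 2x2 block, because int(ratio * i + 0.5) maps output rows/cols
# [0, 1, 2, 3] back to input indices [0, 0, 1, 1].
def _nearest_interp_toy_example():
    x = np.arange(4, dtype="float32").reshape((1, 1, 2, 2))
    out = nearest_neighbor_interp_np(x, out_h=4, out_w=4, align_corners=True)
    # out[0, 0] == [[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]
    return out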
class TestNearestInterpOp(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
self.init_test_case()
self.op_type = "nearest_interp"
input_np = np.random.random(self.input_shape).astype("float32")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
if self.scale > 0:
out_h = int(in_h * self.scale)
out_w = int(in_w * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = nearest_neighbor_interp_np(
input_np, out_h, out_w, self.out_size, self.actual_shape,
self.align_corners, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'data_layout': self.data_layout
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 4, 4]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpCase1(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase2(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase3(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 1, 128, 64]
self.out_h = 64
self.out_w = 128
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase4(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.out_size = np.array([2, 2]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpCase5(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.out_size = np.array([11, 11]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpCase6(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 1, 128, 64]
self.out_h = 64
self.out_w = 128
self.scale = 0.
self.out_size = np.array([65, 129]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpSame(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 128, 64]
self.out_h = 128
self.out_w = 64
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpActualShape(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpDataLayout(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 4, 4, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.out_size = np.array([3, 8]).astype("int32")
self.align_corners = True
self.data_layout = "NHWC"
class TestNearestInterpOpUint8(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "nearest_interp"
input_np = np.random.randint(
low=0, high=256, size=self.input_shape).astype("uint8")
if self.scale > 0:
out_h = int(self.input_shape[2] * self.scale)
out_w = int(self.input_shape[3] * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.align_corners)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'interp_method': self.interp_method,
'align_corners': self.align_corners
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(place=core.CPUPlace(), atol=1)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 3, 9, 6]
self.out_h = 10
self.out_w = 9
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 128, 64]
self.out_h = 120
self.out_w = 50
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 5
self.out_w = 13
self.scale = 0.
self.out_size = np.array([6, 15]).astype("int32")
self.align_corners = True
class TestNearestInterpWithoutCorners(TestNearestInterpOp):
def set_align_corners(self):
self.align_corners = False
class TestNearestNeighborInterpScale1(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 7, 5]
self.out_h = 64
self.out_w = 32
self.scale = 2.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpScale2(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 5, 7]
self.out_h = 64
self.out_w = 32
self.scale = 1.5
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpScale3(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 7, 5]
self.out_h = 64
self.out_w = 32
self.scale = 1.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestInterpOp_attr_tensor(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "nearest_interp"
self.shape_by_1Dtensor = False
self.scale_by_1Dtensor = False
self.attrs = {
'interp_method': self.interp_method,
'align_corners': self.align_corners,
}
input_np = np.random.random(self.input_shape).astype("float32")
self.inputs = {'X': input_np}
if self.scale_by_1Dtensor:
self.inputs['Scale'] = np.array([self.scale]).astype("float32")
elif self.scale > 0:
out_h = int(self.input_shape[2] * self.scale)
out_w = int(self.input_shape[3] * self.scale)
self.attrs['scale'] = self.scale
else:
out_h = self.out_h
out_w = self.out_w
if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size
elif self.out_size is not None:
size_tensor = []
for index, ele in enumerate(self.out_size):
size_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs['SizeTensor'] = size_tensor
self.attrs['out_h'] = self.out_h
self.attrs['out_w'] = self.out_w
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.align_corners)
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 4, 4]
self.out_h = 3
self.out_w = 3
self.scale = 0.
self.out_size = [3, 3]
self.align_corners = True
# out_size is a tensor list
class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.out_size = [8, 12]
self.align_corners = True
# out_size is a 1-D tensor
class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.shape_by_1Dtensor = True
# scale is a 1-D tensor
class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 2.0
self.out_size = None
self.align_corners = True
self.scale_by_1Dtensor = True
class TestNearestAPI(OpTest):
def test_case(self):
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
y = fluid.data(name="y", shape=[2, 6, 6, 3], dtype="float32")
dim = fluid.data(name="dim", shape=[1], dtype="int32")
shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32")
scale_tensor = fluid.data(
name="scale_tensor", shape=[1], dtype="float32")
out1 = fluid.layers.resize_nearest(
y, out_shape=[12, 12], data_format='NHWC')
out2 = fluid.layers.resize_nearest(x, out_shape=[12, dim])
out3 = fluid.layers.resize_nearest(x, out_shape=shape_tensor)
out4 = fluid.layers.resize_nearest(
x, out_shape=[4, 4], actual_shape=actual_size)
out5 = fluid.layers.resize_nearest(x, scale=scale_tensor)
x_data = np.random.random((2, 3, 6, 6)).astype("float32")
dim_data = np.array([12]).astype("int32")
shape_data = np.array([12, 12]).astype("int32")
actual_size_data = np.array([12, 12]).astype("int32")
scale_data = np.array([2.0]).astype("float32")
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(fluid.default_main_program(),
feed={
"x": x_data,
"y": np.transpose(x_data, (0, 2, 3, 1)),
"dim": dim_data,
"shape_tensor": shape_data,
"actual_size": actual_size_data,
"scale_tensor": scale_data
},
fetch_list=[out1, out2, out3, out4, out5],
return_numpy=True)
expect_res = nearest_neighbor_interp_np(
x_data, out_h=12, out_w=12, align_corners=True)
self.assertTrue(
np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1))))
for i in range(len(results) - 1):
self.assertTrue(np.allclose(results[i + 1], expect_res))
class TestNearestInterpException(OpTest):
def test_exception(self):
input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32")
def attr_data_format():
# for 4-D input, data_format can only be NCHW or NHWC
out = fluid.layers.resize_nearest(
input, out_shape=[4, 8], data_format='NDHWC')
def attr_scale_type():
out = fluid.layers.resize_nearest(input, scale='scale')
def attr_scale_value():
out = fluid.layers.resize_nearest(input, scale=-0.3)
self.assertRaises(ValueError, attr_data_format)
self.assertRaises(TypeError, attr_scale_type)
self.assertRaises(ValueError, attr_scale_value)
if __name__ == "__main__":
unittest.main()
|
|
import os
import sys
import unittest
import ast
import weakref
from test import support
def to_tuple(t):
if t is None or isinstance(t, (str, int, complex)):
return t
elif isinstance(t, list):
return [to_tuple(e) for e in t]
result = [t.__class__.__name__]
if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
result.append((t.lineno, t.col_offset))
if t._fields is None:
return tuple(result)
for f in t._fields:
result.append(to_tuple(getattr(t, f)))
return tuple(result)
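# A small sketch (not part of the original test data) of how to_tuple flattens a
# parsed tree: each node becomes its class name, its (lineno, col_offset) when
# available, and its flattened fields. Exact node class names vary between
# interpreter versions, so nothing is asserted here.
def _example_to_tuple(source="v = 1"):
    return to_tuple(compile(source, "?", "exec", ast.PyCF_ONLY_AST))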
# These tests are compiled through "exec"
# There should be at least one test per statement
exec_tests = [
# None
"None",
# FunctionDef
"def f(): pass",
# FunctionDef with arg
"def f(a): pass",
# FunctionDef with arg and default value
"def f(a=0): pass",
# FunctionDef with varargs
"def f(*args): pass",
# FunctionDef with kwargs
"def f(**kwargs): pass",
# FunctionDef with all kind of args
"def f(a, b=1, c=None, d=[], e={}, *args, **kwargs): pass",
# ClassDef
"class C:pass",
# ClassDef, new style class
"class C(object): pass",
# Return
"def f():return 1",
# Delete
"del v",
# Assign
"v = 1",
# AugAssign
"v += 1",
# For
"for v in v:pass",
# While
"while v:pass",
# If
"if v:pass",
# With
"with x as y: pass",
"with x as y, z as q: pass",
# Raise
"raise Exception('string')",
# TryExcept
"try:\n pass\nexcept Exception:\n pass",
# TryFinally
"try:\n pass\nfinally:\n pass",
# Assert
"assert v",
# Import
"import sys",
# ImportFrom
"from sys import v",
# Global
"global v",
# Expr
"1",
# Pass,
"pass",
# Break
"break",
# Continue
"continue",
# for statements with naked tuples (see http://bugs.python.org/issue6704)
"for a,b in c: pass",
"[(a,b) for a,b in c]",
"((a,b) for a,b in c)",
"((a,b) for (a,b) in c)",
# Multiline generator expression (test for .lineno & .col_offset)
"""(
(
Aa
,
Bb
)
for
Aa
,
Bb in Cc
)""",
# dictcomp
"{a : b for w in x for m in p if g}",
# dictcomp with naked tuple
"{a : b for v,w in x}",
# setcomp
"{r for l in x if g}",
# setcomp with naked tuple
"{r for l,m in x}",
]
# These are compiled through "single"
# because of overlap with "eval", it just tests what
# can't be tested with "eval"
single_tests = [
"1+2"
]
# These are compiled through "eval"
# It should test all expressions
eval_tests = [
# None
"None",
# BoolOp
"a and b",
# BinOp
"a + b",
# UnaryOp
"not v",
# Lambda
"lambda:None",
# Dict
"{ 1:2 }",
# Empty dict
"{}",
# Set
"{None,}",
# Multiline dict (test for .lineno & .col_offset)
"""{
1
:
2
}""",
# ListComp
"[a for b in c if d]",
# GeneratorExp
"(a for b in c if d)",
# Yield - yield expressions can't work outside a function
#
# Compare
"1 < 2 < 3",
# Call
"f(1,2,c=3,*d,**e)",
# Num
"10",
# Str
"'string'",
# Attribute
"a.b",
# Subscript
"a[b:c]",
# Name
"v",
# List
"[1,2,3]",
# Empty list
"[]",
# Tuple
"1,2,3",
# Tuple
"(1,2,3)",
# Empty tuple
"()",
# Combination
"a.b.c.d(a.b[1:2])",
]
# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
# excepthandler, arguments, keywords, alias
class AST_Tests(unittest.TestCase):
def _assertTrueorder(self, ast_node, parent_pos):
if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
return
if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
node_pos = (ast_node.lineno, ast_node.col_offset)
self.assertTrue(node_pos >= parent_pos)
parent_pos = (ast_node.lineno, ast_node.col_offset)
for name in ast_node._fields:
value = getattr(ast_node, name)
if isinstance(value, list):
for child in value:
self._assertTrueorder(child, parent_pos)
elif value is not None:
self._assertTrueorder(value, parent_pos)
def test_AST_objects(self):
if not support.check_impl_detail():
# PyPy also provides a __dict__ to the ast.AST base class.
return
x = ast.AST()
self.assertEqual(x._fields, ())
x.foobar = 42
self.assertEqual(x.foobar, 42)
self.assertEqual(x.__dict__["foobar"], 42)
with self.assertRaises(AttributeError):
x.vararg
with self.assertRaises(TypeError):
# "_ast.AST constructor takes 0 positional arguments"
ast.AST(2)
def test_AST_garbage_collection(self):
class X:
pass
a = ast.AST()
a.x = X()
a.x.a = a
ref = weakref.ref(a.x)
del a
support.gc_collect()
self.assertIsNone(ref())
def test_snippets(self):
for input, output, kind in ((exec_tests, exec_results, "exec"),
(single_tests, single_results, "single"),
(eval_tests, eval_results, "eval")):
for i, o in zip(input, output):
ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
self.assertEqual(to_tuple(ast_tree), o)
self._assertTrueorder(ast_tree, (0, 0))
def test_slice(self):
slc = ast.parse("x[::]").body[0].value.slice
self.assertIsNone(slc.upper)
self.assertIsNone(slc.lower)
self.assertIsNone(slc.step)
def test_from_import(self):
im = ast.parse("from . import y").body[0]
self.assertIsNone(im.module)
def test_non_interned_future_from_ast(self):
mod = ast.parse("from __future__ import division")
self.assertIsInstance(mod.body[0], ast.ImportFrom)
mod.body[0].module = " __future__ ".strip()
compile(mod, "<test>", "exec")
def test_base_classes(self):
self.assertTrue(issubclass(ast.For, ast.stmt))
self.assertTrue(issubclass(ast.Name, ast.expr))
self.assertTrue(issubclass(ast.stmt, ast.AST))
self.assertTrue(issubclass(ast.expr, ast.AST))
self.assertTrue(issubclass(ast.comprehension, ast.AST))
self.assertTrue(issubclass(ast.Gt, ast.AST))
def test_field_attr_existence(self):
for name, item in ast.__dict__.items():
if isinstance(item, type) and name != 'AST' and name[0].isupper():
x = item()
if isinstance(x, ast.AST):
self.assertEqual(type(x._fields), tuple)
def test_arguments(self):
x = ast.arguments()
self.assertEqual(x._fields, ('args', 'vararg', 'varargannotation',
'kwonlyargs', 'kwarg', 'kwargannotation',
'defaults', 'kw_defaults'))
with self.assertRaises(AttributeError):
x.vararg
x = ast.arguments(*range(1, 9))
self.assertEqual(x.vararg, 2)
def test_field_attr_writable(self):
x = ast.Num()
# We can assign to _fields
x._fields = 666
self.assertEqual(x._fields, 666)
def test_classattrs(self):
x = ast.Num()
self.assertEqual(x._fields, ('n',))
with self.assertRaises(AttributeError):
x.n
x = ast.Num(42)
self.assertEqual(x.n, 42)
with self.assertRaises(AttributeError):
x.lineno
with self.assertRaises(AttributeError):
x.foobar
x = ast.Num(lineno=2)
self.assertEqual(x.lineno, 2)
x = ast.Num(42, lineno=0)
self.assertEqual(x.lineno, 0)
self.assertEqual(x._fields, ('n',))
self.assertEqual(x.n, 42)
self.assertRaises(TypeError, ast.Num, 1, 2)
self.assertRaises(TypeError, ast.Num, 1, 2, lineno=0)
def test_module(self):
body = [ast.Num(42)]
x = ast.Module(body)
self.assertEqual(x.body, body)
def test_nodeclasses(self):
# Zero arguments constructor explicitly allowed
x = ast.BinOp()
self.assertEqual(x._fields, ('left', 'op', 'right'))
# Random attribute allowed too
x.foobarbaz = 5
self.assertEqual(x.foobarbaz, 5)
n1 = ast.Num(1)
n3 = ast.Num(3)
addop = ast.Add()
x = ast.BinOp(n1, addop, n3)
self.assertEqual(x.left, n1)
self.assertEqual(x.op, addop)
self.assertEqual(x.right, n3)
x = ast.BinOp(1, 2, 3)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
x = ast.BinOp(1, 2, 3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# node raises exception when not given enough arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
# node raises exception when not given enough arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, lineno=0)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
# can set attributes through kwargs too
x = ast.BinOp(left=1, op=2, right=3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# Random kwargs also allowed
x = ast.BinOp(1, 2, 3, foobarbaz=42)
self.assertEqual(x.foobarbaz, 42)
def test_no_fields(self):
# this used to fail because Sub._fields was None
x = ast.Sub()
self.assertEqual(x._fields, ())
def test_pickling(self):
import pickle
mods = [pickle]
try:
import cPickle
mods.append(cPickle)
except ImportError:
pass
protocols = [0, 1, 2]
for mod in mods:
for protocol in protocols:
for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
ast2 = mod.loads(mod.dumps(ast, protocol))
self.assertEqual(to_tuple(ast2), to_tuple(ast))
def test_invalid_sum(self):
pos = dict(lineno=2, col_offset=3)
m = ast.Module([ast.Expr(ast.expr(**pos), **pos)])
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
if support.check_impl_detail():
self.assertIn("but got <_ast.expr", str(cm.exception))
    def test_invalid_identifier(self):
m = ast.Module([ast.Expr(ast.Name(42, ast.Load()))])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
if support.check_impl_detail():
self.assertIn("identifier must be of type str", str(cm.exception))
def test_invalid_string(self):
m = ast.Module([ast.Expr(ast.Str(42))])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
if support.check_impl_detail():
self.assertIn("string must be of type str or uni", str(cm.exception))
def test_empty_yield_from(self):
# Issue 16546: yield from value is not optional.
empty_yield_from = ast.parse("def f():\n yield from g()")
empty_yield_from.body[0].body[0].value.value = None
with self.assertRaises(ValueError) as cm:
compile(empty_yield_from, "<test>", "exec")
self.assertIn("field value is required", str(cm.exception))
class ASTHelpers_Test(unittest.TestCase):
def test_parse(self):
a = ast.parse('foo(1 + 1)')
b = compile('foo(1 + 1)', '<unknown>', 'exec', ast.PyCF_ONLY_AST)
self.assertEqual(ast.dump(a), ast.dump(b))
def test_parse_in_error(self):
try:
1/0
except Exception:
with self.assertRaises(SyntaxError) as e:
ast.literal_eval(r"'\U'")
self.assertIsNotNone(e.exception.__context__)
def test_dump(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
"args=[Name(id='eggs', ctx=Load()), Str(s='and cheese')], "
"keywords=[], starargs=None, kwargs=None))])"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
"Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
"Str('and cheese')], [], None, None))])"
)
self.assertEqual(ast.dump(node, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
"lineno=1, col_offset=0), args=[Name(id='eggs', ctx=Load(), "
"lineno=1, col_offset=5), Str(s='and cheese', lineno=1, "
"col_offset=11)], keywords=[], starargs=None, kwargs=None, "
"lineno=1, col_offset=0), lineno=1, col_offset=0)])"
)
def test_copy_location(self):
src = ast.parse('1 + 1', mode='eval')
src.body.right = ast.copy_location(ast.Num(2), src.body.right)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=1, col_offset=0), '
'op=Add(), right=Num(n=2, lineno=1, col_offset=4), lineno=1, '
'col_offset=0))'
)
def test_fix_missing_locations(self):
src = ast.parse('write("spam")')
src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()),
[ast.Str('eggs')], [], None, None)))
self.assertEqual(src, ast.fix_missing_locations(src))
self.assertEqual(ast.dump(src, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
"lineno=1, col_offset=0), args=[Str(s='spam', lineno=1, "
"col_offset=6)], keywords=[], starargs=None, kwargs=None, "
"lineno=1, col_offset=0), lineno=1, col_offset=0), "
"Expr(value=Call(func=Name(id='spam', ctx=Load(), lineno=1, "
"col_offset=0), args=[Str(s='eggs', lineno=1, col_offset=0)], "
"keywords=[], starargs=None, kwargs=None, lineno=1, "
"col_offset=0), lineno=1, col_offset=0)])"
)
def test_increment_lineno(self):
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src, n=3), src)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
'col_offset=0))'
)
# issue10869: do not increment lineno of root twice
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
'col_offset=0))'
)
def test_iter_fields(self):
node = ast.parse('foo()', mode='eval')
d = dict(ast.iter_fields(node.body))
self.assertEqual(d.pop('func').id, 'foo')
self.assertEqual(d, {'keywords': [], 'kwargs': None,
'args': [], 'starargs': None})
def test_iter_child_nodes(self):
node = ast.parse("spam(23, 42, eggs='leek')", mode='eval')
self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
iterator = ast.iter_child_nodes(node.body)
self.assertEqual(next(iterator).id, 'spam')
self.assertEqual(next(iterator).n, 23)
self.assertEqual(next(iterator).n, 42)
self.assertEqual(ast.dump(next(iterator)),
"keyword(arg='eggs', value=Str(s='leek'))"
)
def test_get_docstring(self):
node = ast.parse('def foo():\n """line one\n line two"""')
self.assertEqual(ast.get_docstring(node.body[0]),
'line one\nline two')
def test_literal_eval(self):
self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3])
self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None))
self.assertEqual(ast.literal_eval('{1, 2, 3}'), {1, 2, 3})
self.assertEqual(ast.literal_eval('b"hi"'), b"hi")
self.assertRaises(ValueError, ast.literal_eval, 'foo()')
self.assertEqual(ast.literal_eval('-6'), -6)
self.assertEqual(ast.literal_eval('-6j+3'), 3-6j)
self.assertEqual(ast.literal_eval('3.25'), 3.25)
def test_literal_eval_issue4907(self):
self.assertEqual(ast.literal_eval('2j'), 2j)
self.assertEqual(ast.literal_eval('10 + 2j'), 10 + 2j)
self.assertEqual(ast.literal_eval('1.5 - 2j'), 1.5 - 2j)
def test_bad_integer(self):
# issue13436: Bad error message with invalid numeric values
body = [ast.ImportFrom(module='time',
names=[ast.alias(name='sleep')],
level=None,
lineno=None, col_offset=None)]
mod = ast.Module(body)
with self.assertRaises((TypeError, ValueError)) as cm:
compile(mod, 'test', 'exec')
if support.check_impl_detail():
self.assertIn("invalid integer value: None", str(cm.exception))
class ASTValidatorTests(unittest.TestCase):
def mod(self, mod, msg=None, mode="exec", *, exc=ValueError):
mod.lineno = mod.col_offset = 0
ast.fix_missing_locations(mod)
with self.assertRaises(exc) as cm:
compile(mod, "<test>", mode)
if msg is not None:
self.assertIn(msg, str(cm.exception))
def expr(self, node, msg=None, *, exc=ValueError):
mod = ast.Module([ast.Expr(node)])
self.mod(mod, msg, exc=exc)
def stmt(self, stmt, msg=None):
mod = ast.Module([stmt])
self.mod(mod, msg)
def test_module(self):
m = ast.Interactive([ast.Expr(ast.Name("x", ast.Store()))])
self.mod(m, "must have Load context", "single")
m = ast.Expression(ast.Name("x", ast.Store()))
self.mod(m, "must have Load context", "eval")
def _check_arguments(self, fac, check):
def arguments(args=None, vararg=None, varargannotation=None,
kwonlyargs=None, kwarg=None, kwargannotation=None,
defaults=None, kw_defaults=None):
if args is None:
args = []
if kwonlyargs is None:
kwonlyargs = []
if defaults is None:
defaults = []
if kw_defaults is None:
kw_defaults = []
args = ast.arguments(args, vararg, varargannotation, kwonlyargs,
kwarg, kwargannotation, defaults, kw_defaults)
return fac(args)
args = [ast.arg("x", ast.Name("x", ast.Store()))]
check(arguments(args=args), "must have Load context")
check(arguments(varargannotation=ast.Num(3)),
"varargannotation but no vararg")
check(arguments(varargannotation=ast.Name("x", ast.Store()), vararg="x"),
"must have Load context")
check(arguments(kwonlyargs=args), "must have Load context")
check(arguments(kwargannotation=ast.Num(42)),
"kwargannotation but no kwarg")
check(arguments(kwargannotation=ast.Name("x", ast.Store()),
kwarg="x"), "must have Load context")
check(arguments(defaults=[ast.Num(3)]),
"more positional defaults than args")
check(arguments(kw_defaults=[ast.Num(4)]),
"length of kwonlyargs is not the same as kw_defaults")
args = [ast.arg("x", ast.Name("x", ast.Load()))]
check(arguments(args=args, defaults=[ast.Name("x", ast.Store())]),
"must have Load context")
args = [ast.arg("a", ast.Name("x", ast.Load())),
ast.arg("b", ast.Name("y", ast.Load()))]
check(arguments(kwonlyargs=args,
kw_defaults=[None, ast.Name("x", ast.Store())]),
"must have Load context")
def test_funcdef(self):
a = ast.arguments([], None, None, [], None, None, [], [])
f = ast.FunctionDef("x", a, [], [], None)
self.stmt(f, "empty body on FunctionDef")
f = ast.FunctionDef("x", a, [ast.Pass()], [ast.Name("x", ast.Store())],
None)
self.stmt(f, "must have Load context")
f = ast.FunctionDef("x", a, [ast.Pass()], [],
ast.Name("x", ast.Store()))
self.stmt(f, "must have Load context")
def fac(args):
return ast.FunctionDef("x", args, [ast.Pass()], [], None)
self._check_arguments(fac, self.stmt)
def test_classdef(self):
def cls(bases=None, keywords=None, starargs=None, kwargs=None,
body=None, decorator_list=None):
if bases is None:
bases = []
if keywords is None:
keywords = []
if body is None:
body = [ast.Pass()]
if decorator_list is None:
decorator_list = []
return ast.ClassDef("myclass", bases, keywords, starargs,
kwargs, body, decorator_list)
self.stmt(cls(bases=[ast.Name("x", ast.Store())]),
"must have Load context")
self.stmt(cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store()))]),
"must have Load context")
self.stmt(cls(starargs=ast.Name("x", ast.Store())),
"must have Load context")
self.stmt(cls(kwargs=ast.Name("x", ast.Store())),
"must have Load context")
self.stmt(cls(body=[]), "empty body on ClassDef")
self.stmt(cls(body=[None]), "None disallowed")
self.stmt(cls(decorator_list=[ast.Name("x", ast.Store())]),
"must have Load context")
def test_delete(self):
self.stmt(ast.Delete([]), "empty targets on Delete")
self.stmt(ast.Delete([None]), "None disallowed")
self.stmt(ast.Delete([ast.Name("x", ast.Load())]),
"must have Del context")
def test_assign(self):
self.stmt(ast.Assign([], ast.Num(3)), "empty targets on Assign")
self.stmt(ast.Assign([None], ast.Num(3)), "None disallowed")
self.stmt(ast.Assign([ast.Name("x", ast.Load())], ast.Num(3)),
"must have Store context")
self.stmt(ast.Assign([ast.Name("x", ast.Store())],
ast.Name("y", ast.Store())),
"must have Load context")
def test_augassign(self):
aug = ast.AugAssign(ast.Name("x", ast.Load()), ast.Add(),
ast.Name("y", ast.Load()))
self.stmt(aug, "must have Store context")
aug = ast.AugAssign(ast.Name("x", ast.Store()), ast.Add(),
ast.Name("y", ast.Store()))
self.stmt(aug, "must have Load context")
def test_for(self):
x = ast.Name("x", ast.Store())
y = ast.Name("y", ast.Load())
p = ast.Pass()
self.stmt(ast.For(x, y, [], []), "empty body on For")
self.stmt(ast.For(ast.Name("x", ast.Load()), y, [p], []),
"must have Store context")
self.stmt(ast.For(x, ast.Name("y", ast.Store()), [p], []),
"must have Load context")
e = ast.Expr(ast.Name("x", ast.Store()))
self.stmt(ast.For(x, y, [e], []), "must have Load context")
self.stmt(ast.For(x, y, [p], [e]), "must have Load context")
def test_while(self):
self.stmt(ast.While(ast.Num(3), [], []), "empty body on While")
self.stmt(ast.While(ast.Name("x", ast.Store()), [ast.Pass()], []),
"must have Load context")
self.stmt(ast.While(ast.Num(3), [ast.Pass()],
[ast.Expr(ast.Name("x", ast.Store()))]),
"must have Load context")
def test_if(self):
self.stmt(ast.If(ast.Num(3), [], []), "empty body on If")
i = ast.If(ast.Name("x", ast.Store()), [ast.Pass()], [])
self.stmt(i, "must have Load context")
i = ast.If(ast.Num(3), [ast.Expr(ast.Name("x", ast.Store()))], [])
self.stmt(i, "must have Load context")
i = ast.If(ast.Num(3), [ast.Pass()],
[ast.Expr(ast.Name("x", ast.Store()))])
self.stmt(i, "must have Load context")
def test_with(self):
p = ast.Pass()
self.stmt(ast.With([], [p]), "empty items on With")
i = ast.withitem(ast.Num(3), None)
self.stmt(ast.With([i], []), "empty body on With")
i = ast.withitem(ast.Name("x", ast.Store()), None)
self.stmt(ast.With([i], [p]), "must have Load context")
i = ast.withitem(ast.Num(3), ast.Name("x", ast.Load()))
self.stmt(ast.With([i], [p]), "must have Store context")
def test_raise(self):
r = ast.Raise(None, ast.Num(3))
self.stmt(r, "Raise with cause but no exception")
r = ast.Raise(ast.Name("x", ast.Store()), None)
self.stmt(r, "must have Load context")
r = ast.Raise(ast.Num(4), ast.Name("x", ast.Store()))
self.stmt(r, "must have Load context")
def test_try(self):
p = ast.Pass()
t = ast.Try([], [], [], [p])
self.stmt(t, "empty body on Try")
t = ast.Try([ast.Expr(ast.Name("x", ast.Store()))], [], [], [p])
self.stmt(t, "must have Load context")
t = ast.Try([p], [], [], [])
self.stmt(t, "Try has neither except handlers nor finalbody")
t = ast.Try([p], [], [p], [p])
self.stmt(t, "Try has orelse but no except handlers")
t = ast.Try([p], [ast.ExceptHandler(None, "x", [])], [], [])
self.stmt(t, "empty body on ExceptHandler")
e = [ast.ExceptHandler(ast.Name("x", ast.Store()), "y", [p])]
self.stmt(ast.Try([p], e, [], []), "must have Load context")
e = [ast.ExceptHandler(None, "x", [p])]
t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store()))], [p])
self.stmt(t, "must have Load context")
t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store()))])
self.stmt(t, "must have Load context")
def test_assert(self):
self.stmt(ast.Assert(ast.Name("x", ast.Store()), None),
"must have Load context")
assrt = ast.Assert(ast.Name("x", ast.Load()),
ast.Name("y", ast.Store()))
self.stmt(assrt, "must have Load context")
def test_import(self):
self.stmt(ast.Import([]), "empty names on Import")
def test_importfrom(self):
imp = ast.ImportFrom(None, [ast.alias("x", None)], -42)
self.stmt(imp, "level less than -1")
self.stmt(ast.ImportFrom(None, [], 0), "empty names on ImportFrom")
def test_global(self):
self.stmt(ast.Global([]), "empty names on Global")
def test_nonlocal(self):
self.stmt(ast.Nonlocal([]), "empty names on Nonlocal")
def test_expr(self):
e = ast.Expr(ast.Name("x", ast.Store()))
self.stmt(e, "must have Load context")
def test_boolop(self):
b = ast.BoolOp(ast.And(), [])
self.expr(b, "less than 2 values")
b = ast.BoolOp(ast.And(), [ast.Num(3)])
self.expr(b, "less than 2 values")
b = ast.BoolOp(ast.And(), [ast.Num(4), None])
self.expr(b, "None disallowed")
b = ast.BoolOp(ast.And(), [ast.Num(4), ast.Name("x", ast.Store())])
self.expr(b, "must have Load context")
def test_unaryop(self):
u = ast.UnaryOp(ast.Not(), ast.Name("x", ast.Store()))
self.expr(u, "must have Load context")
def test_lambda(self):
a = ast.arguments([], None, None, [], None, None, [], [])
self.expr(ast.Lambda(a, ast.Name("x", ast.Store())),
"must have Load context")
def fac(args):
return ast.Lambda(args, ast.Name("x", ast.Load()))
self._check_arguments(fac, self.expr)
def test_ifexp(self):
l = ast.Name("x", ast.Load())
s = ast.Name("y", ast.Store())
for args in (s, l, l), (l, s, l), (l, l, s):
self.expr(ast.IfExp(*args), "must have Load context")
def test_dict(self):
d = ast.Dict([], [ast.Name("x", ast.Load())])
self.expr(d, "same number of keys as values")
d = ast.Dict([None], [ast.Name("x", ast.Load())])
self.expr(d, "None disallowed")
d = ast.Dict([ast.Name("x", ast.Load())], [None])
self.expr(d, "None disallowed")
def test_set(self):
self.expr(ast.Set([None]), "None disallowed")
s = ast.Set([ast.Name("x", ast.Store())])
self.expr(s, "must have Load context")
def _check_comprehension(self, fac):
self.expr(fac([]), "comprehension with no generators")
g = ast.comprehension(ast.Name("x", ast.Load()),
ast.Name("x", ast.Load()), [])
self.expr(fac([g]), "must have Store context")
g = ast.comprehension(ast.Name("x", ast.Store()),
ast.Name("x", ast.Store()), [])
self.expr(fac([g]), "must have Load context")
x = ast.Name("x", ast.Store())
y = ast.Name("y", ast.Load())
g = ast.comprehension(x, y, [None])
self.expr(fac([g]), "None disallowed")
g = ast.comprehension(x, y, [ast.Name("x", ast.Store())])
self.expr(fac([g]), "must have Load context")
def _simple_comp(self, fac):
g = ast.comprehension(ast.Name("x", ast.Store()),
ast.Name("x", ast.Load()), [])
self.expr(fac(ast.Name("x", ast.Store()), [g]),
"must have Load context")
def wrap(gens):
return fac(ast.Name("x", ast.Store()), gens)
self._check_comprehension(wrap)
def test_listcomp(self):
self._simple_comp(ast.ListComp)
def test_setcomp(self):
self._simple_comp(ast.SetComp)
def test_generatorexp(self):
self._simple_comp(ast.GeneratorExp)
def test_dictcomp(self):
g = ast.comprehension(ast.Name("y", ast.Store()),
ast.Name("p", ast.Load()), [])
c = ast.DictComp(ast.Name("x", ast.Store()),
ast.Name("y", ast.Load()), [g])
self.expr(c, "must have Load context")
c = ast.DictComp(ast.Name("x", ast.Load()),
ast.Name("y", ast.Store()), [g])
self.expr(c, "must have Load context")
def factory(comps):
k = ast.Name("x", ast.Load())
v = ast.Name("y", ast.Load())
return ast.DictComp(k, v, comps)
self._check_comprehension(factory)
def test_yield(self):
self.expr(ast.Yield(ast.Name("x", ast.Store())), "must have Load")
self.expr(ast.YieldFrom(ast.Name("x", ast.Store())), "must have Load")
def test_compare(self):
left = ast.Name("x", ast.Load())
comp = ast.Compare(left, [ast.In()], [])
self.expr(comp, "no comparators")
comp = ast.Compare(left, [ast.In()], [ast.Num(4), ast.Num(5)])
self.expr(comp, "different number of comparators and operands")
comp = ast.Compare(ast.Num("blah"), [ast.In()], [left])
self.expr(comp, "non-numeric", exc=TypeError)
comp = ast.Compare(left, [ast.In()], [ast.Num("blah")])
self.expr(comp, "non-numeric", exc=TypeError)
def test_call(self):
func = ast.Name("x", ast.Load())
args = [ast.Name("y", ast.Load())]
keywords = [ast.keyword("w", ast.Name("z", ast.Load()))]
stararg = ast.Name("p", ast.Load())
kwarg = ast.Name("q", ast.Load())
call = ast.Call(ast.Name("x", ast.Store()), args, keywords, stararg,
kwarg)
self.expr(call, "must have Load context")
call = ast.Call(func, [None], keywords, stararg, kwarg)
self.expr(call, "None disallowed")
bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store()))]
call = ast.Call(func, args, bad_keywords, stararg, kwarg)
self.expr(call, "must have Load context")
call = ast.Call(func, args, keywords, ast.Name("z", ast.Store()), kwarg)
self.expr(call, "must have Load context")
call = ast.Call(func, args, keywords, stararg,
ast.Name("w", ast.Store()))
self.expr(call, "must have Load context")
def test_num(self):
class subint(int):
pass
class subfloat(float):
pass
class subcomplex(complex):
pass
for obj in "0", "hello", subint(), subfloat(), subcomplex():
self.expr(ast.Num(obj), "non-numeric", exc=TypeError)
def test_attribute(self):
attr = ast.Attribute(ast.Name("x", ast.Store()), "y", ast.Load())
self.expr(attr, "must have Load context")
def test_subscript(self):
sub = ast.Subscript(ast.Name("x", ast.Store()), ast.Index(ast.Num(3)),
ast.Load())
self.expr(sub, "must have Load context")
x = ast.Name("x", ast.Load())
sub = ast.Subscript(x, ast.Index(ast.Name("y", ast.Store())),
ast.Load())
self.expr(sub, "must have Load context")
s = ast.Name("x", ast.Store())
for args in (s, None, None), (None, s, None), (None, None, s):
sl = ast.Slice(*args)
self.expr(ast.Subscript(x, sl, ast.Load()),
"must have Load context")
sl = ast.ExtSlice([])
self.expr(ast.Subscript(x, sl, ast.Load()), "empty dims on ExtSlice")
sl = ast.ExtSlice([ast.Index(s)])
self.expr(ast.Subscript(x, sl, ast.Load()), "must have Load context")
def test_starred(self):
left = ast.List([ast.Starred(ast.Name("x", ast.Load()), ast.Store())],
ast.Store())
assign = ast.Assign([left], ast.Num(4))
self.stmt(assign, "must have Store context")
def _sequence(self, fac):
self.expr(fac([None], ast.Load()), "None disallowed")
self.expr(fac([ast.Name("x", ast.Store())], ast.Load()),
"must have Load context")
def test_list(self):
self._sequence(ast.List)
def test_tuple(self):
self._sequence(ast.Tuple)
def test_stdlib_validates(self):
stdlib = os.path.dirname(ast.__file__)
tests = [fn for fn in os.listdir(stdlib) if fn.endswith(".py")]
tests.extend(["test/test_grammar.py", "test/test_unpack_ex.py"])
for module in tests:
fn = os.path.join(stdlib, module)
with open(fn, "r", encoding="utf-8") as fp:
source = fp.read()
mod = ast.parse(source)
compile(mod, fn, "exec")
def test_main():
support.run_unittest(AST_Tests, ASTHelpers_Test, ASTValidatorTests)
def main():
if __name__ != '__main__':
return
if sys.argv[1:] == ['-g']:
for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
(eval_tests, "eval")):
print(kind+"_results = [")
for s in statements:
print(repr(to_tuple(compile(s, "?", kind, 0x400)))+",")
print("]")
print("main()")
raise SystemExit
test_main()
#### EVERYTHING BELOW IS GENERATED #####
exec_results = [
('Module', [('Expr', (1, 0), ('Name', (1, 0), 'None', ('Load',)))]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, [], None, None, [], []), [('Pass', (1, 9))], [], None)]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('arg', 'a', None)], None, None, [], None, None, [], []), [('Pass', (1, 10))], [], None)]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('arg', 'a', None)], None, None, [], None, None, [('Num', (1, 8), 0)], []), [('Pass', (1, 12))], [], None)]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], 'args', None, [], None, None, [], []), [('Pass', (1, 14))], [], None)]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, [], 'kwargs', None, [], []), [('Pass', (1, 17))], [], None)]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('arg', 'a', None), ('arg', 'b', None), ('arg', 'c', None), ('arg', 'd', None), ('arg', 'e', None)], 'args', None, [], 'kwargs', None, [('Num', (1, 11), 1), ('Name', (1, 16), 'None', ('Load',)), ('List', (1, 24), [], ('Load',)), ('Dict', (1, 30), [], [])], []), [('Pass', (1, 52))], [], None)]),
('Module', [('ClassDef', (1, 0), 'C', [], [], None, None, [('Pass', (1, 8))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [('Name', (1, 8), 'object', ('Load',))], [], None, None, [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, [], None, None, [], []), [('Return', (1, 8), ('Num', (1, 15), 1))], [], None)]),
('Module', [('Delete', (1, 0), [('Name', (1, 4), 'v', ('Del',))])]),
('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]),
('Module', [('AugAssign', (1, 0), ('Name', (1, 0), 'v', ('Store',)), ('Add',), ('Num', (1, 5), 1))]),
('Module', [('For', (1, 0), ('Name', (1, 4), 'v', ('Store',)), ('Name', (1, 9), 'v', ('Load',)), [('Pass', (1, 11))], [])]),
('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]),
('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]),
('Module', [('With', (1, 0), [('withitem', ('Name', (1, 5), 'x', ('Load',)), ('Name', (1, 10), 'y', ('Store',)))], [('Pass', (1, 13))])]),
('Module', [('With', (1, 0), [('withitem', ('Name', (1, 5), 'x', ('Load',)), ('Name', (1, 10), 'y', ('Store',))), ('withitem', ('Name', (1, 13), 'z', ('Load',)), ('Name', (1, 18), 'q', ('Store',)))], [('Pass', (1, 21))])]),
('Module', [('Raise', (1, 0), ('Call', (1, 6), ('Name', (1, 6), 'Exception', ('Load',)), [('Str', (1, 16), 'string')], [], None, None), None)]),
('Module', [('Try', (1, 0), [('Pass', (2, 2))], [('ExceptHandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [], [])]),
('Module', [('Try', (1, 0), [('Pass', (2, 2))], [], [], [('Pass', (4, 2))])]),
('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]),
('Module', [('Import', (1, 0), [('alias', 'sys', None)])]),
('Module', [('ImportFrom', (1, 0), 'sys', [('alias', 'v', None)], 0)]),
('Module', [('Global', (1, 0), ['v'])]),
('Module', [('Expr', (1, 0), ('Num', (1, 0), 1))]),
('Module', [('Pass', (1, 0))]),
('Module', [('Break', (1, 0))]),
('Module', [('Continue', (1, 0))]),
('Module', [('For', (1, 0), ('Tuple', (1, 4), [('Name', (1, 4), 'a', ('Store',)), ('Name', (1, 6), 'b', ('Store',))], ('Store',)), ('Name', (1, 11), 'c', ('Load',)), [('Pass', (1, 14))], [])]),
('Module', [('Expr', (1, 0), ('ListComp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 12), [('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 20), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (2, 4), ('Tuple', (3, 4), [('Name', (3, 4), 'Aa', ('Load',)), ('Name', (5, 7), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4), [('Name', (8, 4), 'Aa', ('Store',)), ('Name', (10, 4), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10), 'Cc', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]),
]
single_results = [
('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]),
]
eval_results = [
('Expression', ('Name', (1, 0), 'None', ('Load',))),
('Expression', ('BoolOp', (1, 0), ('And',), [('Name', (1, 0), 'a', ('Load',)), ('Name', (1, 6), 'b', ('Load',))])),
('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))),
('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))),
('Expression', ('Lambda', (1, 0), ('arguments', [], None, None, [], None, None, [], []), ('Name', (1, 7), 'None', ('Load',)))),
('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])),
('Expression', ('Dict', (1, 0), [], [])),
('Expression', ('Set', (1, 0), [('Name', (1, 1), 'None', ('Load',))])),
('Expression', ('Dict', (1, 0), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])),
('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])),
('Expression', ('Call', (1, 0), ('Name', (1, 0), 'f', ('Load',)), [('Num', (1, 2), 1), ('Num', (1, 4), 2)], [('keyword', 'c', ('Num', (1, 8), 3))], ('Name', (1, 11), 'd', ('Load',)), ('Name', (1, 15), 'e', ('Load',)))),
('Expression', ('Num', (1, 0), 10)),
('Expression', ('Str', (1, 0), 'string')),
('Expression', ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',))),
('Expression', ('Subscript', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Slice', ('Name', (1, 2), 'b', ('Load',)), ('Name', (1, 4), 'c', ('Load',)), None), ('Load',))),
('Expression', ('Name', (1, 0), 'v', ('Load',))),
('Expression', ('List', (1, 0), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('List', (1, 0), [], ('Load',))),
('Expression', ('Tuple', (1, 0), [('Num', (1, 0), 1), ('Num', (1, 2), 2), ('Num', (1, 4), 3)], ('Load',))),
('Expression', ('Tuple', (1, 1), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('Tuple', (1, 0), [], ('Load',))),
('Expression', ('Call', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8), ('Attribute', (1, 8), ('Name', (1, 8), 'a', ('Load',)), 'b', ('Load',)), ('Slice', ('Num', (1, 12), 1), ('Num', (1, 14), 2), None), ('Load',))], [], None, None)),
]
main()
|
|
#!/usr/bin/python
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Routines for registering images, based upon star locations.
"""
__all__ = (
'RegistrationFailed',
'RegistrationResult',
'register_pair',
)
import collections
import random
import numpy
# Maximum number of RANSAC iterations to run before giving up.
MAX_ITERS = 100000
# Number of stars that must be paired in a given solution.
NUM_STARS_TO_PAIR = 4
# Maximum permissible difference between corresponding star-to-star distances
# for a candidate pairing to be accepted.
MAX_DISTANCE = 3.0
# Number of registrations that are tried if the initial registration fails.
REGISTRATION_RETRIES = 3
class RegistrationFailed(Exception):
pass
def _fits_model(pair, model):
"""
Check if a given pair of stars fits the model implied by a given sequence
of correspondences.
"""
    # Check that the distances from the new star to the already-paired stars
    # are (approximately) the same in both images.
s1, s2 = pair
for t1, t2 in model:
if abs(s1.dist(t1) - s2.dist(t2)) > MAX_DISTANCE:
return False
return True
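# Rough usage sketch (not from the original module): _fits_model only relies
# on the stars' dist() method, so simple stand-in objects suffice.  The
# distances below are consistent between the two "images", so the candidate
# pair is accepted.
def _fits_model_demo():
    class _Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def dist(self, other):
            return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5
    # Two already-paired stars, shifted by (10, 10) between the images.
    model = [(_Point(0, 0), _Point(10, 10)), (_Point(3, 0), _Point(13, 10))]
    # A new candidate pair shifted by the same amount fits the model.
    assert _fits_model((_Point(0, 4), _Point(10, 14)), model)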
def _pick_random_model(stars1, stars2):
return zip(random.sample(stars1, 2), random.sample(stars2, 2))
def _find_correspondences(stars1, stars2):
"""
Find a sequence of at least NUM_STARS_TO_PAIR correspondences that form a
consistent model.
"""
stars1 = list(stars1)
stars2 = list(stars2)
for i in range(MAX_ITERS):
model = _pick_random_model(stars1, stars2)
if not _fits_model(model[1], model[:1]):
continue
for s1 in stars1:
if s1 in (pair[0] for pair in model):
continue
for s2 in stars2:
if s2 in (pair[1] for pair in model):
continue
if _fits_model((s1, s2), model):
model.append((s1, s2))
if len(model) >= NUM_STARS_TO_PAIR:
return model
raise RegistrationFailed
def _transformation_from_correspondences(correspondences):
"""
Return an affine transformation [R | T] such that:
sum ||R*p1,i + T - p2,i||^2
    is minimized, where p1,i and p2,i are the position vectors of the first
    and second star in the i'th correspondence, respectively.
"""
# The algorithm proceeds by first subtracting the centroid from each set of
    # points. A rotation matrix (i.e. a 2x2 orthogonal matrix) must now be
# sought which maps the translated points1 onto points2. The SVD is used to
# do this. See:
# https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
points1 = numpy.vstack(s1.pos_vec.T for s1, s2 in correspondences)
points2 = numpy.vstack(s2.pos_vec.T for s1, s2 in correspondences)
def centroid(points):
return numpy.sum(points, axis=0) / points.shape[0]
c1 = centroid(points1)
c2 = centroid(points2)
points1 -= c1
points2 -= c2
U, S, Vt = numpy.linalg.svd(points1.T * points2)
# The R we seek is in fact the transpose of the one given by U * Vt. This
# is because the above formulation assumes the matrix goes on the right
    # (with row vectors) whereas our solution requires the matrix to be on the
# left (with column vectors).
R = (U * Vt).T
return numpy.vstack([numpy.hstack((R, c2.T - R * c1.T)),
numpy.matrix([0., 0., 1.])])
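# Minimal check sketch (not from the original module): the star objects are
# assumed here to expose pos_vec as a 2x1 numpy.matrix, which is how this
# function indexes them.  For noise-free correspondences generated from a
# known rotation R and translation T, the recovered matrix should contain R
# in its top-left 2x2 block and T in its last column.
def _transformation_demo():
    class _FakeStar(object):
        def __init__(self, x, y):
            self.pos_vec = numpy.matrix([[float(x)], [float(y)]])
    theta = 0.1
    R = numpy.matrix([[numpy.cos(theta), -numpy.sin(theta)],
                      [numpy.sin(theta), numpy.cos(theta)]])
    T = numpy.matrix([[2.0], [-1.0]])
    correspondences = []
    for x, y in [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (3.0, 2.0)]:
        p2 = R * numpy.matrix([[x], [y]]) + T
        correspondences.append((_FakeStar(x, y), _FakeStar(p2[0, 0], p2[1, 0])))
    M = numpy.asarray(_transformation_from_correspondences(correspondences))
    assert numpy.allclose(M[:2, :2], numpy.asarray(R))
    assert numpy.allclose(M[:2, 2], numpy.asarray(T).ravel())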
def register_pair(stars1, stars2):
"""
Align a pair of images, based on their stars.
Arguments:
stars1: The stars in the first image.
stars2: The stars in the second image.
Returns:
        A 3x3 affine transformation matrix, mapping star coordinates in the
        first image to star coordinates in the second image.
"""
return _transformation_from_correspondences(
_find_correspondences(stars1, stars2))
class RegistrationResult(collections.namedtuple('_RegistrationResultBase',
('exception', 'transform'))):
"""
The result of a single image's registration.
One of these is returned for each input image in a `register_many` call.
"""
def result(self):
if self.exception:
raise self.exception
return self.transform
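# Usage sketch for RegistrationResult (illustrative):
#     ok = RegistrationResult(exception=None, transform=numpy.identity(3))
#     ok.result()       # returns the transform
#     bad = RegistrationResult(exception=RegistrationFailed(), transform=None)
#     bad.result()      # raises RegistrationFailed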
def register_many(stars_seq, reference_idx=0):
"""
Register a sequence of images, based on their stars.
Arguments:
        stars_seq: A sequence of iterables of stars. Each element corresponds
            with the stars from a particular image.
Returns:
An iterable of `RegistrationResult`, with one per input image. The
first result is always the identity matrix, whereas subsequent results
give the transformation to map the first image onto the corresponding
input image, or a `RegistrationFailed` exception in the case that
registration failed.
"""
stars_it = iter(stars_seq)
# The first image is used as the reference, so has the identity
# transformation.
registered = [(next(stars_it), numpy.matrix(numpy.identity(3)))]
yield RegistrationResult(exception=None, transform=registered[0][1])
# For each other image, first attempt to register it with the first image,
# and then with the last `REGISTRATION_RETRIES` successfully registered
# images. This seems to give good success rates, while not having too much
# drift.
for stars2 in stars_it:
for stars1, M1 in [registered[0]] + registered[-REGISTRATION_RETRIES:]:
try:
M2 = register_pair(stars1, stars2)
except RegistrationFailed as e:
continue
            else:
                yield RegistrationResult(exception=None, transform=(M1 * M2))
                # Record only successful registrations; appending after a
                # failed attempt would pair stars2 with a stale transform.
                registered.append((stars2, (M1 * M2)))
                break
        else:
            yield RegistrationResult(exception=RegistrationFailed(),
                                     transform=None)
def _draw_correspondences(correspondences, im1, im2, stars1, stars2):
"""
Produce a sequence of images to illustrate a particular correspondence.
"""
assert im1.shape == im2.shape
SCALE_FACTOR = 0.4
new_size = (int(im1.shape[1] * SCALE_FACTOR),
int(im1.shape[0] * SCALE_FACTOR))
im1 = cv2.resize(im1, new_size)
im2 = cv2.resize(im2, new_size)
def boost_brightness(im):
return numpy.min([im.astype(numpy.float64) * 16,
255. * numpy.ones(im.shape)],
axis=0).astype(numpy.uint8)
im1 = cv2.cvtColor(boost_brightness(im1), cv2.COLOR_GRAY2RGB)
im2 = cv2.cvtColor(boost_brightness(im2), cv2.COLOR_GRAY2RGB)
def star_pos(s):
return tuple(int(x * SCALE_FACTOR) for x in s.pos)
def draw_stars(im, stars, color):
for s in stars:
pos = star_pos(s)
cv2.circle(im, pos, radius=5, color=color, lineType=cv2.CV_AA)
def output_image(name, im1, im2):
im = numpy.hstack([im1, im2])
cv2.imwrite(name, im)
draw_stars(im1, stars1, color=(0, 0, 255))
draw_stars(im2, stars2, color=(0, 0, 255))
step_num = 0
output_image("step{}.png".format(step_num), im1, im2)
step_num += 1
LINE_COLOURS = [(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(0, 255, 255),
(255, 0, 255),
(255, 255, 0)]
for idx, (s1, s2) in enumerate(correspondences):
im1_copy = im1.copy()
im2_copy = im2.copy()
draw_stars(im1, [s1], color=(255, 255, 0))
draw_stars(im2, [s2], color=(255, 255, 0))
draw_stars(im1_copy, [s1], color=(0, 255, 255))
draw_stars(im2_copy, [s2], color=(0, 255, 255))
output_image("step{}.png".format(step_num), im1_copy, im2_copy)
step_num += 1
for idx2, (t1, t2) in enumerate(correspondences[:idx]):
cv2.line(im1_copy, star_pos(t1), star_pos(s1), LINE_COLOURS[idx2],
lineType=cv2.CV_AA)
cv2.line(im2_copy, star_pos(t2), star_pos(s2), LINE_COLOURS[idx2],
lineType=cv2.CV_AA)
output_image("step{}.png".format(step_num), im1_copy, im2_copy)
step_num += 1
if __name__ == "__main__":
import sys
import cv2
import stars
if sys.argv[1] == "register_pair":
im1 = cv2.imread(sys.argv[2], cv2.IMREAD_GRAYSCALE)
im2 = cv2.imread(sys.argv[3], cv2.IMREAD_GRAYSCALE)
stars1 = stars.extract(im1)
stars2 = stars.extract(im2)
A = register_pair(stars1, stars2)
print A
if sys.argv[1] == "draw_correspondences":
im1 = cv2.imread(sys.argv[2], cv2.IMREAD_GRAYSCALE)
im2 = cv2.imread(sys.argv[3], cv2.IMREAD_GRAYSCALE)
stars1 = list(stars.extract(im1))
stars2 = list(stars.extract(im2))
correspondences = _find_correspondences(stars1, stars2)
_draw_correspondences(correspondences, im1, im2, stars1, stars2)
if sys.argv[1] == "register_many":
fnames = sys.argv[2:]
ims = []
for fname in fnames:
print "Loading {}".format(fname)
ims.append(cv2.imread(fname, cv2.IMREAD_GRAYSCALE))
stars_list = []
for fname, im in zip(fnames, ims):
try:
print "Extracting stars from {}".format(fname)
stars_list.append((fname, list(stars.extract(im))))
except stars.ExtractFailed as e:
print "Failed to extract stars from {}".format(fname)
for fname, reg_result in zip(
(fname for fname, stars in stars_list),
register_many(stars for fname, stars in stars_list)):
if reg_result.exception:
assert reg_result.transform is None
print "Failed to register {}: {}".format(
fname, reg_result.exception)
elif reg_result.transform is not None:
assert reg_result.exception is None
print "Successfully registered {}".format(fname)
print reg_result.transform
else:
assert False
|
|
"""Stochastic optimization methods for MLP
"""
# Authors: Jiyuan Qian <jq401@nyu.edu>
# License: BSD 3 clause
import numpy as np
class BaseOptimizer(object):
"""Base (Stochastic) gradient descent optimizer
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
learning_rate_init : float, optional, default 0.1
The initial learning rate used. It controls the step-size in updating
the weights
Attributes
----------
learning_rate : float
the current learning rate
"""
def __init__(self, params, learning_rate_init=0.1):
self.params = [param for param in params]
self.learning_rate_init = learning_rate_init
self.learning_rate = float(learning_rate_init)
def update_params(self, grads):
"""Update parameters with given gradients
Parameters
----------
grads : list, length = len(params)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
"""
updates = self._get_updates(grads)
for param, update in zip(self.params, updates):
param += update
def iteration_ends(self, time_step):
"""Perform update to learning rate and potentially other states at the
end of an iteration
"""
pass
def trigger_stopping(self, msg, verbose):
"""Decides whether it is time to stop training
Parameters
----------
msg : str
Message passed in for verbose output
verbose : bool
            Print message to stdout if True
Returns
-------
is_stopping : bool
True if training needs to stop
"""
if verbose:
print(msg + " Stopping.")
return True
class SGDOptimizer(BaseOptimizer):
"""Stochastic gradient descent optimizer with momentum
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
learning_rate_init : float, optional, default 0.1
The initial learning rate used. It controls the step-size in updating
the weights
lr_schedule : {'constant', 'adaptive', 'invscaling'}, default 'constant'
Learning rate schedule for weight updates.
        -'constant', a constant learning rate given by
         'learning_rate_init'.
        -'invscaling', gradually decreases the learning rate 'learning_rate_'
         at each time step 't' using an inverse scaling exponent of 'power_t'.
         learning_rate_ = learning_rate_init / pow(t, power_t)
        -'adaptive', keeps the learning rate constant to
         'learning_rate_init' as long as the training loss keeps decreasing.
         Each time 2 consecutive epochs fail to decrease the training loss by
         tol, or fail to increase validation score by tol if 'early_stopping'
         is on, the current learning rate is divided by 5.
momentum : float, optional, default 0.9
Value of momentum used, must be larger than or equal to 0
nesterov : bool, optional, default True
Whether to use nesterov's momentum or not. Use nesterov's if True
Attributes
----------
learning_rate : float
the current learning rate
velocities : list, length = len(params)
velocities that are used to update params
"""
def __init__(self, params, learning_rate_init=0.1, lr_schedule='constant',
momentum=0.9, nesterov=True, power_t=0.5):
super(SGDOptimizer, self).__init__(params, learning_rate_init)
self.lr_schedule = lr_schedule
self.momentum = momentum
self.nesterov = nesterov
self.power_t = power_t
self.velocities = [np.zeros_like(param) for param in params]
def iteration_ends(self, time_step):
"""Perform updates to learning rate and potential other states at the
end of an iteration
Parameters
----------
time_step : int
number of training samples trained on so far, used to update
learning rate for 'invscaling'
"""
if self.lr_schedule == 'invscaling':
self.learning_rate = (float(self.learning_rate_init) /
(time_step + 1) ** self.power_t)
def trigger_stopping(self, msg, verbose):
if self.lr_schedule == 'adaptive':
if self.learning_rate > 1e-6:
self.learning_rate /= 5.
if verbose:
print(msg + " Setting learning rate to %f" %
self.learning_rate)
return False
else:
if verbose:
print(msg + " Learning rate too small. Stopping.")
return True
else:
if verbose:
print(msg + " Stopping.")
return True
def _get_updates(self, grads):
"""Get the values used to update params with given gradients
Parameters
----------
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
-------
updates : list, length = len(grads)
The values to add to params
"""
updates = [self.momentum * velocity - self.learning_rate * grad
for velocity, grad in zip(self.velocities, grads)]
self.velocities = updates
if self.nesterov:
updates = [self.momentum * velocity - self.learning_rate * grad
for velocity, grad in zip(self.velocities, grads)]
return updates
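# Minimal usage sketch (illustrative, not part of the original module): one
# plain momentum step on a single parameter array, showing that update_params
# modifies the parameter in place.
def _sgd_demo():
    params = [np.array([1.0, -2.0])]
    opt = SGDOptimizer(params, learning_rate_init=0.1, momentum=0.9,
                       nesterov=False)
    opt.update_params([np.array([0.5, -0.5])])
    # velocity = 0.9 * 0 - 0.1 * grad = [-0.05, 0.05]
    assert np.allclose(params[0], [0.95, -1.95])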
class AdamOptimizer(BaseOptimizer):
"""Stochastic gradient descent optimizer with Adam
Note: All default values are from the original Adam paper
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
    learning_rate_init : float, optional, default 0.001
The initial learning rate used. It controls the step-size in updating
the weights
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector, should be
in [0, 1)
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector, should be
in [0, 1)
epsilon : float, optional, default 1e-8
Value for numerical stability
Attributes
----------
learning_rate : float
The current learning rate
t : int
Timestep
ms : list, length = len(params)
First moment vectors
vs : list, length = len(params)
Second moment vectors
References
----------
Kingma, Diederik, and Jimmy Ba.
"Adam: A method for stochastic optimization."
arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, params, learning_rate_init=0.001, beta_1=0.9,
beta_2=0.999, epsilon=1e-8):
super(AdamOptimizer, self).__init__(params, learning_rate_init)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.t = 0
self.ms = [np.zeros_like(param) for param in params]
self.vs = [np.zeros_like(param) for param in params]
def _get_updates(self, grads):
"""Get the values used to update params with given gradients
Parameters
----------
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
-------
updates : list, length = len(grads)
The values to add to params
"""
self.t += 1
self.ms = [self.beta_1 * m + (1 - self.beta_1) * grad
for m, grad in zip(self.ms, grads)]
self.vs = [self.beta_2 * v + (1 - self.beta_2) * (grad ** 2)
for v, grad in zip(self.vs, grads)]
self.learning_rate = (self.learning_rate_init *
np.sqrt(1 - self.beta_2 ** self.t) /
(1 - self.beta_1 ** self.t))
updates = [-self.learning_rate * m / (np.sqrt(v) + self.epsilon)
for m, v in zip(self.ms, self.vs)]
return updates
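# Minimal usage sketch (illustrative, not part of the original module): the
# very first Adam step has magnitude close to learning_rate_init, regardless
# of the gradient scale (up to the epsilon term).
def _adam_demo():
    params = [np.array([1.0])]
    opt = AdamOptimizer(params, learning_rate_init=0.001)
    opt.update_params([np.array([0.5])])
    assert np.allclose(params[0], [1.0 - 0.001], atol=1e-6)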
|
|
import shutil
import tempfile
from numpy import array, allclose, transpose, random, dot, corrcoef, diag
from numpy.linalg import norm
import scipy.linalg as LinAlg
from thunder.factorization.ica import ICA
from thunder.factorization.svd import SVD
from thunder.factorization.nmf import NMF
from thunder.factorization.pca import PCA
from thunder.utils.datasets import DataSets
from thunder.rdds.matrices import RowMatrix
from test_utils import PySparkTestCase
class FactorizationTestCase(PySparkTestCase):
def setUp(self):
super(FactorizationTestCase, self).setUp()
self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(FactorizationTestCase, self).tearDown()
shutil.rmtree(self.outputdir)
class TestPCA(FactorizationTestCase):
"""
Test execution and accuracy of PCA
    Compares against results from scikit-learn, up to a sign flip
    NOTE: to make the comparison we rescale the scores by the singular (latent)
    values, because by convention we normalize by the latent values whereas
    scikit-learn does not
"""
def test_pca(self):
dataLocal = [
array([1.0, 1.0, 1.0, 5.0]),
array([2.0, 3.0, 4.0, 1.0]),
array([6.0, 0.0, 6.0, 6.0])
]
data = self.sc.parallelize(zip(range(1, 4), dataLocal))
mat = RowMatrix(data)
pca1 = PCA(k=1, svdMethod='direct')
pca1.fit(mat)
out1_comps = pca1.comps
out1_scores = pca1.scores.collectValuesAsArray() * pca1.latent
out1_transform_scores = pca1.transform(mat).collectValuesAsArray() * pca1.latent
from sklearn.decomposition import PCA as skPCA
pca2 = skPCA(n_components=1)
pca2.fit(array(dataLocal))
out2_comps = pca2.components_
out2_scores = pca2.transform(array(dataLocal))
assert(allclose(out1_comps, out2_comps) | allclose(out1_comps, -out2_comps))
assert(allclose(out1_scores, out2_scores) | allclose(out1_scores, -out2_scores))
assert(allclose(out1_scores, out1_transform_scores))
class TestSVD(FactorizationTestCase):
"""Test accuracy of direct and em methods
for SVD against scipy.linalg method,
Only uses k=1 otherwise results of iterative approaches can
vary up to an orthogonal transform
Checks if answers match up to a sign flip
"""
def test_SvdDirect(self):
dataLocal = [
array([1.0, 2.0, 6.0]),
array([1.0, 3.0, 0.0]),
array([1.0, 4.0, 6.0]),
array([5.0, 1.0, 4.0])
]
data = self.sc.parallelize(zip(range(1, 5), dataLocal))
mat = RowMatrix(data)
svd = SVD(k=1, method="direct")
svd.calc(mat)
uTrue, sTrue, vTrue = LinAlg.svd(array(dataLocal))
uTest = transpose(array(svd.u.rows().collect()))[0]
vTest = svd.v[0]
assert(allclose(svd.s[0], sTrue[0]))
assert(allclose(vTest, vTrue[0, :]) | allclose(-vTest, vTrue[0, :]))
assert(allclose(uTest, uTrue[:, 0]) | allclose(-uTest, uTrue[:, 0]))
def test_SvdEM(self):
dataLocal = [
array([1.0, 2.0, 6.0]),
array([1.0, 3.0, 0.0]),
array([1.0, 4.0, 6.0]),
array([5.0, 1.0, 4.0])
]
data = self.sc.parallelize(zip(range(1, 5), dataLocal))
mat = RowMatrix(data)
svd = SVD(k=1, method="em")
svd.calc(mat)
uTrue, sTrue, vTrue = LinAlg.svd(array(dataLocal))
uTest = transpose(array(svd.u.rows().collect()))[0]
vTest = svd.v[0]
tol = 10e-04 # allow small error for iterative method
assert(allclose(svd.s[0], sTrue[0], atol=tol))
assert(allclose(vTest, vTrue[0, :], atol=tol) | allclose(-vTest, vTrue[0, :], atol=tol))
assert(allclose(uTest, uTrue[:, 0], atol=tol) | allclose(-uTest, uTrue[:, 0], atol=tol))
def test_conversion(self):
from thunder.rdds.series import Series
dataLocal = [
array([1.0, 2.0, 6.0]),
array([1.0, 3.0, 0.0]),
array([1.0, 4.0, 6.0]),
array([5.0, 1.0, 4.0])
]
data = Series(self.sc.parallelize(zip(range(1, 5), dataLocal)))
SVD(k=1, method='direct').calc(data)
class TestICA(FactorizationTestCase):
"""Test ICA results against ground truth,
taking into account possible sign flips and permutations
"""
def test_ica(self):
random.seed(42)
data, s, a = DataSets.make(self.sc, "ica", nrows=100, returnParams=True)
ica = ICA(c=2, svdMethod="direct", seed=1)
ica.fit(data)
s_ = array(ica.sigs.rows().collect())
# test accurate recovery of original signals
tol = 0.01
assert(allclose(abs(corrcoef(s[:, 0], s_[:, 0])[0, 1]), 1, atol=tol)
or allclose(abs(corrcoef(s[:, 0], s_[:, 1])[0, 1]), 1, atol=tol))
assert(allclose(abs(corrcoef(s[:, 1], s_[:, 0])[0, 1]), 1, atol=tol)
or allclose(abs(corrcoef(s[:, 1], s_[:, 1])[0, 1]), 1, atol=tol))
# test accurate reconstruction from sources
assert(allclose(array(data.rows().collect()), dot(s_, ica.a.T)))
class TestNMF(FactorizationTestCase):
def test_als(self):
""" Test accuracy of alternating least-squares NMF algorithm
against the MATLAB-computed version
"""
        # set the data and initialization constants
keys = [array([i+1]) for i in range(4)]
dataLocal = array([
[1.0, 2.0, 6.0],
[1.0, 3.0, 0.0],
[1.0, 4.0, 6.0],
[5.0, 1.0, 4.0]])
data = self.sc.parallelize(zip(keys, dataLocal))
mat = RowMatrix(data)
h0 = array(
[[0.09082617, 0.85490047, 0.57234593],
[0.82766740, 0.21301186, 0.90913979]])
# if the rows of h are not normalized on each iteration:
hTrue = array(
[[0. , 0.6010, 0.9163],
[0.8970, 0.1556, 0.7423]])
wTrue = array(
[[4.5885, 1.5348],
[1.3651, 0.2184],
[5.9349, 1.0030],
[0. , 5.5147]])
        # if the rows of h are normalized on each iteration (as in the current implementation):
scaleMat = diag(norm(hTrue, axis=1))
hTrue = dot(LinAlg.inv(scaleMat), hTrue)
wTrue = dot(wTrue, scaleMat)
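        # (Editorial note: this rescaling leaves the reconstruction unchanged,
        # since dot(wTrue, scaleMat) times dot(LinAlg.inv(scaleMat), hTrue)
        # equals the original dot(wTrue, hTrue); only the per-component scale
        # moves between w and h.)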
# calculate NMF using the Thunder implementation
        # (maxIter=9 matches the number of iterations used by the MATLAB reference)
nmfThunder = NMF(k=2, method="als", h0=h0, maxIter=9)
nmfThunder.fit(mat)
hThunder = nmfThunder.h
wThunder = array(nmfThunder.w.values().collect())
tol = 1e-03 # allow small error
assert(allclose(wThunder, wTrue, atol=tol))
assert(allclose(hThunder, hTrue, atol=tol))
def test_init(self):
"""
test performance of whole function, including random initialization
"""
dataLocal = array([
[1.0, 2.0, 6.0],
[1.0, 3.0, 0.0],
[1.0, 4.0, 6.0],
[5.0, 1.0, 4.0]])
data = self.sc.parallelize(zip([array([i]) for i in range(dataLocal.shape[0])], dataLocal))
mat = RowMatrix(data)
nmfThunder = NMF(k=2, reconHist='final')
nmfThunder.fit(mat)
# check to see if Thunder's solution achieves close-to-optimal reconstruction error
# scikit-learn's solution achieves 2.993952
# matlab's non-deterministic implementation usually achieves < 2.9950 (when it converges)
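        # For reference, a comparable reconstruction error can be computed with
        # scikit-learn (illustrative sketch, assuming sklearn is installed;
        # its NMF initialization is random, so the exact value varies):
        #
        #   from numpy.linalg import norm
        #   from sklearn.decomposition import NMF as skNMF
        #   skModel = skNMF(n_components=2)
        #   w = skModel.fit_transform(dataLocal)
        #   skErr = norm(dataLocal - w.dot(skModel.components_))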
assert(nmfThunder.reconErr < 2.9950)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU V2 layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU')
@keras_parameterized.run_all_keras_modes(config=_config)
class GRUV2Test(keras_parameterized.TestCase):
@parameterized.named_parameters(
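      # test_name, activation, recurrent_activation, recurrent_dropout,
      # unroll, use_bias, reset_after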
('non_tan_activation', 'relu', 'sigmoid', 0, False, True, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True, True),
('unroll', 'tanh', 'sigmoid', 0, True, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False, True),
('not_reset_after', 'tanh', 'sigmoid', 0, False, True, False)
)
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias,
reset_after):
layer = rnn.GRU(1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias,
reset_after=reset_after)
self.assertFalse(layer._could_use_gpu_kernel)
@testing_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = rnn.GRU(1, activation=nn.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = rnn.GRU(1, recurrent_activation=nn.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
def test_keras_model_with_gru(self):
input_shape = 10
rnn_state_size = 8
output_shape = 8
timestep = 4
batch = 100
epoch = 10
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = np_utils.to_categorical(y_train, output_shape)
layer = rnn.GRU(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('rmsprop', loss='mse')
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(rnn.GRU(10, return_sequences=True, unroll=False))
model.add(rnn.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_GRU(self):
layer_class = rnn.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@testing_utils.run_v2_only
def test_gru_v2_feature_parity_with_canonical_gru(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 20
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=rnn_state_size,
random_seed=87654321)
y_train = np_utils.to_categorical(y_train, rnn_state_size)
    # For the last two batch items of the test data, we zero out the last
    # timestep to simulate variable-length sequences and exercise masking.
x_train[-2:, -1, :] = 0.0
y_train[-2:] = 0
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
masked_input = keras.layers.Masking()(inputs)
gru_layer = rnn_v1.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
output = gru_layer(masked_input)
gru_model = keras.models.Model(inputs, output)
weights = gru_model.get_weights()
y_1 = gru_model.predict(x_train)
gru_model.compile('rmsprop', 'mse')
gru_model.fit(x_train, y_train)
y_2 = gru_model.predict(x_train)
with testing_utils.device(should_use_gpu=True):
cudnn_layer = rnn.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
cudnn_model.set_weights(weights)
y_3 = cudnn_model.predict(x_train)
cudnn_model.compile('rmsprop', 'mse')
cudnn_model.fit(x_train, y_train)
y_4 = cudnn_model.predict(x_train)
self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5)
self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5)
@parameterized.named_parameters(
      # test_name, use_bias, bias_initializer
('normal', True, 'zeros'),
('no_bias', False, 'zeros'),
('random_bias', True, 'random_uniform'),
)
def test_gru_v2_model_save_load(self, use_bias, bias_initializer):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
batch = 10
timestep = 3
input_dim = 5
units = 2
x = np.random.random((batch, timestep, input_dim))
def build_model():
inputs = keras.layers.Input(
shape=[timestep, input_dim], dtype=dtypes.float32)
layer = rnn.GRU(
units,
use_bias=use_bias,
bias_initializer=bias_initializer)
output = layer(inputs)
return keras.models.Model(inputs, output), layer
model, layer = build_model()
y_ref = model.predict(x)
model.save_weights(h5_path)
cloned_model, new_layer = build_model()
cloned_model.load_weights(h5_path)
y = cloned_model.predict(x)
self.assertAllClose(y, y_ref)
self.assertAllClose(layer.get_weights(), new_layer.get_weights())
def test_gru_v2_output_on_multiple_kernel(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
with testing_utils.device(should_use_gpu=False):
layer = rnn.GRU(rnn_state_size)
output = layer(inputs)
cpu_model = keras.models.Model(inputs, output)
weights = cpu_model.get_weights()
y_1 = cpu_model.predict(x_train)
with testing_utils.device(should_use_gpu=True):
layer = rnn.GRU(rnn_state_size)
output = layer(inputs)
gpu_model = keras.models.Model(inputs, output)
gpu_model.set_weights(weights)
y_2 = gpu_model.predict(x_train)
    # Note that CuDNN uses 'sigmoid' as the recurrent activation, so GRU v2
    # defaults recurrent_activation to 'sigmoid'. Construct the canonical GRU
    # with recurrent_activation='sigmoid' to achieve the same output.
with testing_utils.device(should_use_gpu=True):
layer = rnn_v1.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
output = layer(inputs)
canonical_model = keras.models.Model(inputs, output)
canonical_model.set_weights(weights)
y_3 = canonical_model.predict(x_train)
self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5)
self.assertAllClose(y_2, y_3, rtol=1e-5, atol=1e-5)
@parameterized.named_parameters(
# test_name, time_major, go_backwards
('normal', False, False),
('time_major', True, False),
('go_backwards', False, True),
('both', True, True),
)
def test_time_major_and_go_backward(self, time_major, go_backwards):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
def build_model(layer_cls):
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
layer = layer_cls(rnn_state_size,
recurrent_activation='sigmoid',
time_major=time_major,
return_sequences=True,
go_backwards=go_backwards,
reset_after=True)
if time_major:
converted_input = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
outputs = layer(converted_input)
outputs = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
else:
outputs = layer(inputs)
return keras.models.Model(inputs, outputs)
gru_model = build_model(rnn_v1.GRU)
y_ref = gru_model.predict(x_train)
weights = gru_model.get_weights()
gru_v2_model = build_model(rnn.GRU)
gru_v2_model.set_weights(weights)
y = gru_v2_model.predict(x_train)
self.assertAllClose(y, y_ref)
def test_with_masking_layer_GRU(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
layer_class = rnn.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_masking_with_stacking_GRU(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(rnn.GRU(10, return_sequences=True, unroll=False))
model.add(rnn.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@testing_utils.run_v2_only
def test_float64_GRU(self):
if test.is_built_with_rocm():
self.skipTest('Double type is yet not supported in ROCm')
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'return_sequences': True,
'dtype': 'float64'},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype='float64')
def test_return_states_GRU(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
layer_class = rnn.GRU
x = np.random.random((2, 3, 4))
y = np.abs(np.random.random((2, 5)))
s = np.abs(np.random.random((2, 5)))
inputs = keras.layers.Input(
shape=[3, 4], dtype=dtypes.float32)
masked = keras.layers.Masking()(inputs)
outputs, states = layer_class(units=5, return_state=True)(masked)
model = keras.models.Model(inputs, [outputs, states])
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = rnn.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = rnn.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
def test_statefulness_GRU(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = rnn.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
layer.reset_states()
mix_padded_input = np.ones((num_samples, timesteps))
mix_padded_input[0, 1] = 0
mix_padded_input[1, 0] = 0
mix_padded_input[1, 2] = 0
out8 = model.predict(mix_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_GRU_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
rnn.GRU(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
@testing_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
    mask = np.ones((batch_size, timestep)).astype(bool)
mask[:, masksteps:] = 0
    # Test for V1 behavior.
    gru_v1 = rnn_v1.GRU(units, return_sequences=True, go_backwards=True)
    with testing_utils.device(should_use_gpu=True):
      outputs_masked_v1 = gru_v1(inputs, mask=constant_op.constant(mask))
      outputs_trimmed_v1 = gru_v1(inputs[:, :masksteps])
    self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1)
    # Test for V2 behavior.
    gru = rnn.GRU(units, return_sequences=True, go_backwards=True)
    with testing_utils.device(should_use_gpu=True):
      outputs_masked = gru(inputs, mask=constant_op.constant(mask))
      outputs_trimmed = gru(inputs[:, :masksteps])
    self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
@tf_test_util.enable_output_all_intermediates
def test_v1_session_behavior(self):
with ops.get_default_graph().as_default():
# See b/139132348 for more details.
x = np.random.uniform(size=(100, 4, 8))
y = np.random.uniform(size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).shuffle(100).batch(32)
inp = keras.layers.Input(shape=(4, 8))
layer = rnn.GRU(1)(inp)
layer = keras.layers.Dense(1)(layer)
model = keras.models.Model(inp, layer)
model.compile(loss='mse', optimizer='sgd')
model.fit(dataset)
def test_with_fully_masked_inputs(self):
num_samples = 8
timestep = 5
embedding_dim = 4
vocab_size = 20
units = 2
inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
# Set the first inputs to be fully zero.
inputs[0, :] = 0.0
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
vocab_size,
embedding_dim,
mask_zero=True,
input_length=timestep,
batch_input_shape=(num_samples, timestep)))
layer = rnn.GRU(units)
model.add(layer)
model.compile(
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Make sure it doesn't crash with cudnn kernel.
model.predict(inputs)
# TODO (b/169895267): test with xla_gpu is disabled.
def test_deepcopy(self):
if not context.executing_eagerly():
self.skipTest('v2-only test')
original_layer = rnn.GRU(5)
copied_layer = copy.deepcopy(original_layer)
self.assertEqual(copied_layer.units, 5)
    self.assertEqual(original_layer.get_config(), copied_layer.get_config())
# Copy layer before layer call on inputs without weight initialization.
inputs = np.random.normal(size=[32, 10, 8]).astype(np.float32)
original_layer = rnn.GRU(4)
copied_layer = copy.deepcopy(original_layer)
outputs = original_layer(inputs)
copied_outputs = copied_layer(inputs)
self.assertNotAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs))
# Copy layer after layer call on inputs with weight initialization.
original_layer = rnn.GRU(4)
outputs = original_layer(inputs)
copied_layer = copy.deepcopy(original_layer)
copied_outputs = copied_layer(inputs)
self.assertAllClose(self.evaluate(outputs), self.evaluate(copied_outputs))
@testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU')
class GRULayerGradientTapeTest(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['eager']))
def test_in_tape(self):
with self.test_session(config=_config):
time_steps = 10
embedding_size = 11
gru_unit_size = 12
gru = rnn.GRU(gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
x = random_ops.random_uniform([1, time_steps, embedding_size])
y = random_ops.random_uniform([1, gru_unit_size])
with backprop.GradientTape() as tape:
hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32)
_, state = gru(x, initial_state=hidden_state)
loss = math_ops.reduce_mean(math_ops.square(state - y))
tape.gradient(loss, gru.variables)
@testing_utils.run_all_without_tensor_float_32('RNN GRU can use TF32 on GPU')
@keras_parameterized.run_all_keras_modes(config=_config)
class GRUGraphRewriteTest(keras_parameterized.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
def _test_runtime_with_model(self, model):
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None])
existing_loss = 0
for _ in range(self.epoch):
history = model.fit(x_train, y_train)
loss_value = history.history['loss'][0]
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
@testing_utils.run_v2_only
def test_GRU_runtime(self):
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
outputs, runtime = layer(inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, especially during
    # aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
@testing_utils.run_v2_only
def test_GRU_runtime_with_mask(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
# Masking will affect which backend is selected based on whether the mask
# is strictly right padded.
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
masked_inputs = keras.layers.Masking()(inputs)
outputs, runtime = layer(masked_inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, especially during
    # aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x_train, y_train)
# Verify unpadded data.
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
# Update x/y to be right padded by setting the last timestep to 0
x_train[:, -1, :] = 0
y_train[:, -1] = 0
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
    # Further update x/y to be mix padded (masks in the middle), and verify
    # that only the CPU kernel can be selected.
x_train[:, -3, :] = 0
y_train[:, -3] = 0
_, runtime_value = model.predict(x_train)
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
@testing_utils.run_v2_only
def test_GRU_runtime_with_cond(self):
    # This test demonstrates the grappler plugin's graph rewrite under the
    # condition that the function returns a different number of internal
    # states.
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
zeros = array_ops.zeros([self.batch, self.output_shape])
dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
a = constant_op.constant(0)
b = constant_op.constant(1)
# Will always run the GRU layer.
outputs, runtime = control_flow_ops.cond(
gen_math_ops.less(a, b),
lambda: layer(inputs),
lambda: (zeros, dummy_runtime))
# Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, especially during
    # aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
if __name__ == '__main__':
test.main()
|
|
import copy
from datetime import date
from django.test import TestCase
import six
from regcore.db.django_models import DMDiffs, DMLayers, DMNotices, DMDocuments
from regcore.models import Diff, Layer, Notice, Document
class DMDocumentsTest(TestCase):
def setUp(self):
self.dmr = DMDocuments()
def test_get_404(self):
self.assertIsNone(self.dmr.get('lablab', 'verver'))
def test_get_cfr(self):
Document(doc_type='cfr', version='verver', label_string='a-b',
text='ttt', node_type='tyty').save()
self.assertEqual(
{
'text': 'ttt',
'label': ['a', 'b'],
'children': [],
'node_type': 'tyty'
},
self.dmr.get('cfr', 'a-b', 'verver'),
)
def test_get_preamble(self):
Document(doc_type='preamble', version='verver', label_string='a-b',
text='ttt', node_type='tyty').save()
self.assertEqual(
{
'text': 'ttt',
'label': ['a', 'b'],
'children': [],
'node_type': 'tyty'
},
self.dmr.get('preamble', 'a-b', 'verver'),
)
def test_listing(self):
Document(id='ver1-a-b', doc_type='cfr', version='ver1',
label_string='a-b', text='textex', node_type='ty').save()
Document(id='aaa-a-b', doc_type='cfr', version='aaa',
label_string='a-b', text='textex', node_type='ty').save()
Document(id='333-a-b', doc_type='cfr', version='333',
label_string='a-b', text='textex', node_type='ty').save()
Document(id='four-a-b', doc_type='cfr', version='four',
label_string='a-b', text='textex', node_type='ty').save()
results = self.dmr.listing('cfr', 'a-b')
self.assertEqual([('333', 'a-b'), ('aaa', 'a-b'), ('four', 'a-b'),
('ver1', 'a-b')], results)
Document(id='ver1-1111', doc_type='cfr', version='ver1',
label_string='1111', text='aaaa', node_type='ty',
root=True).save()
Document(id='ver2-1111', doc_type='cfr', version='ver2',
label_string='1111', text='bbbb', node_type='ty',
root=True).save()
Document(id='ver3-1111', doc_type='cfr', version='ver3',
label_string='1111', text='cccc', node_type='ty',
root=False).save()
results = self.dmr.listing('cfr')
self.assertEqual([('ver1', '1111'), ('ver2', '1111')], results)
def test_bulk_put(self):
"""Writing multiple documents should save correctly. They can be
modified"""
n2 = {'text': 'some text', 'label': ['111', '2'], 'children': [],
'node_type': 'tyty'}
n3 = {'text': 'other', 'label': ['111', '3'], 'children': [],
'node_type': 'tyty2'}
root = {'text': 'root', 'label': ['111'], 'node_type': 'tyty3',
'children': [n2, n3]}
original = copy.deepcopy(root)
n2['parent'] = root
n3['parent'] = root
nodes = [root, n2, n3]
self.dmr.bulk_put(nodes, 'cfr', '111', 'verver')
self.assertEqual(DMDocuments().get('cfr', '111', 'verver'), original)
root['title'] = original['title'] = 'New Title'
self.dmr.bulk_put(nodes, 'cfr', '111', 'verver')
self.assertEqual(DMDocuments().get('cfr', '111', 'verver'), original)
class DMLayersTest(TestCase):
def setUp(self):
self.dml = DMLayers()
def test_get_404(self):
self.assertIsNone(self.dml.get('namnam', 'cfr', 'verver/lablab'))
def test_get_success(self):
Layer(name='namnam', doc_type='cfr', doc_id='verver/lablab',
layer={"some": "body"}).save()
self.assertEqual({"some": 'body'},
self.dml.get('namnam', 'cfr', 'verver/lablab'))
def test_bulk_put(self):
"""Writing multiple documents should save correctly. They can be
modified"""
layers = [{'111-22': [], '111-22-a': [], 'doc_id': 'verver/111-22'},
{'111-23': [], 'doc_id': 'verver/111-23'}]
self.dml.bulk_put(layers, 'name', 'cfr', 'verver/111')
self.assertEqual(Layer.objects.count(), 2)
self.assertEqual(self.dml.get('name', 'cfr', 'verver/111-22'),
{'111-22': [], '111-22-a': []})
self.assertEqual(self.dml.get('name', 'cfr', 'verver/111-23'),
{'111-23': []})
layers[1] = {'111-23': [1], 'doc_id': 'verver/111-23'}
self.dml.bulk_put(layers, 'name', 'cfr', 'verver/111')
self.assertEqual(Layer.objects.count(), 2)
self.assertEqual(self.dml.get('name', 'cfr', 'verver/111-23'),
{'111-23': [1]})
class DMNoticesTest(TestCase):
def setUp(self):
self.dmn = DMNotices()
def test_get_404(self):
self.assertIsNone(self.dmn.get('docdoc'))
def test_get_success(self):
Notice(document_number='docdoc', fr_url='frfr',
publication_date=date.today(),
notice={"some": "body"}).save()
self.assertEqual({"some": 'body'}, self.dmn.get('docdoc'))
def test_listing(self):
n = Notice(document_number='22', fr_url='fr1', notice={},
effective_on=date(2005, 5, 5),
publication_date=date(2001, 3, 3))
n.save()
n.noticecfrpart_set.create(cfr_part='876')
n = Notice(document_number='9', fr_url='fr2', notice={},
publication_date=date(1999, 1, 1))
n.noticecfrpart_set.create(cfr_part='876')
n.noticecfrpart_set.create(cfr_part='111')
n.save()
self.assertEqual([{'document_number': '22', 'fr_url': 'fr1',
'publication_date': '2001-03-03',
'effective_on': '2005-05-05'},
{'document_number': '9', 'fr_url': 'fr2',
'publication_date': '1999-01-01'}],
self.dmn.listing())
self.assertEqual(self.dmn.listing(), self.dmn.listing('876'))
self.assertEqual([], self.dmn.listing('888'))
def test_put(self):
"""We can insert and replace a notice"""
doc = {"some": "structure",
'effective_on': '2011-01-01',
'fr_url': 'http://example.com',
'publication_date': '2010-02-02',
'cfr_parts': ['222']}
self.dmn.put('docdoc', doc)
expected = {"document_number": "docdoc",
"effective_on": date(2011, 1, 1),
"fr_url": "http://example.com",
"publication_date": date(2010, 2, 2),
"noticecfrpart__cfr_part": '222',
"notice": doc}
fields = expected.keys()
six.assertCountEqual(self, Notice.objects.all().values(*fields),
[expected])
doc['fr_url'] = 'url2'
self.dmn.put('docdoc', doc)
expected['fr_url'] = 'url2'
six.assertCountEqual(self, Notice.objects.all().values(*fields),
[expected])
class DMDiffTest(TestCase):
def setUp(self):
self.dmd = DMDiffs()
def test_get_404(self):
self.assertIsNone(self.dmd.get('lablab', 'oldold', 'newnew'))
def test_get_success(self):
Diff(label='lablab', old_version='oldold', new_version='newnew',
diff={"some": "body"}).save()
self.assertEqual({"some": 'body'},
self.dmd.get('lablab', 'oldold', 'newnew'))
def test_put(self):
"""We can insert and replace a diff"""
self.dmd.put('lablab', 'oldold', 'newnew', {"some": "structure"})
expected = {"label": "lablab", "old_version": "oldold",
"new_version": "newnew", "diff": {"some": "structure"}}
fields = expected.keys()
six.assertCountEqual(self, Diff.objects.all().values(*fields),
[expected])
self.dmd.put('lablab', 'oldold', 'newnew', {"other": "structure"})
expected['diff'] = {'other': 'structure'}
six.assertCountEqual(self, Diff.objects.all().values(*fields),
[expected])
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Alec Thomas
# Copyright (C) 2007 Eli Carter
# Copyright (C) 2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Eli Carter
import pkg_resources
from ConfigParser import RawConfigParser
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Configuration, ConfigSection
from trac.core import *
from trac.env import IEnvironmentSetupParticipant
from trac.perm import PermissionSystem
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.ticket.model import Resolution
from trac.util.text import obfuscate_email_address
from trac.util.translation import _, tag_, cleandoc_
from trac.web.chrome import Chrome, add_script, add_script_data
from trac.wiki.macros import WikiMacroBase
# -- Utilities for the ConfigurableTicketWorkflow
def parse_workflow_config(rawactions):
"""Given a list of options from [ticket-workflow]"""
actions = {}
for option, value in rawactions:
parts = option.split('.')
action = parts[0]
if action not in actions:
actions[action] = {'oldstates': '', 'newstate': ''}
if len(parts) == 1:
            # Base name, with the syntax: old,states,here -> newstate
try:
oldstates, newstate = [x.strip() for x in value.split('->')]
except ValueError:
continue # Syntax error, a warning will be logged later
actions[action]['newstate'] = newstate
actions[action]['oldstates'] = oldstates
else:
action, attribute = option.split('.')
actions[action][attribute] = value
# Fill in the defaults for every action, and normalize them to the desired
# types
def as_list(key):
value = attributes.get(key, '')
return [item for item in (x.strip() for x in value.split(',')) if item]
for action, attributes in actions.items():
# Default the 'name' attribute to the name used in the ini file
if 'name' not in attributes:
attributes['name'] = action
# If not specified, an action is not the default.
attributes['default'] = int(attributes.get('default', 0))
# If operations are not specified, that means no operations
attributes['operations'] = as_list('operations')
# If no permissions are specified, then no permissions are needed
attributes['permissions'] = as_list('permissions')
# Normalize the oldstates
attributes['oldstates'] = as_list('oldstates')
return actions
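# Illustrative sketch (not part of the original module): given the raw
# (option, value) pairs of a [ticket-workflow] section, for example
#
#   rawactions = [('accept', 'new,assigned -> accepted'),
#                 ('accept.permissions', 'TICKET_MODIFY'),
#                 ('accept.operations', 'set_owner_to_self')]
#
# parse_workflow_config(rawactions) returns a dict of the form
#
#   {'accept': {'name': 'accept', 'default': 0, 'newstate': 'accepted',
#               'oldstates': ['new', 'assigned'],
#               'operations': ['set_owner_to_self'],
#               'permissions': ['TICKET_MODIFY']}}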
def get_workflow_config(config):
"""Usually passed self.config, this will return the parsed ticket-workflow
section.
"""
raw_actions = list(config.options('ticket-workflow'))
actions = parse_workflow_config(raw_actions)
return actions
def load_workflow_config_snippet(config, filename):
"""Loads the ticket-workflow section from the given file (expected to be in
the 'workflows' tree) into the provided config.
"""
filename = pkg_resources.resource_filename('trac.ticket',
'workflows/%s' % filename)
new_config = Configuration(filename)
for name, value in new_config.options('ticket-workflow'):
config.set('ticket-workflow', name, value)
class ConfigurableTicketWorkflow(Component):
"""Ticket action controller which provides actions according to a
workflow defined in trac.ini.
    The workflow is defined in the `[ticket-workflow]` section of the
[wiki:TracIni#ticket-workflow-section trac.ini] configuration file.
"""
ticket_workflow_section = ConfigSection('ticket-workflow',
"""The workflow for tickets is controlled by plugins. By default,
there's only a `ConfigurableTicketWorkflow` component in charge.
That component allows the workflow to be configured via this section
in the `trac.ini` file. See TracWorkflow for more details.
(''since 0.11'')""")
def __init__(self, *args, **kwargs):
self.actions = get_workflow_config(self.config)
        if '_reset' not in self.actions:
# Special action that gets enabled if the current status no longer
# exists, as no other action can then change its state. (#5307)
self.actions['_reset'] = {
'default': 0,
'name': 'reset',
'newstate': 'new',
'oldstates': [], # Will not be invoked unless needed
'operations': ['reset_workflow'],
'permissions': []}
self.log.debug('Workflow actions at initialization: %s\n' %
str(self.actions))
for name, info in self.actions.iteritems():
if not info['newstate']:
self.log.warning("Ticket workflow action '%s' doesn't define "
"any transitions", name)
implements(ITicketActionController, IEnvironmentSetupParticipant)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""When an environment is created, we provide the basic-workflow,
unless a ticket-workflow section already exists.
"""
        if 'ticket-workflow' not in self.config.sections():
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.save()
self.actions = get_workflow_config(self.config)
def environment_needs_upgrade(self, db):
"""The environment needs an upgrade if there is no [ticket-workflow]
section in the config.
"""
return not list(self.config.options('ticket-workflow'))
def upgrade_environment(self, db):
"""Insert a [ticket-workflow] section using the original-workflow"""
load_workflow_config_snippet(self.config, 'original-workflow.ini')
self.config.save()
self.actions = get_workflow_config(self.config)
info_message = """
==== Upgrade Notice ====
The ticket Workflow is now configurable.
Your environment has been upgraded, but configured to use the original
workflow. It is recommended that you look at changing this configuration to use
basic-workflow.
Read TracWorkflow for more information (don't forget to 'wiki upgrade' as well)
"""
self.log.info(info_message.replace('\n', ' ').replace('==', ''))
print info_message
# ITicketActionController methods
def get_ticket_actions(self, req, ticket):
"""Returns a list of (weight, action) tuples that are valid for this
request and this ticket."""
# Get the list of actions that can be performed
# Determine the current status of this ticket. If this ticket is in
# the process of being modified, we need to base our information on the
# pre-modified state so that we don't try to do two (or more!) steps at
# once and get really confused.
status = ticket._old.get('status', ticket['status']) or 'new'
ticket_perm = req.perm(ticket.resource)
allowed_actions = []
for action_name, action_info in self.actions.items():
oldstates = action_info['oldstates']
if oldstates == ['*'] or status in oldstates:
# This action is valid in this state. Check permissions.
required_perms = action_info['permissions']
if self._is_action_allowed(ticket_perm, required_perms):
allowed_actions.append((action_info['default'],
action_name))
if not (status in ['new', 'closed'] or \
status in TicketSystem(self.env).get_all_status()) \
and 'TICKET_ADMIN' in ticket_perm:
# State no longer exists - add a 'reset' action if admin.
allowed_actions.append((0, '_reset'))
return allowed_actions
def _is_action_allowed(self, ticket_perm, required_perms):
if not required_perms:
return True
for permission in required_perms:
if permission in ticket_perm:
return True
return False
def get_all_status(self):
"""Return a list of all states described by the configuration.
"""
all_status = set()
for action_name, action_info in self.actions.items():
all_status.update(action_info['oldstates'])
all_status.add(action_info['newstate'])
all_status.discard('*')
all_status.discard('')
return all_status
def render_ticket_action_control(self, req, ticket, action):
self.log.debug('render_ticket_action_control: action "%s"' % action)
this_action = self.actions[action]
status = this_action['newstate']
operations = this_action['operations']
current_owner = ticket._old.get('owner', ticket['owner'] or '(none)')
if not (Chrome(self.env).show_email_addresses
or 'EMAIL_VIEW' in req.perm(ticket.resource)):
format_user = obfuscate_email_address
else:
format_user = lambda address: address
current_owner = format_user(current_owner)
control = [] # default to nothing
hints = []
if 'reset_workflow' in operations:
control.append(tag("from invalid state "))
hints.append(_("Current state no longer exists"))
if 'del_owner' in operations:
hints.append(_("The ticket will be disowned"))
if 'set_owner' in operations:
id = 'action_%s_reassign_owner' % action
selected_owner = req.args.get(id, req.authname)
            if 'set_owner' in this_action:
owners = [x.strip() for x in
this_action['set_owner'].split(',')]
elif self.config.getbool('ticket', 'restrict_owner'):
perm = PermissionSystem(self.env)
owners = perm.get_users_with_permission('TICKET_MODIFY')
owners.sort()
else:
owners = None
            if owners is None:
owner = req.args.get(id, req.authname)
control.append(tag_('to %(owner)s',
owner=tag.input(type='text', id=id,
name=id, value=owner)))
hints.append(_("The owner will be changed from "
"%(current_owner)s",
current_owner=current_owner))
elif len(owners) == 1:
owner = tag.input(type='hidden', id=id, name=id,
value=owners[0])
formatted_owner = format_user(owners[0])
control.append(tag_('to %(owner)s ',
owner=tag(formatted_owner, owner)))
if ticket['owner'] != owners[0]:
hints.append(_("The owner will be changed from "
"%(current_owner)s to %(selected_owner)s",
current_owner=current_owner,
selected_owner=formatted_owner))
else:
control.append(tag_('to %(owner)s', owner=tag.select(
[tag.option(x, value=x,
selected=(x == selected_owner or None))
for x in owners],
id=id, name=id)))
hints.append(_("The owner will be changed from "
"%(current_owner)s to the selected user",
current_owner=current_owner))
elif 'set_owner_to_self' in operations and \
ticket._old.get('owner', ticket['owner']) != req.authname:
hints.append(_("The owner will be changed from %(current_owner)s "
"to %(authname)s", current_owner=current_owner,
authname=req.authname))
if 'set_resolution' in operations:
            if 'set_resolution' in this_action:
resolutions = [x.strip() for x in
this_action['set_resolution'].split(',')]
else:
resolutions = [val.name for val in Resolution.select(self.env)]
if not resolutions:
raise TracError(_("Your workflow attempts to set a resolution "
"but none is defined (configuration issue, "
"please contact your Trac admin)."))
id = 'action_%s_resolve_resolution' % action
if len(resolutions) == 1:
resolution = tag.input(type='hidden', id=id, name=id,
value=resolutions[0])
control.append(tag_('as %(resolution)s',
resolution=tag(resolutions[0],
resolution)))
hints.append(_("The resolution will be set to %(name)s",
name=resolutions[0]))
else:
selected_option = req.args.get(id,
TicketSystem(self.env).default_resolution)
control.append(tag_('as %(resolution)s',
resolution=tag.select(
[tag.option(x, value=x,
selected=(x == selected_option or None))
for x in resolutions],
id=id, name=id)))
hints.append(_("The resolution will be set"))
if 'del_resolution' in operations:
hints.append(_("The resolution will be deleted"))
if 'leave_status' in operations:
control.append(_('as %(status)s ',
status= ticket._old.get('status',
ticket['status'])))
else:
if status != '*':
hints.append(_("Next status will be '%(name)s'", name=status))
return (this_action['name'], tag(*control), '. '.join(hints) + ".")
def get_ticket_changes(self, req, ticket, action):
this_action = self.actions[action]
# Enforce permissions
if not self._has_perms_for_action(req, this_action, ticket.resource):
# The user does not have any of the listed permissions, so we won't
# do anything.
return {}
updated = {}
# Status changes
status = this_action['newstate']
if status != '*':
updated['status'] = status
for operation in this_action['operations']:
if operation == 'reset_workflow':
updated['status'] = 'new'
elif operation == 'del_owner':
updated['owner'] = ''
elif operation == 'set_owner':
newowner = req.args.get('action_%s_reassign_owner' % action,
this_action.get('set_owner', '').strip())
# If there was already an owner, we get a list, [new, old],
# but if there wasn't we just get new.
if type(newowner) == list:
newowner = newowner[0]
updated['owner'] = newowner
elif operation == 'set_owner_to_self':
updated['owner'] = req.authname
elif operation == 'del_resolution':
updated['resolution'] = ''
elif operation == 'set_resolution':
newresolution = req.args.get('action_%s_resolve_resolution' % \
action,
this_action.get('set_resolution', '').strip())
updated['resolution'] = newresolution
# leave_status is just a no-op here, so we don't look for it.
return updated
def apply_action_side_effects(self, req, ticket, action):
pass
def _has_perms_for_action(self, req, action, resource):
required_perms = action['permissions']
if required_perms:
for permission in required_perms:
if permission in req.perm(resource):
break
else:
# The user does not have any of the listed permissions
return False
return True
# Public methods (for other ITicketActionControllers that want to use
# our config file and provide an operation for an action)
def get_actions_by_operation(self, operation):
"""Return a list of all actions with a given operation
(for use in the controller's get_all_status())
"""
actions = [(info['default'], action) for action, info
in self.actions.items()
if operation in info['operations']]
return actions
def get_actions_by_operation_for_req(self, req, ticket, operation):
"""Return list of all actions with a given operation that are valid
in the given state for the controller's get_ticket_actions().
If state='*' (the default), all actions with the given operation are
returned.
"""
# Be sure to look at the original status.
status = ticket._old.get('status', ticket['status'])
actions = [(info['default'], action) for action, info
in self.actions.items()
if operation in info['operations'] and
('*' in info['oldstates'] or
status in info['oldstates']) and
self._has_perms_for_action(req, info, ticket.resource)]
return actions
class WorkflowMacro(WikiMacroBase):
_domain = 'messages'
_description = cleandoc_(
"""Render a workflow graph.
This macro accepts a TracWorkflow configuration and renders the states
and transitions as a directed graph. If no parameters are given, the
current ticket workflow is rendered. In WikiProcessors mode the `width`
and `height` arguments can be specified.
    (Defaults: `width = 800` and `height = 600`)
Examples:
{{{
[[Workflow()]]
[[Workflow(go = here -> there; return = there -> here)]]
{{{
#!Workflow width=700 height=700
leave = * -> *
leave.operations = leave_status
leave.default = 1
accept = new,assigned,accepted,reopened -> accepted
accept.permissions = TICKET_MODIFY
accept.operations = set_owner_to_self
resolve = new,assigned,accepted,reopened -> closed
resolve.permissions = TICKET_MODIFY
resolve.operations = set_resolution
reassign = new,assigned,accepted,reopened -> assigned
reassign.permissions = TICKET_MODIFY
reassign.operations = set_owner
reopen = closed -> reopened
reopen.permissions = TICKET_CREATE
reopen.operations = del_resolution
}}}
}}}
""")
def expand_macro(self, formatter, name, text, args):
if not text:
raw_actions = self.config.options('ticket-workflow')
else:
if args is None:
text = '\n'.join([line.lstrip() for line in text.split(';')])
            if '[ticket-workflow]' not in text:
text = '[ticket-workflow]\n' + text
parser = RawConfigParser()
parser.readfp(StringIO(text))
raw_actions = list(parser.items('ticket-workflow'))
actions = parse_workflow_config(raw_actions)
states = list(set(
[state for action in actions.itervalues()
for state in action['oldstates']] +
[action['newstate'] for action in actions.itervalues()]))
action_names = actions.keys()
edges = []
for name, action in actions.items():
new_index = states.index(action['newstate'])
name_index = action_names.index(name)
for old_state in action['oldstates']:
old_index = states.index(old_state)
edges.append((old_index, new_index, name_index))
args = args or {}
graph = {'nodes': states, 'actions': action_names, 'edges': edges,
'width': args.get('width', 800),
'height': args.get('height', 600)}
graph_id = '%012x' % id(graph)
req = formatter.req
add_script(req, 'common/js/excanvas.js', ie_if='IE')
add_script(req, 'common/js/workflow_graph.js')
add_script_data(req, {'graph_%s' % graph_id: graph})
return tag.div(_("Enable JavaScript to display the workflow graph."),
class_='trac-workflow-graph system-message',
id='trac-workflow-graph-%s' % graph_id)
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LW
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH, task_states.REBUILDING]):
return True
return False
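# Illustrative usage sketch (not part of the original module): an instance
# with vm_state=vm_states.ACTIVE and task_state=task_states.RESIZE_MIGRATING
# counts as resizing, while the same instance with task_state=None does not:
#
#   inst = objects.Instance(vm_state=vm_states.ACTIVE,
#                           task_state=task_states.RESIZE_MIGRATING)
#   assert _instance_in_resize_state(inst)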
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
    # records as well and do something smart. However, ignore
    # those for now to avoid them being included in the calculations below.
return migration.migration_type in ('resize', 'migration',
'evacuation')
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
self.stats = stats.Stats()
self.tracked_instances = {}
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.scheduler_client = scheduler_client.SchedulerClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning(_LW("Host field should not be set on the instance "
"until resources have been claimed."),
instance=instance)
if instance.node:
LOG.warning(_LW("Node field should not be set on the instance "
"until resources have been claimed."),
instance=instance)
# get the overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance.flavor.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
context, instance.uuid)
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, overhead=overhead, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
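# Illustrative usage sketch (names such as ``rt``, ``ctxt``, ``instance``,
# ``nodename`` and ``limits`` are placeholders supplied by the caller).
# The returned Claim is typically used as a context manager so the tracked
# usage should be rolled back automatically if the build fails:
#
#     with rt.instance_claim(ctxt, instance, nodename, limits):
#         ...spawn the instance; an exception inside the block aborts
#         the claim and frees the reserved resources...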
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
move_type='evacuation', limits=limits,
image_meta=image_meta, migration=migration)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move."""
return self._move_claim(context, instance, instance_type, nodename,
image_meta=image_meta, limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
move_type=None, image_meta=None, limits=None,
migration=None):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param image_meta: instance image metadata
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:param migration: A migration object if one was already created
elsewhere for this operation
:returns: A Claim ticket representing the reserved resources. It
          should be used to finalize the resource claim or to free the
          resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
# get the overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(new_instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': new_instance_type.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_pci_requests.instance_uuid = instance.uuid
# PCI requests come from two sources: instance flavor and
# SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
# On resize merge the SR-IOV ports pci_requests with the new
# instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.alias_name is None:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_pci_requests, overhead=overhead,
limits=limits)
claim.migration = migration
claimed_pci_devices_objs = []
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
# TODO(jaypipes): Move claimed_numa_topology out of the Claim's
# constructor flow so the Claim constructor only tests whether
# resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
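# Illustrative usage sketch (``rt``, ``ctxt``, ``instance``, ``flavor``,
# ``nodename`` and ``limits`` are placeholders). A resize typically claims
# on the destination node and later releases the side of the usage that is
# no longer needed via drop_move_claim():
#
#     claim = rt.resize_claim(ctxt, instance, flavor, nodename,
#                             limits=limits)
#     ...
#     rt.drop_move_claim(ctxt, instance, nodename, instance_type=flavor)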
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
COMPUTE_RESOURCE_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
This should be done while the COMPUTE_RESOURCE_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
# Remove usage for an incoming/outgoing migration on the destination
# node.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix,
migration)
if instance_type is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif (instance['uuid'] in self.tracked_instances):
self.tracked_instances.pop(instance['uuid'])
self._drop_pci_devices(instance, nodename, prefix)
# TODO(lbeliveau): Validate if numa needs the same treatment.
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources)
self.compute_nodes[nodename] = cn
cn.create()
LOG.info(_LI('Compute_service record created for '
'%(host)s:%(node)s'),
{'host': self.host, 'node': nodename})
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources):
"""Copy resource values to supplied compute_node."""
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
compute_node.stats = copy.deepcopy(self.stats)
# update the allocation ratios for the related ComputeNode object
compute_node.ram_allocation_ratio = self.ram_allocation_ratio
compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except Exception as exc:
LOG.warning(_LW("Cannot get the metrics from %(mon)s; "
"error: %(exc)s"),
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metrics = metrics.to_list()
if len(metrics):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context, nodename):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources)
def _pair_instances_to_migrations(self, migrations, instances):
instance_by_uuid = {inst.uuid: inst for inst in instances}
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov) this situation is possible for resize/cold
# migration when migration is finished but haven't yet
# confirmed/reverted in that case instance already changed host
# to destination and no matching happens
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
# initialize the compute node object, creating it
# if it does not already exist.
self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances, nodename)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename)
self._pair_instances_to_migrations(migrations, instances)
self._update_usage_from_migrations(context, migrations, nodename)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans, nodename)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
# notified when instances are deleted, we need to remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context, cn)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
LOG.debug("Hypervisor: free VCPUs: %s", free_vcpus)
else:
free_vcpus = 'unknown'
LOG.debug("Hypervisor: VCPU information unavailable")
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.info(_LI("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s"),
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _update(self, context, compute_node):
"""Update partial stats locally and populate them to Scheduler."""
if not self._resource_change(compute_node):
return
nodename = compute_node.hypervisor_hostname
compute_node.save()
# Persist the stats to the Scheduler
try:
inv_data = self.driver.get_inventory(nodename)
self.scheduler_client.set_inventory_for_provider(
compute_node.uuid,
compute_node.hypervisor_hostname,
inv_data,
)
except NotImplementedError:
# Eventually all virt drivers will return an inventory dict in the
# format that the placement API expects and we'll be able to remove
# this code branch
self.scheduler_client.update_compute_node(compute_node)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
disk_usage += overhead.get('disk_gb', 0)
vcpus_usage += overhead.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
cn.running_vms = self.stats.num_instances
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
cn, usage, free)
cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
if not _is_trackable_migration(migration):
return
uuid = migration.instance_uuid
LOG.info(_LI("Updating from migration %s"), uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
# skip migration if instance isn't in a resize state:
if not _instance_in_resize_state(instances[uuid]):
LOG.warning(_LW("Instance not resizing, skipping migration."),
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning(_LW("Flavor could not be found, skipping "
"migration."), instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
# NOTE(sfinucan): Both brand new instances as well as instances that
# are being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_removed_instance:
self.tracked_instances.pop(uuid)
sign = -1
cn = self.compute_nodes[nodename]
self.stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = copy.deepcopy(self.stats)
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
self.scheduler_client.reportclient.update_instance_allocation(
cn, instance, sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
cn.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = 0
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance, nodename)
self.scheduler_client.reportclient.remove_deleted_instances(
cn, self.tracked_instances.values())
cn.free_ram_mb = max(0, cn.free_ram_mb)
cn.free_disk_gb = max(0, cn.free_disk_gb)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning(_LW("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)"),
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, objects.Instance):
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': object_or_dict.flavor.root_gb,
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
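# For reference, an illustrative usage dict in the shape consumed by
# _update_usage() (the values below are made up):
#
#     {'memory_mb': 2048, 'vcpus': 2, 'root_gb': 20,
#      'ephemeral_gb': 0, 'numa_topology': None}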
|
|
# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import inspect
import json
import os
from aodhclient.v2 import base as aodh_base
from aodhclient.v2 import client as aodhclient
from barbicanclient import base as barbican_base
from barbicanclient import client as barbicanclient
from ceilometerclient.v2 import client as ceilometerclient
from cinderclient.apiclient import base as cinder_base
from cinderclient.v2 import client as cinderclient
from designateclient import client as designateclient
from glanceclient.v2 import client as glanceclient
from gnocchiclient.v1 import base as gnocchi_base
from gnocchiclient.v1 import client as gnocchiclient
from heatclient.common import base as heat_base
from heatclient.v1 import client as heatclient
from ironicclient.common import base as ironic_base
from ironicclient.v1 import client as ironicclient
from keystoneclient import base as keystone_base
from keystoneclient.v3 import client as keystoneclient
from magnumclient.common import base as magnum_base
from magnumclient.v1 import client as magnumclient
from mistralclient.api import base as mistral_base
from mistralclient.api.v2 import client as mistralclient
from muranoclient.common import base as murano_base
from muranoclient.v1 import client as muranoclient
from novaclient import base as nova_base
from novaclient import client as novaclient
from troveclient import base as trove_base
from troveclient.v1 import client as troveclient
# TODO(nmakhotkin): Find a rational way to do it for neutron.
# TODO(nmakhotkin): Implement recursive way of searching for managers
# TODO(nmakhotkin): (e.g. keystone).
# TODO(dprince): Need to update ironic_inspector_client before we can
# plug it in cleanly here.
# TODO(dprince): Swiftclient doesn't currently support discovery
# like we do in this class.
# TODO(therve): Zaqarclient doesn't currently support discovery
# like we do in this class.
# TODO(sa709c): Tackerclient doesn't currently support discovery
# like we do in this class.
"""It is simple CLI tool which allows to see and update mapping.json file
if needed. mapping.json contains all allowing OpenStack actions sorted by
service name. Usage example:
python tools/get_action_list.py nova
The result will be simple JSON containing action name as a key and method
path as a value. For updating mapping.json it is need to copy all keys and
values of the result to corresponding section of mapping.json:
...mapping.json...
"nova": {
<put it here>
},
...mapping.json...
Note: in case of Keystone service, correct OS_AUTH_URL v3 and the rest auth
info must be provided. It can be provided either via environment variables
or CLI arguments. See --help for details.
"""
BASE_HEAT_MANAGER = heat_base.HookableMixin
BASE_NOVA_MANAGER = nova_base.HookableMixin
BASE_KEYSTONE_MANAGER = keystone_base.Manager
BASE_CINDER_MANAGER = cinder_base.HookableMixin
BASE_MISTRAL_MANAGER = mistral_base.ResourceManager
BASE_TROVE_MANAGER = trove_base.Manager
BASE_IRONIC_MANAGER = ironic_base.Manager
BASE_BARBICAN_MANAGER = barbican_base.BaseEntityManager
BASE_MAGNUM_MANAGER = magnum_base.Manager
BASE_MURANO_MANAGER = murano_base.Manager
BASE_AODH_MANAGER = aodh_base.Manager
BASE_GNOCCHI_MANAGER = gnocchi_base.Manager
def get_parser():
parser = argparse.ArgumentParser(
description='Gets all needed methods of OpenStack clients.',
usage="python get_action_list.py <service_name>"
)
parser.add_argument(
'service',
choices=CLIENTS.keys(),
help='Service name whose methods need to be found.'
)
parser.add_argument(
'--os-username',
dest='username',
default=os.environ.get('OS_USERNAME', 'admin'),
help='Authentication username (Env: OS_USERNAME)'
)
parser.add_argument(
'--os-password',
dest='password',
default=os.environ.get('OS_PASSWORD', 'openstack'),
help='Authentication password (Env: OS_PASSWORD)'
)
parser.add_argument(
'--os-tenant-name',
dest='tenant_name',
default=os.environ.get('OS_TENANT_NAME', 'Default'),
help='Authentication tenant name (Env: OS_TENANT_NAME)'
)
parser.add_argument(
'--os-auth-url',
dest='auth_url',
default=os.environ.get('OS_AUTH_URL'),
help='Authentication URL (Env: OS_AUTH_URL)'
)
return parser
GLANCE_NAMESPACE_LIST = [
'image_members', 'image_tags', 'images', 'schemas', 'tasks',
'metadefs_resource_type', 'metadefs_property', 'metadefs_object',
'metadefs_tag', 'metadefs_namespace', 'versions'
]
CEILOMETER_NAMESPACE_LIST = [
'alarms', 'capabilities', 'event_types', 'events', 'meters',
'new_samples', 'query_alarm_history', 'query_alarms', 'query_samples',
'resources', 'samples', 'statistics', 'trait_descriptions', 'traits'
]
DESIGNATE_NAMESPACE_LIST = [
'diagnostics', 'domains', 'quotas', 'records', 'reports', 'servers',
'sync', 'touch'
]
def get_nova_client(**kwargs):
return novaclient.Client(2)
def get_keystone_client(**kwargs):
return keystoneclient.Client(**kwargs)
def get_glance_client(**kwargs):
return glanceclient.Client(kwargs.get('auth_url'))
def get_heat_client(**kwargs):
return heatclient.Client('')
def get_ceilometer_client(**kwargs):
return ceilometerclient.Client('')
def get_cinder_client(**kwargs):
return cinderclient.Client()
def get_mistral_client(**kwargs):
return mistralclient.Client()
def get_trove_client(**kwargs):
return troveclient.Client('username', 'password')
def get_ironic_client(**kwargs):
return ironicclient.Client("http://127.0.0.1:6385/")
def get_barbican_client(**kwargs):
return barbicanclient.Client(
project_id="1",
endpoint="http://127.0.0.1:9311"
)
def get_designate_client(**kwargs):
return designateclient.Client('1')
def get_magnum_client(**kwargs):
return magnumclient.Client()
def get_murano_client(**kwargs):
return muranoclient.Client('')
def get_aodh_client(**kwargs):
return aodhclient.Client('')
def get_gnocchi_client(**kwargs):
return gnocchiclient.Client()
CLIENTS = {
'nova': get_nova_client,
'heat': get_heat_client,
'ceilometer': get_ceilometer_client,
'cinder': get_cinder_client,
'keystone': get_keystone_client,
'glance': get_glance_client,
'trove': get_trove_client,
'ironic': get_ironic_client,
'barbican': get_barbican_client,
'mistral': get_mistral_client,
'designate': get_designate_client,
'magnum': get_magnum_client,
'murano': get_murano_client,
'aodh': get_aodh_client,
'gnocchi': get_gnocchi_client,
# 'neutron': get_nova_client
# 'baremetal_introspection': ...
# 'swift': ...
# 'zaqar': ...
}
BASE_MANAGERS = {
'nova': BASE_NOVA_MANAGER,
'heat': BASE_HEAT_MANAGER,
'ceilometer': None,
'cinder': BASE_CINDER_MANAGER,
'keystone': BASE_KEYSTONE_MANAGER,
'glance': None,
'trove': BASE_TROVE_MANAGER,
'ironic': BASE_IRONIC_MANAGER,
'barbican': BASE_BARBICAN_MANAGER,
'mistral': BASE_MISTRAL_MANAGER,
'designate': None,
'magnum': BASE_MAGNUM_MANAGER,
'murano': BASE_MURANO_MANAGER,
'aodh': BASE_AODH_MANAGER,
'gnocchi': BASE_GNOCCHI_MANAGER,
# 'neutron': BASE_NOVA_MANAGER
# 'baremetal_introspection': ...
# 'swift': ...
# 'zaqar': ...
}
NAMESPACES = {
'glance': GLANCE_NAMESPACE_LIST,
'ceilometer': CEILOMETER_NAMESPACE_LIST,
'designate': DESIGNATE_NAMESPACE_LIST
}
ALLOWED_ATTRS = ['service_catalog', 'catalog']
FORBIDDEN_METHODS = [
'add_hook', 'alternate_service_type', 'completion_cache', 'run_hooks',
'write_to_completion_cache', 'model', 'build_key_only_query', 'build_url',
'head', 'put', 'unvalidated_model'
]
def get_public_attrs(obj):
all_attrs = dir(obj)
return [a for a in all_attrs if not a.startswith('_')]
def get_public_methods(attr, client):
hierarchy_list = attr.split('.')
attribute = client
for attr in hierarchy_list:
attribute = getattr(attribute, attr)
all_attributes_list = get_public_attrs(attribute)
methods = []
for a in all_attributes_list:
allowed = a in ALLOWED_ATTRS
forbidden = a in FORBIDDEN_METHODS
if (not forbidden and
(allowed or inspect.ismethod(getattr(attribute, a)))):
methods.append(a)
return methods
def get_manager_list(service_name, client):
base_manager = BASE_MANAGERS[service_name]
if not base_manager:
return NAMESPACES[service_name]
public_attrs = get_public_attrs(client)
manager_list = []
for attr in public_attrs:
if (isinstance(getattr(client, attr), base_manager)
or attr in ALLOWED_ATTRS):
manager_list.append(attr)
return manager_list
def get_mapping_for_service(service, client):
mapping = collections.OrderedDict()
for man in get_manager_list(service, client):
public_methods = get_public_methods(man, client)
for method in public_methods:
key = "%s_%s" % (man, method)
value = "%s.%s" % (man, method)
mapping[key] = value
return mapping
def print_mapping(mapping):
print(json.dumps(mapping, indent=8, separators=(',', ': ')))
if __name__ == "__main__":
args = get_parser().parse_args()
auth_info = {
'username': args.username,
'tenant_name': args.tenant_name,
'password': args.password,
'auth_url': args.auth_url
}
service = args.service
client = CLIENTS.get(service)(**auth_info)
print("Find methods for service: %s..." % service)
print_mapping(get_mapping_for_service(service, client))
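# Illustrative output for ``python tools/get_action_list.py nova`` (the
# exact keys depend on the installed novaclient version):
#
#     {
#         "servers_list": "servers.list",
#         "servers_get": "servers.get",
#         ...
#     }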
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2007-2012 Heikki Hokkanen <hoxu@users.sf.net>
# & others (see doc/author.txt)
# GPLv2 / GPLv3
from datetime import datetime
from pytz import FixedOffset
from vilya.libs.consts import LANGUAGES
from vilya.libs.store import cache, ONE_DAY
from vilya.models.user import get_author_by_email
from .consts import conf
from .utils import getcommitrange, getkeyssortedbyvaluekey
from .collector import DataCollector
REPO_EXTENSION_KEY = "repo:%s:stats:extension1"
class GitDataCollector(DataCollector):
def __init__(self, gyt_repo):
super(GitDataCollector, self).__init__()
self.gyt_repo = gyt_repo
@classmethod
def _get_author_email(cls, author_email):
author, email = author_email.split('<', 1)
author = author.rstrip()
email = email.rstrip('>')
return (author, email)
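# Illustrative example of the parsing above (made-up input):
#     _get_author_email('John Doe <john@example.com>')
#     -> ('John Doe', 'john@example.com')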
def commitrange(self, head, end=None):
return getcommitrange(head, end)
def fill_shortstat(self, commitrange):
try:
to_ref, from_ref = commitrange
block_of_lines = self.gyt_repo.repo.log(to_ref,
from_ref=from_ref,
shortstat=True,
no_merges=True,
reverse=True
)
except:
block_of_lines = []
return block_of_lines
@cache(REPO_EXTENSION_KEY % '{proj_name}', expire=ONE_DAY)
def compute_file_size_and_extensions(self, proj_name):
# extensions and size of files
# this should only run once, then only compute the delta
extensions = {}
total_size = 0
total_files = 0
source_files = 0
source_lines = 0
lines = self.gyt_repo.repo.ls_tree('HEAD', recursive=True, size=True)
for line in lines:
if line[0] == '160000' and line[3] == '-':
# skip submodules
continue
sha1 = line[2]
size = int(line[3])
fullpath = line[4]
total_size += size
total_files += 1
filename = fullpath.split('/')[-1] # strip directories
if filename.find('.') == -1 or filename.rfind('.') == 0:
ext = ''
else:
ext = filename[(filename.rfind('.') + 1):]
if len(ext) > conf['max_ext_length']:
ext = ''
name = LANGUAGES.get(ext, None)
if name not in extensions:
if ext in LANGUAGES.keys():
name = LANGUAGES[ext]
extensions[name] = {'files': 0, 'lines': 0}
else:
continue
extensions[name]['files'] += 1
source_files += 1
try:
# should be text files
count = self.getLinesInBlob(sha1)
extensions[name]['lines'] += count
source_lines += count
except:
pass
return extensions, total_files, total_size, source_files, source_lines
def fill_rev_list_commitrange(self, commitrange):
to_ref, from_ref = commitrange
commits = self.gyt_repo.repo.rev_list(to_ref, from_ref)
for commit in commits:
author = commit.author.name
# email = email_normalizer(commit.author.name,
# commit.author.email)
email = commit.author.email
stamp = commit.committer.time
date = datetime.fromtimestamp(
commit.committer.time,
FixedOffset(commit.committer.offset)
)
author = get_author_by_email(email, author)
if author in conf['merge_authors']:
author = conf['merge_authors'][author]
# First and last commit stamp
# (may be in any order because of cherry-picking and patches)
if stamp > self.last_commit_stamp:
self.last_commit_stamp = stamp
if self.first_commit_stamp == 0 or stamp < self.first_commit_stamp:
self.first_commit_stamp = stamp
# yearly/weekly activity
yyw = date.strftime('%Y-%W')
self.year_week_act[yyw] += 1
if self.year_week_act_peak < self.year_week_act[yyw]:
self.year_week_act_peak = self.year_week_act[yyw]
# author stats
if author not in self.authors:
self.authors[author] = {
'lines_added': 0,
'lines_removed': 0,
'commits': 0,
}
# commits, note again that commits may be in any date order
# because of cherry-picking and patches
if 'last_commit_stamp' not in self.authors[author]:
self.authors[author]['last_commit_stamp'] = stamp
if stamp > self.authors[author]['last_commit_stamp']:
self.authors[author]['last_commit_stamp'] = stamp
if 'first_commit_stamp' not in self.authors[author]:
self.authors[author]['first_commit_stamp'] = stamp
if stamp < self.authors[author]['first_commit_stamp']:
self.authors[author]['first_commit_stamp'] = stamp
if 'email' not in self.authors[author]:
self.authors[author]['email'] = email
def fill_short_stats_commitrange(self, commitrange):
to_ref, from_ref = commitrange
if to_ref == 'HEAD' and from_ref is None:
total_lines = 0
else:
total_lines = self.total_lines
for commit in self.fill_shortstat(commitrange):
files = commit['files']
inserted = commit['additions']
deleted = commit['deletions']
total_lines += inserted
total_lines -= deleted
self.total_lines_added += inserted
self.total_lines_removed += deleted
stamp = commit['committer_time']
author = commit['author_name']
email = commit['author_email']
author = get_author_by_email(email, author)
if author in conf['merge_authors']:
author = conf['merge_authors'][author]
self.changes_by_date[stamp] = {
'files': files,
'ins': inserted,
'del': deleted,
'lines': total_lines
}
self.process_line_user(author, stamp, inserted, deleted)
self.total_lines = total_lines
def collect(self, dir, proj_name, head, n_author):
DataCollector.collect(self, dir)
self.loadCache(proj_name, '/stats/' + proj_name, n_author)
last_sha = self.cache and self.cache.get('last_sha', '')
if last_sha:
commitrange = self.commitrange('HEAD', last_sha)
else:
commitrange = self.commitrange('HEAD')
self.total_authors += n_author
self.fill_rev_list_commitrange(commitrange)
# TODO Optimize this, it's the worst bottleneck
# outputs "<stamp> <files>" for each revision
to_ref, from_ref = commitrange
revlines = self.gyt_repo.repo.rev_list(to_ref, from_ref=from_ref)
for commit in revlines:
timest = commit.author.time
rev = commit.tree.hex
linecount = self.getFilesInCommit(rev)
self.files_by_stamp[int(timest)] = int(linecount)
self.total_commits += len(revlines)
extensions, total_files, total_size, source_files, source_lines \
= self.compute_file_size_and_extensions(proj_name)
self.extensions = extensions
self.total_files = total_files
self.total_size = total_size
self.source_files = source_files
self.source_lines = source_lines
self.fill_short_stats_commitrange(commitrange)
self.refine()
# update the collected data for commits after the cached head sha
# and save the cache up to the new head sha
self.cache['last_sha'] = head
self.saveCache(proj_name, '/stats/' + proj_name)
def process_line_user(self, author, stamp, inserted, deleted):
if author not in self.authors:
self.authors[author] = {
'lines_added': 0,
'lines_removed': 0,
'commits': 0,
}
self.authors[author]['commits'] += 1
self.authors[author]['lines_added'] += inserted
self.authors[author]['lines_removed'] += deleted
if stamp not in self.changes_by_date_by_author:
self.changes_by_date_by_author[stamp] = {}
if author not in self.changes_by_date_by_author[stamp]:
self.changes_by_date_by_author[stamp][author] = {}
linesadd = self.authors[author]['lines_added']
commits_n = self.authors[author]['commits']
self.changes_by_date_by_author[stamp][author]['lines_added'] = linesadd
self.changes_by_date_by_author[stamp][author]['commits'] = commits_n
def refine(self):
# authors
# name -> {place_by_commits, commits_frac, date_first, date_last,
# timedelta}
self.authors_by_commits = getkeyssortedbyvaluekey(
self.authors, 'commits')
self.authors_by_commits.reverse() # most first
for i, name in enumerate(self.authors_by_commits):
self.authors[name]['place_by_commits'] = i + 1
for name in self.authors.keys():
a = self.authors[name]
a['commits_frac'] = (
100 * float(a['commits'])) / self.getTotalCommits()
date_first = datetime.fromtimestamp(a['first_commit_stamp'])
date_last = datetime.fromtimestamp(a['last_commit_stamp'])
delta = date_last - date_first
a['date_first'] = date_first.strftime('%Y-%m-%d')
a['date_last'] = date_last.strftime('%Y-%m-%d')
a['timedelta'] = delta
if 'lines_added' not in a:
a['lines_added'] = 0
if 'lines_removed' not in a:
a['lines_removed'] = 0
def getActiveDays(self):
return self.active_days
def getActivityByDayOfWeek(self):
return self.d_of_week_act
def getActivityByHourOfDay(self):
return self.h_of_day_act
def getAuthorInfo(self, author):
return self.authors[author]
def getAuthors(self, limit=None):
res = getkeyssortedbyvaluekey(self.authors, 'commits')
res.reverse()
return res[:limit]
def getCommitDeltaDays(self):
return (self.last_commit_stamp / 86400 - self.first_commit_stamp / 86400) + 1 # noqa
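# Illustrative arithmetic (assuming integer timestamps and Python 2 style
# integer division): with the first commit on epoch day N and the last on
# day N + 9, the expression above yields (N + 9) - N + 1 = 10 days, i.e.
# both endpoint days are counted.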
def getFilesInCommit(self, rev):
try:
res = self.cache['files_in_tree'][rev]
except:
res = len(self.gyt_repo.repo.ls_tree(rev,
recursive=True,
name_only=True))
if 'files_in_tree' not in self.cache:
self.cache['files_in_tree'] = {}
self.cache['files_in_tree'][rev] = res
return res
def getFirstCommitDate(self):
return datetime.fromtimestamp(self.first_commit_stamp)
def getLastCommitDate(self):
return datetime.fromtimestamp(self.last_commit_stamp)
def getLinesInBlob(self, sha1):
try:
res = self.cache['lines_in_blob'][sha1]
except:
res = len(self.gyt_repo.repo.cat_file(sha1).split('\n'))
if 'lines_in_blob' not in self.cache:
self.cache['lines_in_blob'] = {}
self.cache['lines_in_blob'][sha1] = res
return res
def getTotalAuthors(self):
return self.total_authors
def getTotalCommits(self):
return self.total_commits
def getTotalFiles(self):
return self.total_files
def getTotalLOC(self):
return self.total_lines
def getTotalSize(self):
return self.total_size
|
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.core.files.storage import default_storage
from django.urls import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Node, Template, TemplateSyntaxError
try:
# Django >= 2.1
from django.template.base import TokenType
TOKEN_TEXT = TokenType.TEXT
TOKEN_VAR = TokenType.VAR
TOKEN_BLOCK = TokenType.BLOCK
TOKEN_COMMENT = TokenType.COMMENT
except:
# Django < 2.1
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR)
from django.template.base import TextNode
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from django.utils.safestring import SafeText, mark_safe
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url, home_slug
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
def initialize_nevercache():
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
token_type = token.token_type
if token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
initialize_nevercache()
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
"""
Renders fields for a form with an optional template choice.
"""
context["form_for_fields"] = form
return get_template(template).render(context.flatten())
@register.inclusion_tag("includes/form_errors.html")
def errors_for(form):
"""
Renders an alert if the form has any errors.
"""
return {"form": form}
@register.filter
def sort_by(items, attr):
"""
General sort filter - sorts by either attribute or key.
"""
def key_func(item):
try:
return getattr(item, attr)
except AttributeError:
try:
return item[attr]
except TypeError:
getattr(item, attr) # Reraise AttributeError
return sorted(items, key=key_func)
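# Example template usage (illustrative; ``pages`` is a placeholder):
#     {% load mezzanine_tags %}
#     {{ pages|sort_by:"title" }}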
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try to find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model for share
links in the template.
"""
obj = context[token.split_contents()[1]]
obj.set_short_url()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
Return the full Gravatar URL for the given email address.
"""
bits = (md5(email.lower().encode("utf-8")).hexdigest(), size)
return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from
meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return escape(strip_tags(decode_entities(parsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
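# Example template usage (illustrative; ``objects`` is assumed to be a
# paginated object list provided by the view):
#     {% pagination_for objects %}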
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
template_vars = {
"request": context["request"],
}
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
template_vars["search_model_choices"] = sorted(search_model_choices)
return template_vars
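# Usage sketch: either list specific models or pass ``"all"`` to fall
# back to the ``SEARCH_MODEL_CHOICES`` setting (the model labels shown
# here are illustrative):
#
#     {% search_form "blog.BlogPost pages.Page" %}
#     {% search_form "all" %}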
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
top=.5, padding=False, padding_color="#fff"):
"""
Given the URL to an image, resizes the image using the given width
and height on the first time it is requested, and returns the URL
to the new resized image. If width or height are zero then original
ratio is maintained. When ``upscale`` is False, images smaller than
the given size will not be grown to fill that size. The given width
and height thus act as maximum dimensions.
"""
if not image_url:
return ""
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
return ""
image_url = unquote(str(image_url)).split("?")[0]
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext.lower(), "JPEG")
thumb_name = "%s-%sx%s" % (image_prefix, width, height)
if not upscale:
thumb_name += "-no-upscale"
if left != .5 or top != .5:
left = min(1, max(0, left))
top = min(1, max(0, top))
thumb_name = "%s-%sx%s" % (thumb_name, left, top)
thumb_name += "-padded-%s" % padding_color if padding else ""
thumb_name = "%s%s" % (thumb_name, image_ext)
# `image_name` is used here for the directory path, as each image
# requires its own sub-directory using its own name - this is so
# we can consistently delete all thumbnails for an individual
# image, which is something we do in filebrowser when a new image
# is written, allowing us to purge any previously generated
# thumbnails that may match a new image name.
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME, image_name)
if not os.path.exists(thumb_dir):
try:
os.makedirs(thumb_dir)
except OSError:
pass
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(image_name.encode("utf-8")),
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
# The image was saved to a filesystem with utf-8 support, but
# the locale has since changed and the filesystem no longer
# supports utf-8.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
f = default_storage.open(image_url)
try:
image = Image.open(f)
except:
# Invalid image format.
return image_url
image_info = image.info
# Transpose to align the image to its orientation if necessary.
# If the image is transposed, delete the exif information as
# not all browsers support the CSS image-orientation:
# - http://caniuse.com/#feat=css-image-orientation
try:
orientation = image._getexif().get(0x0112)
except:
orientation = None
if orientation:
methods = {
2: (Image.FLIP_LEFT_RIGHT,),
3: (Image.ROTATE_180,),
4: (Image.FLIP_TOP_BOTTOM,),
5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
6: (Image.ROTATE_270,),
7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
8: (Image.ROTATE_90,)}.get(orientation, ())
if methods:
image_info.pop('exif', None)
for method in methods:
image = image.transpose(method)
to_width = int(width)
to_height = int(height)
from_width = image.size[0]
from_height = image.size[1]
if not upscale:
to_width = min(to_width, from_width)
to_height = min(to_height, from_height)
# Set dimensions.
if to_width == 0:
to_width = from_width * to_height // from_height
elif to_height == 0:
to_height = from_height * to_width // from_width
if image.mode not in ("P", "L", "RGBA") \
and filetype not in ("JPG", "JPEG"):
try:
image = image.convert("RGBA")
except:
return image_url
# Required for progressive jpgs.
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
# Padding.
if padding and to_width and to_height:
from_ratio = float(from_width) / from_height
to_ratio = float(to_width) / to_height
pad_size = None
if to_ratio < from_ratio:
pad_height = int(to_height * (float(from_width) / to_width))
pad_size = (from_width, pad_height)
pad_top = (pad_height - from_height) // 2
pad_left = 0
elif to_ratio > from_ratio:
pad_width = int(to_width * (float(from_height) / to_height))
pad_size = (pad_width, from_height)
pad_top = 0
pad_left = (pad_width - from_width) // 2
if pad_size is not None:
pad_container = Image.new("RGBA", pad_size, padding_color)
pad_container.paste(image, (pad_left, pad_top))
image = pad_container
# Create the thumbnail.
to_size = (to_width, to_height)
to_pos = (left, top)
try:
image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "rb") as f:
default_storage.save(unquote(thumb_url), File(f))
except Exception:
# If an error occurred, a corrupted image may have been saved,
# so remove it, otherwise the check for it existing will just
# return the corrupted image next time it's requested.
try:
os.remove(thumb_path)
except Exception:
pass
return image_url
return thumb_url
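# Usage sketch (``page.featured_image`` is a hypothetical ImageField):
# a zero width or height keeps the original aspect ratio, and the
# resized file is cached under ``THUMBNAILS_DIR_NAME`` beside the
# original image:
#
#     <img src="{{ MEDIA_URL }}{% thumbnail page.featured_image 90 0 %}">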
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
template_vars = {
"has_site_permission": has_site_permission(user),
"request": context["request"],
}
if (settings.INLINE_EDITING_ENABLED and
template_vars["has_site_permission"]):
t = get_template("includes/editable_toolbar.html")
template_vars["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
template_vars["editable_obj"] = context.get("editable_obj",
context.get("page", None))
template_vars["accounts_logout_url"] = context.get(
"accounts_logout_url", None)
template_vars["toolbar"] = t.render(template_vars)
template_vars["richtext_media"] = RichTextField().formfield(
).widget.media
return template_vars
@register.filter
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
for filter_name in settings.RICHTEXT_FILTERS:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
if not isinstance(content, SafeText):
# raise TypeError(
# filter_name + " must mark it's return value as safe. See "
# "https://docs.djangoproject.com/en/stable/topics/security/"
# "#cross-site-scripting-xss-protection")
import warnings
warnings.warn(
filter_name + " needs to ensure that any untrusted inputs are "
"properly escaped and mark the html it returns as safe. In a "
"future release this will cause an exception. See "
"https://docs.djangoproject.com/en/stable/topics/security/"
"cross-site-scripting-xss-protection",
FutureWarning)
content = mark_safe(content)
return content
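# Usage sketch: the filter is typically combined with ``safe`` in
# templates, on the assumption that each function named in
# ``RICHTEXT_FILTERS`` returns already-sanitised HTML:
#
#     {{ page.richtextpage.content|richtext_filters|safe }}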
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split(".")
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj()
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([str(getattr(*field)) for field in fields])
except AttributeError:
pass
if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["editable_form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(context.flatten())
return parsed
@register.simple_tag
def try_url(url_name):
"""
Mimics Django's ``url`` template tag but fails silently. Used for
url names in admin templates as these won't resolve when admin
tests are running.
"""
from warnings import warn
warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
try:
url = reverse(url_name)
except NoReverseMatch:
return ""
return url
def admin_app_list(request):
"""
Adapted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
# Model or view --> (group index, group title, item index, item title).
menu_order = {}
for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
group_title, items = group
for (item_index, item) in enumerate(items):
if isinstance(item, (tuple, list)):
item_title, item = item
else:
item_title = None
menu_order[item] = (group_index, group_title,
item_index, item_title)
# Add all registered models, using group and title from menu order.
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
if hasattr(model_admin, "in_menu"):
import warnings
warnings.warn(
'ModelAdmin.in_menu() has been replaced with '
'ModelAdmin.has_module_permission(request). See '
'https://docs.djangoproject.com/en/stable/ref/contrib/admin/'
'#django.contrib.admin.ModelAdmin.has_module_permission.',
DeprecationWarning)
in_menu = in_menu and model_admin.has_module_permission(request)
if in_menu and request.user.has_module_perms(opts.app_label):
admin_url_name = ""
if model_admin.has_change_permission(request):
admin_url_name = "changelist"
change_url = admin_url(model, admin_url_name)
else:
change_url = None
if model_admin.has_add_permission(request):
admin_url_name = "add"
add_url = admin_url(model, admin_url_name)
else:
add_url = None
if admin_url_name:
model_label = "%s.%s" % (opts.app_label, opts.object_name)
try:
app_index, app_title, model_index, model_title = \
menu_order[model_label]
except KeyError:
app_index = None
try:
app_title = opts.app_config.verbose_name.title()
except AttributeError:
# Third party admin classes doing weird things.
# See GH #1628
app_title = ""
model_index = None
model_title = None
else:
del menu_order[model_label]
if not model_title:
model_title = capfirst(model._meta.verbose_name_plural)
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": model_index,
"perms": model_admin.get_model_perms(request),
"name": model_title,
"object_name": opts.object_name,
"admin_url": change_url,
"add_url": add_url
})
# Menu may also contain view or url pattern names given as (title, name).
for (item_url, item) in menu_order.items():
app_index, app_title, item_index, item_title = item
try:
item_url = reverse(item_url)
except NoReverseMatch:
continue
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": item_index,
"perms": {"custom": True},
"name": item_title,
"admin_url": item_url,
})
app_list = list(app_dict.values())
sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
for app in app_list:
app["models"].sort(key=sort)
app_list.sort(key=sort)
return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
user = context["request"].user
if user.is_staff:
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
if user.is_superuser:
sites = Site.objects.all()
else:
try:
sites = user.sitepermissions.sites.all()
except ObjectDoesNotExist:
sites = Site.objects.none()
context["dropdown_menu_sites"] = list(sites)
context["dropdown_menu_selected_site_id"] = current_site_id()
return context.flatten()
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context.flatten()
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context.flatten()
@register.render_tag
def dashboard_column(context, token):
"""
Takes an index for retrieving the sequence of template tags from
``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
dashboard.
"""
column_index = int(token.split_contents()[1])
output = []
for tag in settings.DASHBOARD_TAGS[column_index]:
t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
output.append(t.render(context))
return "".join(output)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url "de" %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
if not view.namespace and view.url_name == "home":
url = home_slug()
else:
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
qs = context['request'].META.get("QUERY_STRING", "")
if qs:
url += "?" + qs
return url
|
|
# -*- coding: utf-8 -*-
# pylint: disable=not-context-manager
# NOTE: The pylint not-context-manager warning is disabled pending the fix of
# a bug in pylint. See https://github.com/PyCQA/pylint/issues/782
# Disable while we have Python 2.x compatibility
# pylint: disable=useless-object-inheritance
"""Classes to handle Sonos UPnP Events and Subscriptions.
The `Subscription` class from this module will be used in
:py:mod:`soco.services` unless `config.EVENTS_MODULE` is set to
point to :py:mod:`soco.events_twisted`, in which case
:py:mod:`soco.events_twisted.Subscription` will be used. See the
Example in :py:mod:`soco.events_twisted`.
Example:
Run this code, and change your volume, tracks etc::
from __future__ import print_function
try:
from queue import Empty
except: # Py2.7
from Queue import Empty
import logging
logging.basicConfig()
import soco
from pprint import pprint
from soco.events import event_listener
# pick a device at random and use it to get
# the group coordinator
device = soco.discover().pop().group.coordinator
print (device.player_name)
sub = device.renderingControl.subscribe()
sub2 = device.avTransport.subscribe()
while True:
try:
event = sub.events.get(timeout=0.5)
pprint (event.variables)
except Empty:
pass
try:
event = sub2.events.get(timeout=0.5)
pprint (event.variables)
except Empty:
pass
except KeyboardInterrupt:
sub.unsubscribe()
sub2.unsubscribe()
event_listener.stop()
break
"""
from __future__ import unicode_literals
import errno
import logging
import threading
import requests
from .compat import BaseHTTPRequestHandler, URLError, socketserver, urlopen
# Event is imported so that 'from events import Events' still works
# pylint: disable=unused-import
from .events_base import Event # noqa: F401
from .events_base import (
EventNotifyHandlerBase,
EventListenerBase,
SubscriptionBase,
SubscriptionsMap,
)
from .exceptions import SoCoException
log = logging.getLogger(__name__) # pylint: disable=C0103
class EventServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""A TCP server which handles each new request in a new thread."""
allow_reuse_address = True
class EventNotifyHandler(BaseHTTPRequestHandler, EventNotifyHandlerBase):
"""Handles HTTP ``NOTIFY`` Verbs sent to the listener server.
Inherits from `soco.events_base.EventNotifyHandlerBase`.
"""
def __init__(self, *args, **kwargs):
# The SubscriptionsMap instance created when this module is imported.
# This is referenced by soco.events_base.EventNotifyHandlerBase.
self.subscriptions_map = subscriptions_map
# super().__init__ is called last, because
# BaseHTTPRequestHandler.__init__ handles the request before
# returning, so self.subscriptions_map must already be set.
super().__init__(*args, **kwargs)
def do_NOTIFY(self): # pylint: disable=invalid-name
"""Serve a ``NOTIFY`` request by calling `handle_notification`
with the headers and content.
"""
headers = requests.structures.CaseInsensitiveDict(self.headers)
content_length = int(headers["content-length"])
content = self.rfile.read(content_length)
self.handle_notification(headers, content)
self.send_response(200)
self.end_headers()
# pylint: disable=no-self-use, missing-docstring
def log_event(self, seq, service_id, timestamp):
log.info(
"Event %s received for %s service on thread %s at %s",
seq,
service_id,
threading.current_thread(),
timestamp,
)
def log_message(self, fmt, *args): # pylint: disable=arguments-differ
# Divert standard webserver logging to the debug log
log.debug(fmt, *args)
class EventServerThread(threading.Thread):
"""The thread in which the event listener server will run."""
def __init__(self, server):
"""
Args:
address (tuple): The (ip, port) address on which the server
should listen.
"""
super().__init__()
#: `threading.Event`: Used to signal that the server should stop.
self.stop_flag = threading.Event()
#: `EventServer`: The server which will be run on this thread.
self.server = server
def run(self):
"""Start the server
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
"""
log.info("Event listener running on %s", self.server.server_address)
# Listen for events until told to stop
while not self.stop_flag.is_set():
self.server.handle_request()
def stop(self):
"""Stop the server."""
self.stop_flag.set()
class EventListener(EventListenerBase):
"""The Event Listener.
Runs an http server in a thread which is an endpoint for ``NOTIFY``
requests from Sonos devices. Inherits from
`soco.events_base.EventListenerBase`.
"""
def __init__(self):
super().__init__()
#: `EventServerThread`: thread on which to run.
self._listener_thread = None
def listen(self, ip_address):
"""Start the event listener listening on the local machine at
port 1400 (default). If this port is unavailable, the
listener will attempt to listen on the next available port,
within a range of 100.
Make sure that your firewall allows connections to this port.
This method is called by `soco.events_base.EventListenerBase.start`
Args:
ip_address (str): The local network interface on which the server
should start listening.
Returns:
int: `requested_port_number`. Included for
compatibility with `soco.events_twisted.EventListener.listen`
Note:
The port on which the event listener listens is configurable.
See `config.EVENT_LISTENER_PORT`
"""
for port_number in range(
self.requested_port_number, self.requested_port_number + 100
):
address = (ip_address, port_number)
try:
server = EventServer(address, EventNotifyHandler)
break
except OSError as oserror:
if oserror.errno == errno.EADDRINUSE:
log.debug("Port %s:%d is in use", ip_address, port_number)
else:
raise
self._listener_thread = EventServerThread(server)
self._listener_thread.daemon = True
self._listener_thread.start()
if port_number != self.requested_port_number:
log.debug(
"The first available port %d was used instead of %d",
port_number,
self.requested_port_number,
)
return port_number
def stop_listening(self, address):
"""Stop the listener."""
# Signal the thread to stop before handling the next request
self._listener_thread.stop()
# Send a dummy request in case the http server is currently listening
try:
urlopen("http://%s:%s/" % (address[0], address[1]))
except URLError:
# If the server is already shut down, we receive a socket error,
# which we ignore.
pass
# wait for the thread to finish, with a timeout of one second
# to ensure the main thread does not hang
self._listener_thread.join(1)
# check if join timed out and issue a warning if it did
if self._listener_thread.is_alive():
log.warning("Event Listener did not shutdown gracefully.")
class Subscription(SubscriptionBase):
"""A class representing the subscription to a UPnP event.
Inherits from `soco.events_base.SubscriptionBase`.
"""
def __init__(self, service, event_queue=None):
"""
Args:
service (Service): The SoCo `Service` to which the subscription
should be made.
event_queue (:class:`~queue.Queue`): A queue on which received
events will be put. If not specified, a queue will be
created and used.
"""
super().__init__(service, event_queue)
# Used to keep track of the auto_renew thread
self._auto_renew_thread = None
self._auto_renew_thread_flag = threading.Event()
# The SubscriptionsMap instance created when this module is imported.
# This is referenced by soco.events_base.SubscriptionBase.
self.subscriptions_map = subscriptions_map
# The EventListener instance created when this module is imported.
# This is referenced by soco.events_base.SubscriptionBase.
self.event_listener = event_listener
# Used to stop race conditions, as autorenewal may occur from a thread
self._lock = threading.Lock()
# pylint: disable=arguments-differ
def subscribe(self, requested_timeout=None, auto_renew=False, strict=True):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
This method calls `events_base.SubscriptionBase.subscribe`.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
subscribe = super().subscribe
return self._wrap(subscribe, strict, requested_timeout, auto_renew)
def renew(self, requested_timeout=None, is_autorenew=False, strict=True):
"""renew(requested_timeout=None)
Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
This method calls `events_base.SubscriptionBase.renew`.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
is_autorenew (bool, optional): Whether this is an autorenewal.
Default 'False'.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
renew = super().renew
return self._wrap(renew, strict, requested_timeout, is_autorenew)
def unsubscribe(self, strict=True):
"""unsubscribe()
Unsubscribe from the service's events.
Once unsubscribed, a Subscription instance should not be reused.
This method calls `events_base.SubscriptionBase.unsubscribe`.
Args:
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
unsubscribe = super().unsubscribe
return self._wrap(unsubscribe, strict)
def _auto_renew_start(self, interval):
"""Starts the auto_renew thread."""
class AutoRenewThread(threading.Thread):
"""Used by the auto_renew code to renew a subscription from within
a thread.
"""
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interval = interval
self.subscription = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
subscription = self.subscription
stop_flag = self.stop_flag
interval = self.interval
while not stop_flag.wait(interval):
subscription.renew(is_autorenew=True, strict=False)
auto_renew_thread = AutoRenewThread(
interval, self._auto_renew_thread_flag, self
)
auto_renew_thread.start()
def _auto_renew_cancel(self):
"""Cancels the auto_renew thread"""
self._auto_renew_thread_flag.set()
# pylint: disable=no-self-use, too-many-arguments
def _request(self, method, url, headers, success):
"""Sends an HTTP request.
Args:
method (str): 'SUBSCRIBE' or 'UNSUBSCRIBE'.
url (str): The full endpoint to which the request is being sent.
headers (dict): A dict of headers, each key and each value being
of type `str`.
success (function): A function to be called if the
request succeeds. The function will be called with a dict
of response headers as its only parameter.
"""
response = requests.request(method, url, headers=headers)
response.raise_for_status()
if success:
success(response.headers)
def _wrap(self, method, strict, *args, **kwargs):
"""This is a wrapper for `Subscription.subscribe`, `Subscription.renew`
and `Subscription.unsubscribe` which:
* Returns the `Subscription` instance.
* If an Exception has occurred:
* Cancels the Subscription (unless the Exception was caused by
a SoCoException upon subscribe).
* On an autorenew, if the strict flag was set to False, calls
the optional self.auto_renew_fail method with the
Exception. This method needs to be threadsafe.
* If the strict flag was set to True (the default), reraises
the Exception or, if the strict flag was set to False, logs
the Exception instead.
* Calls the wrapped methods with a threading.Lock, to prevent race
conditions (e.g. to prevent unsubscribe and autorenew being
called simultaneously).
"""
action = method.__name__
# A lock is used, because autorenewal occurs in
# a thread
with self._lock:
try:
method(*args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
# If an Exception occurred during execution of subscribe,
# renew or unsubscribe, set the cancel flag to True unless
# the Exception was a SoCoException upon subscribe
cancel = action == "renew" or not isinstance(exc, SoCoException)
if cancel:
# If the cancel flag was set to true, cancel the
# subscription with an explanation.
msg = (
"An Exception occurred. Subscription to"
+ " {}, sid: {} has been cancelled".format(
self.service.base_url + self.service.event_subscription_url,
self.sid,
)
)
self._cancel_subscription(msg)
# If we're not being strict, log the Exception
if not strict:
msg = (
"Exception received in Subscription."
+ "{} for Subscription to:\n{}, sid: {}".format(
action,
self.service.base_url + self.service.event_subscription_url,
self.sid,
)
)
log.exception(msg)
# If we're not being strict upon a renewal
# (e.g. an autorenewal) call the optional
# self.auto_renew_fail method, if it has been set
if action == "renew" and self.auto_renew_fail is not None:
if hasattr(self.auto_renew_fail, "__call__"):
# pylint: disable=not-callable
self.auto_renew_fail(exc)
# If we're being strict, reraise the Exception
else:
raise # pylint: disable=raising-bad-type
else:
# Return the Subscription to the function that
# called subscribe, renew or unsubscribe (unless an
# Exception occurred and it was reraised above)
return self # pylint: disable=lost-exception
subscriptions_map = SubscriptionsMap() # pylint: disable=C0103
event_listener = EventListener() # pylint: disable=C0103
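# A minimal sketch of how the module-level singletons above are used
# with auto-renewal (assumes a discovered ``device`` as in the module
# docstring; error handling omitted):
#
#     sub = device.avTransport.subscribe(requested_timeout=120,
#                                        auto_renew=True)
#     ...
#     sub.unsubscribe()
#     event_listener.stop()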
|
|
from . import AWSObject, AWSProperty, Tags
from .validators import (
boolean, double, integer_range, json_checker, positive_integer
)
def validate_authorizer_ttl(ttl_value):
""" Validate authorizer ttl timeout
:param ttl_value: The TTL timeout in seconds
:return: The provided TTL value if valid
"""
ttl_value = int(positive_integer(ttl_value))
if ttl_value > 3600:
raise ValueError("The AuthorizerResultTtlInSeconds should be <= 3600")
return ttl_value
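# For example, validate_authorizer_ttl("300") returns 300, while any
# value above 3600 seconds (e.g. validate_authorizer_ttl(7200)) raises
# ValueError.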
class AccessLogSetting(AWSProperty):
props = {
"DestinationArn": (basestring, False),
"Format": (basestring, False)
}
class Account(AWSObject):
resource_type = "AWS::ApiGateway::Account"
props = {
"CloudWatchRoleArn": (basestring, False)
}
class StageKey(AWSProperty):
props = {
"RestApiId": (basestring, False),
"StageName": (basestring, False)
}
class ApiKey(AWSObject):
resource_type = "AWS::ApiGateway::ApiKey"
props = {
"CustomerId": (basestring, False),
"Description": (basestring, False),
"Enabled": (boolean, False),
"GenerateDistinctId": (boolean, False),
"Name": (basestring, False),
"StageKeys": ([StageKey], False),
"Tags": (Tags, False),
"Value": (basestring, False)
}
class Authorizer(AWSObject):
resource_type = "AWS::ApiGateway::Authorizer"
props = {
"AuthType": (basestring, False),
"AuthorizerCredentials": (basestring, False),
"AuthorizerResultTtlInSeconds": (validate_authorizer_ttl, False),
"AuthorizerUri": (basestring, True),
"IdentitySource": (basestring, True),
"IdentityValidationExpression": (basestring, False),
"Name": (basestring, True),
"ProviderARNs": ([basestring], False),
"RestApiId": (basestring, False),
"Type": (basestring, True)
}
class BasePathMapping(AWSObject):
resource_type = "AWS::ApiGateway::BasePathMapping"
props = {
"BasePath": (basestring, False),
"DomainName": (basestring, True),
"RestApiId": (basestring, True),
"Stage": (basestring, False)
}
# Represents:
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigateway-stage-canarysetting.html
class CanarySetting(AWSProperty):
props = {
"DeploymentId": (basestring, False),
"PercentTraffic": ([double], False),
"StageVariableOverrides": (dict, False),
"UseStageCache": (boolean, False),
}
StageCanarySetting = CanarySetting
class ClientCertificate(AWSObject):
resource_type = "AWS::ApiGateway::ClientCertificate"
props = {
"Description": (basestring, False),
"Tags": (Tags, False),
}
# Represents:
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigateway-deployment-canarysetting.html
# and
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigateway-deployment-deploymentcanarysettings.html
class DeploymentCanarySettings(AWSProperty):
props = {
"PercentTraffic": ([double], False),
"StageVariableOverrides": (dict, False),
"UseStageCache": (boolean, False),
}
DeploymentCanarySetting = DeploymentCanarySettings
class MethodSetting(AWSProperty):
props = {
"CacheDataEncrypted": (bool, False),
"CacheTtlInSeconds": (positive_integer, False),
"CachingEnabled": (bool, False),
"DataTraceEnabled": (bool, False),
"HttpMethod": (basestring, True),
"LoggingLevel": (basestring, False),
"MetricsEnabled": (bool, False),
"ResourcePath": (basestring, True),
"ThrottlingBurstLimit": (positive_integer, False),
"ThrottlingRateLimit": (positive_integer, False)
}
class StageDescription(AWSProperty):
props = {
"AccessLogSetting": (AccessLogSetting, False),
"CacheClusterEnabled": (bool, False),
"CacheClusterSize": (basestring, False),
"CacheDataEncrypted": (bool, False),
"CacheTtlInSeconds": (positive_integer, False),
"CachingEnabled": (bool, False),
"CanarySetting": (DeploymentCanarySettings, False),
"ClientCertificateId": (basestring, False),
"DataTraceEnabled": (bool, False),
"Description": (basestring, False),
"LoggingLevel": (basestring, False),
"MethodSettings": ([MethodSetting], False),
"MetricsEnabled": (bool, False),
"StageName": (basestring, False),
"Tags": ((Tags, list), False),
"ThrottlingBurstLimit": (positive_integer, False),
"ThrottlingRateLimit": (positive_integer, False),
"Variables": (dict, False),
}
def validate(self):
if 'StageName' in self.properties:
raise DeprecationWarning(
"The StageName property has been deprecated "
"in StageDescription"
)
class Deployment(AWSObject):
resource_type = "AWS::ApiGateway::Deployment"
props = {
"DeploymentCanarySettings": (DeploymentCanarySettings, False),
"Description": (basestring, False),
"RestApiId": (basestring, True),
"StageDescription": (StageDescription, False),
"StageName": (basestring, False)
}
class Location(AWSProperty):
props = {
"Method": (basestring, False),
"Name": (basestring, False),
"Path": (basestring, False),
"StatusCode": (basestring, False),
"Type": (basestring, False),
}
class DocumentationPart(AWSObject):
resource_type = "AWS::ApiGateway::DocumentationPart"
props = {
"Location": (Location, True),
"Properties": (basestring, True),
"RestApiId": (basestring, True),
}
class DocumentationVersion(AWSObject):
resource_type = "AWS::ApiGateway::DocumentationVersion"
props = {
"Description": (basestring, False),
"DocumentationVersion": (basestring, True),
"RestApiId": (basestring, True),
}
class EndpointConfiguration(AWSProperty):
props = {
"Types": ([basestring], False),
"VpcEndpointIds": ([basestring], False),
}
class DomainName(AWSObject):
resource_type = "AWS::ApiGateway::DomainName"
props = {
"CertificateArn": (basestring, False),
"DomainName": (basestring, True),
"EndpointConfiguration": (EndpointConfiguration, False),
"RegionalCertificateArn": (basestring, False),
"SecurityPolicy": (basestring, False),
"Tags": (Tags, False),
}
class IntegrationResponse(AWSProperty):
props = {
"ContentHandling": (basestring, False),
"ResponseParameters": (dict, False),
"ResponseTemplates": (dict, False),
"SelectionPattern": (basestring, False),
"StatusCode": (basestring, False)
}
class Integration(AWSProperty):
props = {
"CacheKeyParameters": ([basestring], False),
"CacheNamespace": (basestring, False),
"ConnectionId": (basestring, False),
"ConnectionType": (basestring, False),
"ContentHandling": (basestring, False),
"Credentials": (basestring, False),
"IntegrationHttpMethod": (basestring, False),
"IntegrationResponses": ([IntegrationResponse], False),
"PassthroughBehavior": (basestring, False),
"RequestParameters": (dict, False),
"RequestTemplates": (dict, False),
"TimeoutInMillis": (integer_range(50, 29000), False),
"Type": (basestring, True),
"Uri": (basestring, False)
}
class MethodResponse(AWSProperty):
props = {
"ResponseModels": (dict, False),
"ResponseParameters": (dict, False),
"StatusCode": (basestring, True)
}
class Method(AWSObject):
resource_type = "AWS::ApiGateway::Method"
props = {
"ApiKeyRequired": (bool, False),
"AuthorizationScopes": ([basestring], False),
"AuthorizationType": (basestring, True),
"AuthorizerId": (basestring, False),
"HttpMethod": (basestring, True),
"Integration": (Integration, False),
"MethodResponses": ([MethodResponse], False),
"OperationName": (basestring, False),
"RequestModels": (dict, False),
"RequestParameters": (dict, False),
"RequestValidatorId": (basestring, False),
"ResourceId": (basestring, True),
"RestApiId": (basestring, True)
}
class Model(AWSObject):
resource_type = "AWS::ApiGateway::Model"
props = {
"ContentType": (basestring, False),
"Description": (basestring, False),
"Name": (basestring, False),
"RestApiId": (basestring, True),
"Schema": ((basestring, dict), False)
}
def validate(self):
name = 'Schema'
if name in self.properties:
schema = self.properties.get(name)
self.properties[name] = json_checker(schema)
class RequestValidator(AWSObject):
resource_type = "AWS::ApiGateway::RequestValidator"
props = {
"Name": (basestring, True),
"RestApiId": (basestring, True),
"ValidateRequestBody": (boolean, False),
"ValidateRequestParameters": (boolean, False),
}
class Resource(AWSObject):
resource_type = "AWS::ApiGateway::Resource"
props = {
"ParentId": (basestring, True),
"PathPart": (basestring, True),
"RestApiId": (basestring, True)
}
class S3Location(AWSProperty):
props = {
"Bucket": (basestring, False),
"ETag": (basestring, False),
"Key": (basestring, False),
"Version": (basestring, False)
}
class RestApi(AWSObject):
resource_type = "AWS::ApiGateway::RestApi"
props = {
"ApiKeySourceType": (basestring, False),
"BinaryMediaTypes": ([basestring], False),
"Body": (dict, False),
"BodyS3Location": (S3Location, False),
"CloneFrom": (basestring, False),
"Description": (basestring, False),
"EndpointConfiguration": (EndpointConfiguration, False),
"FailOnWarnings": (basestring, False),
"MinimumCompressionSize": (positive_integer, False),
"Name": (basestring, False),
"Parameters": (dict, False),
"Policy": (dict, False),
"Tags": (Tags, False),
}
class Stage(AWSObject):
resource_type = "AWS::ApiGateway::Stage"
props = {
"AccessLogSetting": (AccessLogSetting, False),
"CacheClusterEnabled": (bool, False),
"CacheClusterSize": (basestring, False),
"CanarySetting": (StageCanarySetting, False),
"ClientCertificateId": (basestring, False),
"DeploymentId": (basestring, True),
"Description": (basestring, False),
"DocumentationVersion": (basestring, False),
"MethodSettings": ([MethodSetting], False),
"RestApiId": (basestring, True),
"StageName": (basestring, True),
"Tags": ((Tags, list), False),
"TracingEnabled": (bool, False),
"Variables": (dict, False),
}
class QuotaSettings(AWSProperty):
props = {
"Limit": (positive_integer, False),
"Offset": (positive_integer, False),
"Period": (basestring, False),
}
class ThrottleSettings(AWSProperty):
props = {
"BurstLimit": (positive_integer, False),
"RateLimit": (positive_integer, False),
}
class ApiStage(AWSProperty):
props = {
"ApiId": (basestring, False),
"Stage": (basestring, False),
"Throttle": (ThrottleSettings, False),
}
class UsagePlan(AWSObject):
resource_type = "AWS::ApiGateway::UsagePlan"
props = {
"ApiStages": ([ApiStage], False),
"Description": (basestring, False),
"Quota": (QuotaSettings, False),
"Tags": (Tags, False),
"Throttle": (ThrottleSettings, False),
"UsagePlanName": (basestring, False),
}
class UsagePlanKey(AWSObject):
resource_type = "AWS::ApiGateway::UsagePlanKey"
props = {
"KeyId": (basestring, True),
"KeyType": (basestring, True),
"UsagePlanId": (basestring, True),
}
def validate_gateway_response_type(response_type):
""" Validate response type
:param response_type: The GatewayResponse response type
:return: The provided value if valid
"""
valid_response_types = [
"ACCESS_DENIED",
"API_CONFIGURATION_ERROR",
"AUTHORIZER_FAILURE",
"AUTHORIZER_CONFIGURATION_ERROR",
"BAD_REQUEST_PARAMETERS",
"BAD_REQUEST_BODY",
"DEFAULT_4XX",
"DEFAULT_5XX",
"EXPIRED_TOKEN",
"INVALID_SIGNATURE",
"INTEGRATION_FAILURE",
"INTEGRATION_TIMEOUT",
"INVALID_API_KEY",
"MISSING_AUTHENTICATION_TOKEN",
"QUOTA_EXCEEDED",
"REQUEST_TOO_LARGE",
"RESOURCE_NOT_FOUND",
"THROTTLED",
"UNAUTHORIZED",
"UNSUPPORTED_MEDIA_TYPE"
]
if response_type not in valid_response_types:
raise ValueError(
"{} is not a valid ResponseType".format(response_type)
)
return response_type
class GatewayResponse(AWSObject):
resource_type = "AWS::ApiGateway::GatewayResponse"
props = {
"ResponseParameters": (dict, False),
"ResponseTemplates": (dict, False),
"ResponseType": (validate_gateway_response_type, True),
"RestApiId": (basestring, True),
"StatusCode": (basestring, False)
}
class VpcLink(AWSObject):
resource_type = "AWS::ApiGateway::VpcLink"
props = {
'Description': (basestring, False),
'Name': (basestring, True),
'TargetArns': ([basestring], True),
}
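# A minimal usage sketch (illustrative only, not part of this module):
# wiring a few of the classes above into a troposphere Template. The
# resource names are hypothetical.
#
#     from troposphere import GetAtt, Ref, Template
#     t = Template()
#     api = t.add_resource(RestApi("ExampleApi", Name="example-api"))
#     t.add_resource(Resource(
#         "ExampleResource",
#         RestApiId=Ref(api),
#         ParentId=GetAtt(api, "RootResourceId"),
#         PathPart="items",
#     ))
#     print(t.to_json())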
|
|
import os
import pytest
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import time
import ray
import ray.ray_constants
import ray._private.gcs_utils as gcs_utils
from ray._private.test_utils import (
wait_for_condition,
convert_actor_state,
make_global_state_accessor,
)
# TODO(rliaw): The proper way to do this is to have the pytest config setup.
@pytest.mark.skipif(
pytest_timeout is None,
reason="Timeout package not installed; skipping test that may hang.",
)
@pytest.mark.timeout(30)
def test_replenish_resources(ray_start_regular):
cluster_resources = ray.cluster_resources()
available_resources = ray.available_resources()
assert cluster_resources == available_resources
@ray.remote
def cpu_task():
pass
ray.get(cpu_task.remote())
resources_reset = False
while not resources_reset:
available_resources = ray.available_resources()
resources_reset = cluster_resources == available_resources
assert resources_reset
@pytest.mark.skipif(
pytest_timeout is None,
reason="Timeout package not installed; skipping test that may hang.",
)
@pytest.mark.timeout(30)
def test_uses_resources(ray_start_regular):
cluster_resources = ray.cluster_resources()
@ray.remote
def cpu_task():
time.sleep(1)
cpu_task.remote()
resource_used = False
while not resource_used:
available_resources = ray.available_resources()
resource_used = (
available_resources.get("CPU", 0) == cluster_resources.get("CPU", 0) - 1
)
assert resource_used
@pytest.mark.skipif(
pytest_timeout is None,
reason="Timeout package not installed; skipping test that may hang.",
)
@pytest.mark.timeout(120)
def test_add_remove_cluster_resources(ray_start_cluster_head):
"""Tests that Global State API is consistent with actual cluster."""
cluster = ray_start_cluster_head
assert ray.cluster_resources()["CPU"] == 1
nodes = []
nodes += [cluster.add_node(num_cpus=1)]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 2
cluster.remove_node(nodes.pop())
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 1
for i in range(5):
nodes += [cluster.add_node(num_cpus=1)]
cluster.wait_for_nodes()
assert ray.cluster_resources()["CPU"] == 6
def test_global_state_actor_table(ray_start_regular):
@ray.remote
class Actor:
def ready(self):
return os.getpid()
# actor table should be empty at first
assert len(ray.state.actors()) == 0
# actor table should contain only one entry
def get_actor_table_data(field):
return list(ray.state.actors().values())[0][field]
a = Actor.remote()
pid = ray.get(a.ready.remote())
assert len(ray.state.actors()) == 1
assert get_actor_table_data("Pid") == pid
# actor table should contain only this entry
# even when the actor goes out of scope
del a
dead_state = convert_actor_state(gcs_utils.ActorTableData.DEAD)
for _ in range(10):
if get_actor_table_data("State") == dead_state:
break
else:
time.sleep(0.5)
assert get_actor_table_data("State") == dead_state
def test_global_state_worker_table(ray_start_regular):
# Get worker table from gcs.
workers_data = ray.state.workers()
assert len(workers_data) == 1
def test_global_state_actor_entry(ray_start_regular):
@ray.remote
class Actor:
def ready(self):
pass
# actor table should be empty at first
assert len(ray.state.actors()) == 0
a = Actor.remote()
b = Actor.remote()
ray.get(a.ready.remote())
ray.get(b.ready.remote())
assert len(ray.state.actors()) == 2
a_actor_id = a._actor_id.hex()
b_actor_id = b._actor_id.hex()
assert ray.state.actors(actor_id=a_actor_id)["ActorID"] == a_actor_id
assert ray.state.actors(actor_id=a_actor_id)["State"] == convert_actor_state(
gcs_utils.ActorTableData.ALIVE
)
assert ray.state.actors(actor_id=b_actor_id)["ActorID"] == b_actor_id
assert ray.state.actors(actor_id=b_actor_id)["State"] == convert_actor_state(
gcs_utils.ActorTableData.ALIVE
)
@pytest.mark.parametrize("max_shapes", [0, 2, -1])
def test_load_report(shutdown_only, max_shapes):
resource1 = "A"
resource2 = "B"
cluster = ray.init(
num_cpus=1,
resources={resource1: 1},
_system_config={
"max_resource_shapes_per_load_report": max_shapes,
},
)
global_state_accessor = make_global_state_accessor(cluster)
@ray.remote
def sleep():
time.sleep(1000)
sleep.remote()
for _ in range(3):
sleep.remote()
sleep.options(resources={resource1: 1}).remote()
sleep.options(resources={resource2: 1}).remote()
class Checker:
def __init__(self):
self.report = None
def check_load_report(self):
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
self.report = resource_usage.resource_load_by_shape.resource_demands
if max_shapes == 0:
return True
elif max_shapes == 2:
return len(self.report) >= 2
else:
return len(self.report) >= 3
# Wait for load information to arrive.
checker = Checker()
wait_for_condition(checker.check_load_report)
# Check that we respect the max shapes limit.
if max_shapes != -1:
assert len(checker.report) <= max_shapes
print(checker.report)
if max_shapes > 0:
# Check that we differentiate between infeasible and ready tasks.
for demand in checker.report:
if resource2 in demand.shape:
assert demand.num_infeasible_requests_queued > 0
assert demand.num_ready_requests_queued == 0
else:
assert demand.num_ready_requests_queued > 0
assert demand.num_infeasible_requests_queued == 0
global_state_accessor.disconnect()
def test_placement_group_load_report(ray_start_cluster):
cluster = ray_start_cluster
# Add a head node that doesn't have the custom resources used below.
cluster.add_node(num_cpus=4)
global_state_accessor = make_global_state_accessor(
ray.init(address=cluster.address)
)
class PgLoadChecker:
def nothing_is_ready(self):
resource_usage = self._read_resource_usage()
if not resource_usage:
return False
if resource_usage.HasField("placement_group_load"):
pg_load = resource_usage.placement_group_load
return len(pg_load.placement_group_data) == 2
return False
def only_first_one_ready(self):
resource_usage = self._read_resource_usage()
if not resource_usage:
return False
if resource_usage.HasField("placement_group_load"):
pg_load = resource_usage.placement_group_load
return len(pg_load.placement_group_data) == 1
return False
def two_infeasible_pg(self):
resource_usage = self._read_resource_usage()
if not resource_usage:
return False
if resource_usage.HasField("placement_group_load"):
pg_load = resource_usage.placement_group_load
return len(pg_load.placement_group_data) == 2
return False
def _read_resource_usage(self):
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
return resource_usage
checker = PgLoadChecker()
# Create 2 placement groups that are infeasible.
pg_feasible = ray.util.placement_group([{"A": 1}])
pg_infeasible = ray.util.placement_group([{"B": 1}])
_, unready = ray.wait([pg_feasible.ready(), pg_infeasible.ready()], timeout=0)
assert len(unready) == 2
wait_for_condition(checker.nothing_is_ready)
# Add a node that makes pg feasible. Make sure load include this change.
cluster.add_node(resources={"A": 1})
ray.get(pg_feasible.ready())
wait_for_condition(checker.only_first_one_ready)
# Create one more infeasible pg and make sure load is properly updated.
pg_infeasible_second = ray.util.placement_group([{"C": 1}])
_, unready = ray.wait([pg_infeasible_second.ready()], timeout=0)
assert len(unready) == 1
wait_for_condition(checker.two_infeasible_pg)
global_state_accessor.disconnect()
def test_backlog_report(shutdown_only):
cluster = ray.init(
num_cpus=1,
_system_config={"max_pending_lease_requests_per_scheduling_category": 1},
)
global_state_accessor = make_global_state_accessor(cluster)
@ray.remote(num_cpus=1)
def foo(x):
print(".")
time.sleep(x)
return None
def backlog_size_set():
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
aggregate_resource_load = resource_usage.resource_load_by_shape.resource_demands
if len(aggregate_resource_load) == 1:
backlog_size = aggregate_resource_load[0].backlog_size
print(backlog_size)
# Ideally we'd want to assert backlog_size == 8, but guaranteeing
# the order in which submissions occur is too hard/flaky.
return backlog_size > 0
return False
# We want this first task to finish
refs = [foo.remote(0.5)]
# These tasks should all start _before_ the first one finishes.
refs.extend([foo.remote(1000) for _ in range(9)])
# Now there's 1 request running, 1 queued in the raylet, and 8 queued in
# the worker backlog.
ray.get(refs[0])
# First request finishes, second request is now running, third lease
# request is sent to the raylet with backlog=7
wait_for_condition(backlog_size_set, timeout=2)
global_state_accessor.disconnect()
def test_heartbeat_ip(shutdown_only):
cluster = ray.init(num_cpus=1)
global_state_accessor = make_global_state_accessor(cluster)
self_ip = ray.util.get_node_ip_address()
def self_ip_is_set():
message = global_state_accessor.get_all_resource_usage()
if message is None:
return False
resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
resources_data = resource_usage.batch[0]
return resources_data.node_manager_address == self_ip
wait_for_condition(self_ip_is_set, timeout=2)
global_state_accessor.disconnect()
def test_next_job_id(ray_start_regular):
job_id_1 = ray.state.next_job_id()
job_id_2 = ray.state.next_job_id()
assert job_id_1.int() + 1 == job_id_2.int()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
|
# Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Tests for the various client classes."""
import sys
from mock import Mock, call
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.python import log
from twisted.trial.unittest import TestCase
import txstatsd.client
import txstatsd.metrics.metric
import txstatsd.metrics.metrics
from txstatsd.metrics.metric import Metric
from txstatsd.client import (
StatsDClientProtocol, TwistedStatsDClient, UdpStatsDClient,
ConsistentHashingClient
)
from txstatsd.protocol import DataQueue, TransportGateway
class FakeClient(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.data = []
self.connect_called = False
self.disconnect_called = False
def __str__(self):
return "%s:%d" % (self.host, self.port)
def write(self, data):
self.data.append(data)
def connect(self):
self.connect_called = True
def disconnect(self):
self.disconnect_called = True
class TestClient(TestCase):
def setUp(self):
super(TestClient, self).setUp()
self.client = None
self.exception = None
def tearDown(self):
if self.client:
self.client.transport.stopListening()
super(TestClient, self).tearDown()
def build_protocol(self):
protocol = StatsDClientProtocol(self.client)
reactor.listenUDP(0, protocol)
def test_twistedstatsd_write(self):
self.client = TwistedStatsDClient('127.0.0.1', 8000)
self.build_protocol()
self.client.host_resolved('127.0.0.1')
def ensure_bytes_sent(bytes_sent):
self.assertEqual(bytes_sent, len('message'))
def exercise(callback):
self.client.write('message', callback=callback)
d = Deferred()
d.addCallback(ensure_bytes_sent)
reactor.callWhenRunning(exercise, d.callback)
return d
@inlineCallbacks
def test_twistedstatsd_write_with_host_resolved(self):
self.client = TwistedStatsDClient.create(
'localhost', 8000)
self.build_protocol()
yield self.client.resolve_later
def ensure_bytes_sent(bytes_sent):
self.assertEqual(bytes_sent, len('message'))
self.assertEqual(self.client.host, '127.0.0.1')
def exercise(callback):
self.client.write('message', callback=callback)
d = Deferred()
d.addCallback(ensure_bytes_sent)
reactor.callWhenRunning(exercise, d.callback)
yield d
@inlineCallbacks
def test_twistedstatsd_with_malformed_address_and_errback(self):
exceptions_captured = []
def capture_exception_raised(failure):
exception = failure.getErrorMessage()
self.assertTrue(exception.startswith("DNS lookup failed"))
exceptions_captured.append(exception)
self.client = TwistedStatsDClient.create(
'256.0.0.0', 1,
resolver_errback=capture_exception_raised)
self.build_protocol()
yield self.client.resolve_later
self.assertEqual(len(exceptions_captured), 1)
@inlineCallbacks
def test_twistedstatsd_with_malformed_address_and_no_errback(self):
exceptions_captured = []
def capture_exception_raised(failure):
exception = failure.getErrorMessage()
self.assertTrue(exception.startswith("DNS lookup failed"))
exceptions_captured.append(exception)
self.patch(log, "err", capture_exception_raised)
self.client = TwistedStatsDClient.create(
'256.0.0.0', 1)
self.build_protocol()
yield self.client.resolve_later
self.assertEqual(len(exceptions_captured), 1)
def test_udpstatsd_wellformed_address(self):
client = UdpStatsDClient('localhost', 8000)
self.assertEqual(client.host, '127.0.0.1')
client = UdpStatsDClient(None, None)
self.assertEqual(client.host, None)
def test_udpstatsd_malformed_address(self):
self.assertRaises(ValueError,
UdpStatsDClient, 'localhost', -1)
self.assertRaises(ValueError,
UdpStatsDClient, 'localhost', 'malformed')
self.assertRaises(ValueError,
UdpStatsDClient, 0, 8000)
def test_udpstatsd_socket_nonblocking(self):
client = UdpStatsDClient('localhost', 8000)
client.connect()
# According to the python docs (and the source, I've checked)
# setblocking(0) is the same as settimeout(0.0).
self.assertEqual(client.socket.gettimeout(), 0.0)
def test_udp_client_can_be_imported_without_twisted(self):
"""Ensure that the twisted-less client can be used without twisted."""
unloaded = [(name, mod) for (name, mod) in sys.modules.items()
if 'twisted' in name]
def restore_modules():
for name, mod in unloaded:
sys.modules[name] = mod
reload(txstatsd.client)
reload(txstatsd.metrics.metrics)
reload(txstatsd.metrics.metric)
self.addCleanup(restore_modules)
# Mark everything twistedish as unavailable
for name, mod in unloaded:
sys.modules[name] = None
reload(txstatsd.client)
reload(txstatsd.metrics.metrics)
reload(txstatsd.metrics.metric)
for mod in sys.modules:
if 'twisted' in mod:
self.assertTrue(sys.modules[mod] is None)
def test_starts_with_data_queue(self):
"""The client starts with a DataQueue."""
self.client = TwistedStatsDClient('127.0.0.1', 8000)
self.build_protocol()
self.assertIsInstance(self.client.data_queue, DataQueue)
def test_starts_with_transport_gateway_if_ip(self):
"""The client starts without a TransportGateway."""
self.client = TwistedStatsDClient('127.0.0.1', 8000)
self.build_protocol()
self.assertTrue(self.client.transport_gateway is not None)
def test_starts_without_transport_gateway_if_not_ip(self):
"""The client starts without a TransportGateway."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
self.assertTrue(self.client.transport_gateway is None)
def test_passes_transport_to_gateway(self):
"""The client passes the transport to the gateway as soon as the client
is connected."""
self.client = TwistedStatsDClient('127.0.0.1', 8000)
self.build_protocol()
self.client.host_resolved('127.0.0.1')
self.assertEqual(self.client.transport_gateway.transport,
self.client.transport)
def test_passes_reactor_to_gateway(self):
"""The client passes the reactor to the gateway as soon as the client
is connected."""
self.client = TwistedStatsDClient('127.0.0.1', 8000)
self.build_protocol()
self.client.host_resolved('127.0.0.1')
self.assertEqual(self.client.transport_gateway.reactor,
self.client.reactor)
def test_sets_ip_when_host_resolves(self):
"""As soon as the host is resolved, set the IP as the host."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
self.assertEqual(self.client.host, 'localhost')
self.client.host_resolved('127.0.0.1')
self.assertEqual(self.client.host, '127.0.0.1')
def test_sets_transport_gateway_when_host_resolves(self):
"""As soon as the host is resolved, set the transport gateway."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
self.client.transport_gateway = None
self.client.host_resolved('127.0.0.1')
self.assertIsInstance(self.client.transport_gateway, TransportGateway)
def test_calls_connect_callback_when_host_resolves(self):
"""As soon as the host is resolved, call back the connect_callback."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
self.client.connect_callback = Mock()
self.client.host_resolved('127.0.0.1')
self.assertTrue(self.client.connect_callback.called)
self.client.connect_callback.assert_called_once_with()
def test_sends_messages_to_gateway_after_host_resolves(self):
"""After the host is resolved, send messages to the
TransportGateway."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
self.client.host_resolved('127.0.0.1')
message = 'some data'
bytes_sent = len(message)
self.client.data_queue = Mock(spec=DataQueue)
self.client.transport_gateway = Mock(spec=TransportGateway)
callback = Mock()
self.client.transport_gateway.write.return_value = bytes_sent
self.assertEqual(self.client.write(message, callback), bytes_sent)
self.client.transport_gateway.write.assert_called_once_with(
message, callback)
def test_sends_messages_to_queue_before_host_resolves(self):
"""Before the host is resolved, send messages to the DataQueue."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
message = 'some data'
self.client.data_queue = Mock(spec=DataQueue)
callback = Mock()
self.client.data_queue.write.return_value = None
result = self.client.write(message, callback)
self.client.data_queue.write.assert_called_once_with(message, callback)
self.assertEqual(result, None)
def test_flushes_queued_messages_to_the_gateway_when_host_resolves(self):
"""As soon as the host is resolved, flush all messages to the
TransportGateway."""
self.client = TwistedStatsDClient('localhost', 8000)
self.build_protocol()
self.client.data_queue.write('data 1', 'callback 1')
self.client.data_queue.write('data 2', 'callback 2')
self.client.data_queue.write('data 3', 'callback 3')
mock_gateway_write = Mock()
self.patch(TransportGateway, 'write', mock_gateway_write)
self.client.host_resolved('127.0.0.1')
        self.assertEqual(mock_gateway_write.call_count, 3)
expected = [call('data 1', 'callback 1'),
call('data 2', 'callback 2'),
call('data 3', 'callback 3')]
self.assertEqual(mock_gateway_write.call_args_list, expected)
def test_sets_client_transport_when_connected(self):
"""Set the transport as an attribute of the client."""
self.client = TwistedStatsDClient('localhost', 8000)
transport = DummyTransport()
self.client.connect(transport)
self.assertEqual(self.client.transport, transport)
def test_sets_gateway_transport_when_connected(self):
"""Set the transport as an attribute of the TransportGateway."""
self.client = TwistedStatsDClient('localhost', 8000)
self.client.host_resolved('127.0.0.1')
transport = DummyTransport()
self.client.connect(transport)
self.assertEqual(self.client.transport_gateway.transport, transport)
class DataQueueTest(TestCase):
"""Tests for the DataQueue class."""
def setUp(self):
super(DataQueueTest, self).setUp()
self.queue = DataQueue(limit=2)
def test_queues_messages_and_callbacks(self):
"""All messages are queued with their respective callbacks."""
self.queue.write(data=1, callback='1')
self.queue.write(data=2, callback='2')
self.assertEqual(self.queue.flush(), [
(1, '1'),
(2, '2'),
])
def test_flushes_the_queue(self):
"""All messages are queued with their respective callbacks."""
self.queue.write(data=1, callback='1')
self.queue.write(data=2, callback='2')
self.queue.flush()
self.assertEqual(self.queue.flush(), [])
def test_limits_number_of_messages(self):
"""Cannot save more messages than the defined limit."""
self.queue.write('saved data', 'saved callback')
self.queue.write('saved data', 'saved callback')
self.queue.write('discarded data', 'discarded message')
self.assertEqual(len(self.queue.flush()), 2)
def test_discards_messages_after_limit(self):
"""Cannot save more messages than the defined limit."""
self.queue.write('saved data', 'saved callback')
self.queue.write('saved data', 'saved callback')
self.queue.write('discarded data', 'discarded message')
self.assertEqual(set(self.queue.flush()),
set([('saved data', 'saved callback')]))
def test_makes_limit_optional(self):
"""Use the default limit when not given."""
queue = DataQueue()
self.assertTrue(queue._limit > 0)
class TestConsistentHashingClient(TestCase):
def test_hash_with_single_client(self):
clients = [
FakeClient("127.0.0.1", 10001),
]
client = ConsistentHashingClient(clients)
bar = Metric(client, "bar")
foo = Metric(client, "foo")
dba = Metric(client, "dba")
bar.send("1")
foo.send("1")
dba.send("1")
self.assertEqual(clients[0].data, ["bar:1",
"foo:1",
"dba:1"])
def test_hash_with_two_clients(self):
clients = [
FakeClient("127.0.0.1", 10001),
FakeClient("127.0.0.1", 10002),
]
client = ConsistentHashingClient(clients)
bar = Metric(client, "bar")
foo = Metric(client, "foo")
dba = Metric(client, "dba")
bar.send("1")
foo.send("1")
dba.send("1")
self.assertEqual(clients[0].data, ["bar:1",
"dba:1"])
self.assertEqual(clients[1].data, ["foo:1"])
def test_hash_with_three_clients(self):
clients = [
FakeClient("127.0.0.1", 10001),
FakeClient("127.0.0.1", 10002),
FakeClient("127.0.0.1", 10003),
]
client = ConsistentHashingClient(clients)
bar = Metric(client, "bar")
foo = Metric(client, "foo")
dba = Metric(client, "dba")
bar.send("1")
foo.send("1")
dba.send("1")
self.assertEqual(clients[0].data, ["bar:1"])
self.assertEqual(clients[1].data, ["foo:1"])
self.assertEqual(clients[2].data, ["dba:1"])
def test_connect_with_two_clients(self):
clients = [
FakeClient("127.0.0.1", 10001),
FakeClient("127.0.0.1", 10002),
]
client = ConsistentHashingClient(clients)
client.connect()
self.assertTrue(clients[0].connect_called)
self.assertTrue(clients[1].connect_called)
def test_disconnect_with_two_clients(self):
clients = [
FakeClient("127.0.0.1", 10001),
FakeClient("127.0.0.1", 10002),
]
client = ConsistentHashingClient(clients)
client.disconnect()
self.assertTrue(clients[0].disconnect_called)
self.assertTrue(clients[1].disconnect_called)
class DummyTransport(object):
def stopListening(self):
pass
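# A minimal usage sketch, not part of the test suite: it reuses the client
# classes imported above and only illustrates the behaviour the tests cover.
# The two UDP ports (10001/10002) and the metric name are illustrative; a
# statsd-style daemon listening on them is an assumption.
if __name__ == '__main__':
    sharded = ConsistentHashingClient([
        UdpStatsDClient('127.0.0.1', 10001),
        UdpStatsDClient('127.0.0.1', 10002),
    ])
    sharded.connect()
    # The metric name is hashed onto exactly one of the underlying clients.
    Metric(sharded, 'example.counter').send('1|c')
    sharded.disconnect()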
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
from datetime import datetime
from unittest import mock
from urllib.parse import parse_qs, urlparse
from google.cloud.logging import Resource
from google.cloud.logging_v2.types import ListLogEntriesRequest, ListLogEntriesResponse, LogEntry
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.operators.dummy import DummyOperator
from airflow.providers.google.cloud.log.stackdriver_task_handler import StackdriverTaskHandler
from airflow.utils.state import State
def _create_list_log_entries_response_mock(messages, token):
return ListLogEntriesResponse(
entries=[LogEntry(json_payload={"message": message}) for message in messages], next_page_token=token
)
def _remove_stackdriver_handlers():
for handler_ref in reversed(logging._handlerList[:]):
handler = handler_ref()
if not isinstance(handler, StackdriverTaskHandler):
continue
logging._removeHandlerRef(handler_ref)
del handler
class TestStackdriverLoggingHandlerStandalone(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_pass_message_to_client(self, mock_client, mock_get_creds_and_project_id):
self.addCleanup(_remove_stackdriver_handlers)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
transport_type = mock.MagicMock()
stackdriver_task_handler = StackdriverTaskHandler(transport=transport_type, labels={"key": 'value'})
logger = logging.getLogger("logger")
logger.addHandler(stackdriver_task_handler)
logger.info("test-message")
stackdriver_task_handler.flush()
transport_type.assert_called_once_with(mock_client.return_value, 'airflow')
transport_type.return_value.send.assert_called_once_with(
mock.ANY, 'test-message', labels={"key": 'value'}, resource=Resource(type='global', labels={})
)
mock_client.assert_called_once_with(credentials='creds', client_info=mock.ANY, project="project_id")
class TestStackdriverLoggingHandlerTask(unittest.TestCase):
def setUp(self) -> None:
self.transport_mock = mock.MagicMock()
self.stackdriver_task_handler = StackdriverTaskHandler(transport=self.transport_mock)
self.logger = logging.getLogger("logger")
date = datetime(2016, 1, 1)
self.dag = DAG('dag_for_testing_file_task_handler', start_date=date)
task = DummyOperator(task_id='task_for_testing_file_log_handler', dag=self.dag)
self.ti = TaskInstance(task=task, execution_date=date)
self.ti.try_number = 1
self.ti.state = State.RUNNING
self.addCleanup(self.dag.clear)
self.addCleanup(_remove_stackdriver_handlers)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_set_labels(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
self.stackdriver_task_handler.set_context(self.ti)
self.logger.addHandler(self.stackdriver_task_handler)
self.logger.info("test-message")
self.stackdriver_task_handler.flush()
labels = {
'task_id': 'task_for_testing_file_log_handler',
'dag_id': 'dag_for_testing_file_task_handler',
'execution_date': '2016-01-01T00:00:00+00:00',
'try_number': '1',
}
resource = Resource(type='global', labels={})
self.transport_mock.return_value.send.assert_called_once_with(
mock.ANY, 'test-message', labels=labels, resource=resource
)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_append_labels(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
self.stackdriver_task_handler = StackdriverTaskHandler(
transport=self.transport_mock, labels={"product.googleapis.com/task_id": "test-value"}
)
self.stackdriver_task_handler.set_context(self.ti)
self.logger.addHandler(self.stackdriver_task_handler)
self.logger.info("test-message")
self.stackdriver_task_handler.flush()
labels = {
'task_id': 'task_for_testing_file_log_handler',
'dag_id': 'dag_for_testing_file_task_handler',
'execution_date': '2016-01-01T00:00:00+00:00',
'try_number': '1',
'product.googleapis.com/task_id': 'test-value',
}
resource = Resource(type='global', labels={})
self.transport_mock.return_value.send.assert_called_once_with(
mock.ANY, 'test-message', labels=labels, resource=resource
)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_read_logs_for_all_try(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_log_entries.return_value.pages = iter(
[_create_list_log_entries_response_mock(["MSG1", "MSG2"], None)]
)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata = self.stackdriver_task_handler.read(self.ti)
mock_client.return_value.list_log_entries.assert_called_once_with(
request=ListLogEntriesRequest(
resource_names=["projects/project_id"],
filter=(
'resource.type="global"\n'
'logName="projects/project_id/logs/airflow"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"'
),
order_by='timestamp asc',
page_size=1000,
page_token=None,
)
)
assert [(('default-hostname', 'MSG1\nMSG2'),)] == logs
assert [{'end_of_log': True}] == metadata
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_read_logs_for_task_with_quote(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_log_entries.return_value.pages = iter(
[_create_list_log_entries_response_mock(["MSG1", "MSG2"], None)]
)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
self.ti.task_id = "K\"OT"
logs, metadata = self.stackdriver_task_handler.read(self.ti)
mock_client.return_value.list_log_entries.assert_called_once_with(
request=ListLogEntriesRequest(
resource_names=["projects/project_id"],
filter=(
'resource.type="global"\n'
'logName="projects/project_id/logs/airflow"\n'
'labels.task_id="K\\"OT"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"'
),
order_by='timestamp asc',
page_size=1000,
page_token=None,
)
)
assert [(('default-hostname', 'MSG1\nMSG2'),)] == logs
assert [{'end_of_log': True}] == metadata
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_read_logs_for_single_try(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_log_entries.return_value.pages = iter(
[_create_list_log_entries_response_mock(["MSG1", "MSG2"], None)]
)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata = self.stackdriver_task_handler.read(self.ti, 3)
mock_client.return_value.list_log_entries.assert_called_once_with(
request=ListLogEntriesRequest(
resource_names=["projects/project_id"],
filter=(
'resource.type="global"\n'
'logName="projects/project_id/logs/airflow"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"\n'
'labels.try_number="3"'
),
order_by='timestamp asc',
page_size=1000,
page_token=None,
)
)
assert [(('default-hostname', 'MSG1\nMSG2'),)] == logs
assert [{'end_of_log': True}] == metadata
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_read_logs_with_pagination(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_log_entries.side_effect = [
mock.MagicMock(pages=iter([_create_list_log_entries_response_mock(["MSG1", "MSG2"], "TOKEN1")])),
mock.MagicMock(pages=iter([_create_list_log_entries_response_mock(["MSG3", "MSG4"], None)])),
]
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata1 = self.stackdriver_task_handler.read(self.ti, 3)
mock_client.return_value.list_log_entries.assert_called_once_with(
request=ListLogEntriesRequest(
resource_names=["projects/project_id"],
                filter=(
                    'resource.type="global"\n'
                    'logName="projects/project_id/logs/airflow"\n'
                    'labels.task_id="task_for_testing_file_log_handler"\n'
                    'labels.dag_id="dag_for_testing_file_task_handler"\n'
                    'labels.execution_date="2016-01-01T00:00:00+00:00"\n'
                    'labels.try_number="3"'
                ),
order_by='timestamp asc',
page_size=1000,
page_token=None,
)
)
assert [(('default-hostname', 'MSG1\nMSG2'),)] == logs
assert [{'end_of_log': False, 'next_page_token': 'TOKEN1'}] == metadata1
mock_client.return_value.list_log_entries.return_value.next_page_token = None
logs, metadata2 = self.stackdriver_task_handler.read(self.ti, 3, metadata1[0])
mock_client.return_value.list_log_entries.assert_called_with(
request=ListLogEntriesRequest(
resource_names=["projects/project_id"],
filter=(
'resource.type="global"\n'
'logName="projects/project_id/logs/airflow"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"\n'
'labels.try_number="3"'
),
order_by='timestamp asc',
page_size=1000,
page_token="TOKEN1",
)
)
assert [(('default-hostname', 'MSG3\nMSG4'),)] == logs
assert [{'end_of_log': True}] == metadata2
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_read_logs_with_download(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_log_entries.side_effect = [
mock.MagicMock(pages=iter([_create_list_log_entries_response_mock(["MSG1", "MSG2"], "TOKEN1")])),
mock.MagicMock(pages=iter([_create_list_log_entries_response_mock(["MSG3", "MSG4"], None)])),
]
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata1 = self.stackdriver_task_handler.read(self.ti, 3, {'download_logs': True})
assert [(('default-hostname', 'MSG1\nMSG2\nMSG3\nMSG4'),)] == logs
assert [{'end_of_log': True}] == metadata1
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_read_logs_with_custom_resources(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
resource = Resource(
type="cloud_composer_environment",
labels={
"environment.name": 'test-instance',
"location": 'europe-west-3',
"project_id": "project_id",
},
)
self.stackdriver_task_handler = StackdriverTaskHandler(
transport=self.transport_mock, resource=resource
)
entry = mock.MagicMock(json_payload={"message": "TEXT"})
page = mock.MagicMock(entries=[entry, entry], next_page_token=None)
mock_client.return_value.list_log_entries.return_value.pages = (n for n in [page])
logs, metadata = self.stackdriver_task_handler.read(self.ti)
mock_client.return_value.list_log_entries.assert_called_once_with(
request=ListLogEntriesRequest(
resource_names=["projects/project_id"],
filter=(
'resource.type="cloud_composer_environment"\n'
'logName="projects/project_id/logs/airflow"\n'
'resource.labels."environment.name"="test-instance"\n'
'resource.labels.location="europe-west-3"\n'
'resource.labels.project_id="project_id"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"'
),
order_by='timestamp asc',
page_size=1000,
page_token=None,
)
)
assert [(('default-hostname', 'TEXT\nTEXT'),)] == logs
assert [{'end_of_log': True}] == metadata
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_use_credentials(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
stackdriver_task_handler = StackdriverTaskHandler(
gcp_key_path="KEY_PATH",
)
client = stackdriver_task_handler._client
mock_get_creds_and_project_id.assert_called_once_with(
disable_logging=True,
key_path='KEY_PATH',
scopes=frozenset(
{
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/logging.read',
}
),
)
mock_client.assert_called_once_with(credentials='creds', client_info=mock.ANY, project="project_id")
assert mock_client.return_value == client
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.LoggingServiceV2Client')
def test_should_return_valid_external_url(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
stackdriver_task_handler = StackdriverTaskHandler(
gcp_key_path="KEY_PATH",
)
url = stackdriver_task_handler.get_external_log_url(self.ti, self.ti.try_number)
parsed_url = urlparse(url)
parsed_qs = parse_qs(parsed_url.query)
assert 'https' == parsed_url.scheme
assert 'console.cloud.google.com' == parsed_url.netloc
assert '/logs/viewer' == parsed_url.path
assert {'project', 'interval', 'resource', 'advancedFilter'} == set(parsed_qs.keys())
assert 'global' in parsed_qs['resource']
filter_params = parsed_qs['advancedFilter'][0].split('\n')
expected_filter = [
'resource.type="global"',
'logName="projects/project_id/logs/airflow"',
f'labels.task_id="{self.ti.task_id}"',
f'labels.dag_id="{self.dag.dag_id}"',
f'labels.execution_date="{self.ti.execution_date.isoformat()}"',
f'labels.try_number="{self.ti.try_number}"',
]
assert set(expected_filter) == set(filter_params)
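# A minimal wiring sketch, not part of the tests: it attaches a
# StackdriverTaskHandler to an ordinary logger, much like the tests above do
# with a mocked transport. It assumes application default credentials are
# available; the logger name and label values are illustrative only.
if __name__ == '__main__':
    example_handler = StackdriverTaskHandler(labels={"environment": "dev"})
    example_logger = logging.getLogger("example_logger")
    example_logger.setLevel(logging.INFO)
    example_logger.addHandler(example_handler)
    example_logger.info("hello from the Stackdriver handler")
    example_handler.flush()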
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from py4j.java_gateway import get_java_class
from typing import Optional
from pyflink.java_gateway import get_gateway
__all__ = [
'CheckpointStorage',
'JobManagerCheckpointStorage',
'FileSystemCheckpointStorage',
'CustomCheckpointStorage']
def _from_j_checkpoint_storage(j_checkpoint_storage):
if j_checkpoint_storage is None:
return None
gateway = get_gateway()
JCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.CheckpointStorage
JJobManagerCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage \
.JobManagerCheckpointStorage
JFileSystemCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage \
.FileSystemCheckpointStorage
j_clz = j_checkpoint_storage.getClass()
if not get_java_class(JCheckpointStorage).isAssignableFrom(j_clz):
raise TypeError("%s is not an instance of CheckpointStorage." % j_checkpoint_storage)
if get_java_class(JJobManagerCheckpointStorage).isAssignableFrom(j_clz):
return JobManagerCheckpointStorage(j_jobmanager_checkpoint_storage=j_checkpoint_storage)
elif get_java_class(JFileSystemCheckpointStorage).isAssignableFrom(j_clz):
return FileSystemCheckpointStorage(j_filesystem_checkpoint_storage=j_checkpoint_storage)
else:
return CustomCheckpointStorage(j_checkpoint_storage)
class CheckpointStorage(object, metaclass=ABCMeta):
"""
Checkpoint storage defines how :class:`StateBackend`'s store their state for fault-tolerance
in streaming applications. Various implementations store their checkpoints in different fashions
and have different requirements and availability guarantees.
For example, :class:`JobManagerCheckpointStorage` stores checkpoints in the memory of the
`JobManager`. It is lightweight and without additional dependencies but is not scalable
    and only supports small state sizes. This checkpoint storage policy is convenient for local
testing and development.
    :class:`FileSystemCheckpointStorage` stores checkpoints in a filesystem. For systems like HDFS,
    NFS drives, S3, and GCS, this storage policy supports large state sizes, in the magnitude of
    many terabytes, while providing a highly available foundation for streaming applications. This
checkpoint storage policy is recommended for most production deployments.
**Raw Bytes Storage**
The `CheckpointStorage` creates services for raw bytes storage.
The raw bytes storage (through the CheckpointStreamFactory) is the fundamental service that
simply stores bytes in a fault tolerant fashion. This service is used by the JobManager to
store checkpoint and recovery metadata and is typically also used by the keyed- and operator-
state backends to store checkpoint state.
**Serializability**
    Implementations need to be serializable (`java.io.Serializable`), because they are distributed
across parallel processes (for distributed execution) together with the streaming application
code.
Because of that `CheckpointStorage` implementations are meant to be like _factories_ that create
the proper state stores that provide access to the persistent layer. That way, the storage
policy can be very lightweight (contain only configurations) which makes it easier to be
serializable.
**Thread Safety**
Checkpoint storage implementations have to be thread-safe. Multiple threads may be creating
streams concurrently.
"""
def __init__(self, j_checkpoint_storage):
self._j_checkpoint_storage = j_checkpoint_storage
class JobManagerCheckpointStorage(CheckpointStorage):
"""
The `CheckpointStorage` checkpoints state directly to the JobManager's memory (hence the
name), but savepoints will be persisted to a file system.
This checkpoint storage is primarily for experimentation, quick local setups, or for streaming
applications that have very small state: Because it requires checkpoints to go through the
JobManager's memory, larger state will occupy larger portions of the JobManager's main memory,
reducing operational stability. For any other setup, the `FileSystemCheckpointStorage`
    should be used. The `FileSystemCheckpointStorage` checkpoints state directly to files
rather than to the JobManager's memory, thus supporting larger state sizes and more highly
available recovery.
**State Size Considerations**
State checkpointing with this checkpoint storage is subject to the following conditions:
- Each individual state must not exceed the configured maximum state size
      (see :func:`get_max_state_size`).
- All state from one task (i.e., the sum of all operator states and keyed states from all
chained operators of the task) must not exceed what the RPC system supports, which is
      by default < 10 MB. That limit can be configured up, but that is typically not advised.
- The sum of all states in the application times all retained checkpoints must comfortably
fit into the JobManager's JVM heap space.
**Persistence Guarantees**
For the use cases where the state sizes can be handled by this storage, it does
    guarantee persistence for savepoints, externalized checkpoints (if configured), and checkpoints
(when high-availability is configured).
**Configuration**
As for all checkpoint storage, this type can either be configured within the application (by
creating the storage with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the storage was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
# The default maximal size that the snapshotted memory state may have (5 MiBytes).
DEFAULT_MAX_STATE_SIZE = 5 * 1024 * 1024
def __init__(self,
checkpoint_path=None,
max_state_size=None,
j_jobmanager_checkpoint_storage=None):
"""
Creates a new JobManagerCheckpointStorage, setting optionally the paths to persist
checkpoint metadata to, as well as configuring state thresholds.
WARNING: Increasing the size of this value beyond the default value
(:data:`DEFAULT_MAX_STATE_SIZE`) should be done with care.
        The checkpointed state needs to be sent to the JobManager via limited size RPC messages,
        and the JobManager needs to be able to hold all aggregated state in its memory.
Example:
::
>>> checkpoint_storage = JobManagerCheckpointStorage()
:param checkpoint_path: The path to write checkpoint metadata to. If none, the value from
the runtime configuration will be used.
:param max_state_size: The maximal size of the serialized state. If none, the
:data:`DEFAULT_MAX_STATE_SIZE` will be used.
:param j_jobmanager_checkpoint_storage: For internal use, please keep none.
"""
if j_jobmanager_checkpoint_storage is None:
gateway = get_gateway()
JJobManagerCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage\
.JobManagerCheckpointStorage
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_path is not None:
checkpoint_path = JPath(checkpoint_path)
if max_state_size is None:
max_state_size = JJobManagerCheckpointStorage.DEFAULT_MAX_STATE_SIZE
j_jobmanager_checkpoint_storage = JJobManagerCheckpointStorage(checkpoint_path,
max_state_size)
super(JobManagerCheckpointStorage, self).__init__(j_jobmanager_checkpoint_storage)
def get_checkpoint_path(self) -> Optional[str]:
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
j_path = self._j_checkpoint_storage.getCheckpointPath()
if j_path is None:
return None
else:
return j_path.toString()
def get_max_state_size(self) -> int:
"""
Gets the maximum size that an individual state can have, as configured in the
constructor. By default :data:`DEFAULT_MAX_STATE_SIZE` will be used.
"""
return self._j_checkpoint_storage.getMaxStateSize()
def get_savepoint_path(self) -> Optional[str]:
"""
Gets the base directory where all the savepoints are stored.
The job-specific savepoint directory is created inside this directory.
:return: The base directory for savepoints.
"""
j_path = self._j_checkpoint_storage.getSavepointPath()
if j_path is None:
return None
else:
return j_path.toString()
def __str__(self):
return self._j_checkpoint_storage.toString()
class FileSystemCheckpointStorage(CheckpointStorage):
"""
`FileSystemCheckpointStorage` checkpoints state as files to a filesystem.
Each checkpoint will store all its files in a subdirectory that includes the
    checkpoint number, such as `hdfs://namenode:port/flink-checkpoints/chk-17/`.
**State Size Considerations**
This checkpoint storage stores small state chunks directly with the metadata, to avoid creating
many small files. The threshold for that is configurable. When increasing this threshold, the
size of the checkpoint metadata increases. The checkpoint metadata of all retained completed
checkpoints needs to fit into the JobManager's heap memory. This is typically not a problem,
    unless the threshold `get_min_file_size_threshold` is increased significantly.
**Persistence Guarantees**
Checkpoints from this checkpoint storage are as persistent and available as the filesystem
that it is written to. If the file system is a persistent distributed file system, this
checkpoint storage supports highly available setups. The backend additionally supports
savepoints and externalized checkpoints.
**Configuration**
As for all checkpoint storage policies, this backend can either be configured within the
application (by creating the storage with the respective constructor parameters and setting
it on the execution environment) or by specifying it in the Flink configuration.
If the checkpoint storage was specified in the application, it may pick up additional
configuration parameters from the Flink configuration. For example, if the storage is configured
in the application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster.
"""
# Maximum size of state that is stored with the metadata, rather than in files (1 MiByte).
MAX_FILE_STATE_THRESHOLD = 1024 * 1024
def __init__(self,
checkpoint_path=None,
file_state_size_threshold=None,
write_buffer_size=-1,
j_filesystem_checkpoint_storage=None):
"""
Creates a new FileSystemCheckpointStorage, setting the paths for the checkpoint data
in a file system.
All file systems for the file system scheme in the URI (e.g., `file://`, `hdfs://`, or
`s3://`) must be accessible via `FileSystem#get`.
For a Job targeting HDFS, this means that the URI must either specify the authority (host
        and port), or the Hadoop configuration that describes that information must be in the
classpath.
Example:
::
>>> checkpoint_storage = FileSystemCheckpointStorage("hdfs://checkpoints")
        :param checkpoint_path: The path to write checkpoint metadata to. This argument is
                                required and must not be None.
:param file_state_size_threshold: State below this size will be stored as part of the
metadata, rather than in files. If -1, the value configured
in the runtime configuration will be used, or the default
value (1KB) if nothing is configured.
:param write_buffer_size: Write buffer size used to serialize state. If -1, the value
configured in the runtime configuration will be used, or the
default value (4KB) if nothing is configured.
:param j_filesystem_checkpoint_storage: For internal use, please keep none.
"""
if j_filesystem_checkpoint_storage is None:
gateway = get_gateway()
JFileSystemCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage\
.FileSystemCheckpointStorage
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_path is None:
raise ValueError("checkpoint_path must not be None")
else:
checkpoint_path = JPath(checkpoint_path)
if file_state_size_threshold is None:
file_state_size_threshold = -1
j_filesystem_checkpoint_storage = JFileSystemCheckpointStorage(
checkpoint_path,
file_state_size_threshold,
write_buffer_size)
super(FileSystemCheckpointStorage, self).__init__(j_filesystem_checkpoint_storage)
def get_checkpoint_path(self) -> str:
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
return self._j_checkpoint_storage.getCheckpointPath().toString()
def get_savepoint_path(self) -> Optional[str]:
"""
Gets the base directory where all the savepoints are stored.
The job-specific savepoint directory is created inside this directory.
:return: The base directory for savepoints.
"""
j_path = self._j_checkpoint_storage.getSavepointPath()
if j_path is None:
return None
else:
return j_path.toString()
def get_min_file_size_threshold(self) -> int:
"""
        Gets the threshold below which state is stored as part of the metadata, rather than in
        files. This threshold ensures the backend does not create a large number of small files,
        where potentially the file pointers are larger than the state itself.
"""
return self._j_checkpoint_storage.getMinFileSizeThreshold()
def get_write_buffer_size(self) -> int:
"""
Gets the write buffer size for created checkpoint streams.
"""
return self._j_checkpoint_storage.getWriteBufferSize()
def __str__(self):
return self._j_checkpoint_storage.toString()
class CustomCheckpointStorage(CheckpointStorage):
"""
A wrapper of customized java checkpoint storage.
"""
def __init__(self, j_custom_checkpoint_storage):
super(CustomCheckpointStorage, self).__init__(j_custom_checkpoint_storage)
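# A minimal construction sketch, illustrative only: it builds the two concrete
# storages documented above. It assumes a running JVM gateway (as started by
# pyflink); attaching a storage to a job is done through the checkpoint
# configuration of the execution environment and is not shown here.
if __name__ == '__main__':
    jm_storage = JobManagerCheckpointStorage(max_state_size=2 * 1024 * 1024)
    fs_storage = FileSystemCheckpointStorage("file:///tmp/flink-checkpoints")
    print(jm_storage.get_max_state_size())
    print(fs_storage.get_checkpoint_path())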
|
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import map
from builtins import range
from builtins import str
from builtins import zip
from functools import reduce
from past.utils import old_div
if __name__ == '__main__':
pass
import argparse
import csv
import hashlib
import itertools
import logging
import math
import os
import sqlalchemy.orm.exc
import subprocess
import sys
from collections import defaultdict
from fn import _
from fn import Stream
from fn.iters import repeat
from pprint import pprint
import opentuner
from opentuner import resultsdb
from opentuner.resultsdb.models import *
log = logging.getLogger('opentuner.utils.stats')
argparser = argparse.ArgumentParser()
argparser.add_argument('--label')
argparser.add_argument('--stats', action='store_true',
help="run in stats mode")
argparser.add_argument('--by-request-count', action='store_true',
help='report stats by request count')
argparser.add_argument('--stats-quanta', type=float, default=10,
help="step size in seconds for binning with --stats")
argparser.add_argument('--stats-dir', default='stats',
help="directory to output --stats to")
argparser.add_argument('--stats-input', default="opentuner.db")
argparser.add_argument('--min-runs', type=int, default=1,
help="ignore series with less then N runs")
PCTSTEPS = list(map(old_div(_, 20.0), list(range(21))))
def mean(vals):
n = 0.0
d = 0.0
for v in vals:
if v is not None:
n += v
d += 1.0
if d == 0.0:
return None
return old_div(n, d)
def median(vals):
vals = sorted(vals)
a = old_div((len(vals) - 1), 2)
b = old_div((len(vals)), 2)
return old_div((vals[a] + vals[b]), 2.0)
def percentile(vals, pct):
vals = sorted(vals)
pos = (len(vals) - 1) * pct
a = int(math.floor(pos))
b = min(len(vals) - 1, a + 1)
return (1.0 - (pos - a)) * vals[a] + (pos - a) * vals[b]
def variance(vals):
vals = [x for x in vals if x is not None]
avg = mean(vals)
if avg is None:
return None
if avg in (float('inf'), float('-inf')):
return avg
return mean(list(map((_ - avg) ** 2, vals)))
def stddev(vals):
var = variance(vals)
if var is None:
return None
return math.sqrt(var)
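# Worked example for the helpers above (hand-checked): with vals = [1, 3, 5, 7]
#   mean(vals)            -> 4.0
#   median(vals)          -> 4.0    ((3 + 5) / 2)
#   percentile(vals, 0.5) -> 4.0
#   variance(vals)        -> 5.0    (population variance)
#   stddev(vals)          -> sqrt(5), roughly 2.236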
def hash_args(x):
d = dict(vars(x))
for k in ('database', 'results_log', 'results_log_details'):
d[k] = None
  return hashlib.sha256(str(sorted(d.items())).encode('utf-8')).hexdigest()[:20]
def run_label(tr, short=False):
techniques = ','.join(tr.args.technique)
if not tr.name or tr.name == 'unnamed':
if short:
return techniques
else:
return "%s_%s" % (techniques, hash_args(tr.args)[:6])
else:
return tr.name
def run_dir(base, tr):
return os.path.join(base,
tr.program.project,
tr.program.name.split('/')[-1],
tr.program_version.version[:16])
class StatsMain(object):
def __init__(self, args):
self.args = args
path = args.stats_input
self.dbs = list()
for f in os.listdir(path):
if 'journal' in f:
continue
try:
e, sm = resultsdb.connect('sqlite:///' + os.path.join(path, f))
self.dbs.append(sm())
except:
log.error('failed to load database: %s',
os.path.join(path, f),
exc_info=True)
def main(self):
dir_label_runs = defaultdict(lambda: defaultdict(list))
for session in self.dbs:
q = (session.query(resultsdb.models.TuningRun)
.filter_by(state='COMPLETE')
.order_by('name'))
if self.args.label:
q = q.filter(TuningRun.name.in_(
list(map(str.strip, self.args.label.split(',')))))
for tr in q:
d = run_dir(self.args.stats_dir, tr)
d = os.path.normpath(d)
dir_label_runs[d][run_label(tr)].append((tr, session))
summary_report = defaultdict(lambda: defaultdict(list))
for d, label_runs in list(dir_label_runs.items()):
if not os.path.isdir(d):
os.makedirs(d)
session = list(label_runs.values())[0][0][1]
objective = list(label_runs.values())[0][0][0].objective
all_run_ids = list(map(_[0].id, itertools.chain(*list(label_runs.values()))))
q = (session.query(Result)
.filter(Result.tuning_run_id.in_(all_run_ids))
.filter(Result.time < float('inf'))
.filter_by(was_new_best=True, state='OK'))
total = q.count()
if total == 0:
continue
q = objective.filter_acceptable(q)
acceptable = q.count()
q = q.order_by(*objective.result_order_by_terms())
best = q.limit(1).one()
worst = q.offset(acceptable - 1).limit(1).one()
list(map(len, list(label_runs.values())))
log.info("%s -- best %.4f / worst %.f4 "
"-- %d of %d acceptable -- %d techniques with %d to %d runs",
d,
best.time,
worst.time,
acceptable,
total,
len(list(label_runs.values())),
min(list(map(len, list(label_runs.values())))),
max(list(map(len, list(label_runs.values())))))
for label, runs in sorted(label_runs.items()):
if len(runs) < self.args.min_runs:
print(len(runs), self.args.min_runs)
continue
log.debug('%s/%s has %d runs %s', d, label, len(runs), runs[0][0].args.technique)
self.combined_stats_over_time(d, label, runs, objective, worst, best)
final_scores = list()
for run, session in runs:
try:
final = (session.query(Result)
.filter_by(tuning_run=run,
configuration=run.final_config)
.limit(1)
.one())
except sqlalchemy.orm.exc.NoResultFound:
continue
final_scores.append(objective.stats_quality_score(final, worst, best))
final_scores.sort()
if final_scores:
norm = objective.stats_quality_score(best, worst, best)
if norm > 0.00001:
summary_report[d][run_label(run, short=True)] = (
old_div(percentile(final_scores, 0.5), norm),
old_div(percentile(final_scores, 0.1), norm),
old_div(percentile(final_scores, 0.9), norm),
)
else:
summary_report[d][run_label(run, short=True)] = (
percentile(final_scores, 0.5) + norm + 1.0,
percentile(final_scores, 0.1) + norm + 1.0,
percentile(final_scores, 0.9) + norm + 1.0,
)
with open(self.args.stats_dir + "/summary.dat", 'w') as o:
# make summary report
keys = sorted(reduce(set.union,
[set(x.keys()) for x in list(summary_report.values())],
set()))
print('#####', end=' ', file=o)
for k in keys:
print(k, end=' ', file=o)
print(file=o)
for d, label_vals in sorted(summary_report.items()):
print(d.split('/')[-2], end=' ', file=o)
for k in keys:
if k in label_vals:
print('-', label_vals[k][0], label_vals[k][1], label_vals[k][2], end=' ', file=o)
else:
print('-', '-', '-', '-', end=' ', file=o)
print(file=o)
if keys:
plotcmd = ["""1 w lines lt 1 lc rgb "black" notitle""",
"""'summary.dat' using 3:4:5:xtic(1) ti "%s" """ % keys[0]]
for n, k in enumerate(keys[1:]):
plotcmd.append("""'' using %d:%d:%d ti "%s" """ % (
4 * n + 7,
4 * n + 8,
4 * n + 9,
k))
self.gnuplot_summary_file(self.args.stats_dir, 'summary', plotcmd)
for d, label_runs in list(dir_label_runs.items()):
labels = [k for k, v in list(label_runs.items())
if len(v) >= self.args.min_runs]
self.gnuplot_file(d,
"medianperfe",
['"%s_percentiles.dat" using 1:12:4:18 with errorbars title "%s"' % (l, l) for l in
labels])
self.gnuplot_file(d,
"meanperfe",
['"%s_percentiles.dat" using 1:21:4:18 with errorbars title "%s"' % (l, l) for l in
labels])
self.gnuplot_file(d,
"medianperfl",
['"%s_percentiles.dat" using 1:12 with lines title "%s"' % (l, l) for l in labels])
self.gnuplot_file(d,
"meanperfl",
['"%s_percentiles.dat" using 1:21 with lines title "%s"' % (l, l) for l in labels])
# print
# print "10% Scores", d
# pprint(self.technique_scores(d, labels, '0.1'))
# print
# print "90% Scores", d
# pprint(self.technique_scores(d, labels, '0.9'))
# print
# print "Mean Scores", d
# pprint(self.technique_scores(d, labels, 'mean'))
print()
print("Median Scores", d)
pprint(self.technique_scores(d, labels, '0.5'))
def technique_scores(self, directory, labels, ykey, xkey='#sec', factor=10.0):
max_duration = None
min_value = float('inf')
for label in labels:
try:
dr = csv.DictReader(open(os.path.join(directory, label + "_percentiles.dat")), delimiter=' ',
lineterminator='\n')
lastrow = list(dr)[-1]
max_duration = max(max_duration, float(lastrow[xkey]))
min_value = min(min_value, float(lastrow[ykey]))
except:
log.exception("failed computing score")
scores = list()
for label in labels:
try:
dr = csv.DictReader(open(os.path.join(directory, label + "_percentiles.dat")), delimiter=' ',
lineterminator='\n')
score = 0.0
lastsec = 0.0
value = float('inf')
for row in dr:
duration = float(row[xkey]) - lastsec
lastsec = float(row[xkey])
value = float(row[ykey])
score += duration * (value - min_value)
score += (factor * max_duration - lastsec) * (value - min_value)
scores.append((score, label))
except:
log.exception("failed computing score")
return sorted(scores)
def combined_stats_over_time(self,
output_dir,
label,
runs,
objective,
worst,
best,
):
"""
combine stats_over_time() vectors for multiple runs
"""
# extract_fn = lambda dr: objective.stats_quality_score(dr.result, worst, best)
extract_fn = _.result.time
combine_fn = min
no_data = 999
log.debug("writing stats for %s to %s", label, output_dir)
by_run = [self.stats_over_time(session, run, extract_fn, combine_fn, no_data)
for run, session in runs]
max_len = max(list(map(len, by_run)))
by_run_streams = [Stream() << x << repeat(x[-1], max_len - len(x))
for x in by_run]
by_quanta = list(zip(*by_run_streams[:]))
def data_file(suffix, headers, value_function):
with open(os.path.join(output_dir, label + suffix), 'w') as fd:
out = csv.writer(fd, delimiter=' ', lineterminator='\n')
out.writerow(['#sec'] + headers)
for quanta, values in enumerate(by_quanta):
sec = quanta * self.args.stats_quanta
out.writerow([sec] + value_function(values))
# data_file('_details.dat',
# map(lambda x: 'run%d'%x, xrange(max_len)),
# list)
# self.gnuplot_file(output_dir,
# label+'_details',
# [('"'+label+'_details.dat"'
# ' using 1:%d'%i +
# ' with lines'
# ' title "Run %d"'%i)
# for i in xrange(max_len)])
data_file('_mean.dat',
              ['mean', 'stddev'],
lambda values: [mean(values), stddev(values)])
self.gnuplot_file(output_dir,
label + '_mean',
['"' + label + '_mean.dat" using 1:2 with lines title "Mean"'])
def extract_percentiles(values):
values = sorted(values)
return ([values[int(round(p * (len(values) - 1)))] for p in PCTSTEPS]
+ [mean(values)])
data_file("_percentiles.dat", PCTSTEPS + ['mean'], extract_percentiles)
self.gnuplot_file(output_dir,
label + '_percentiles',
reversed([
'"' + label + '_percentiles.dat" using 1:2 with lines title "0%"',
# '"" using 1:3 with lines title "5%"',
'"" using 1:4 with lines title "10%"',
# '"" using 1:5 with lines title "25%"',
'"" using 1:6 with lines title "20%"',
# '"" using 1:7 with lines title "35%"',
'"" using 1:8 with lines title "30%"',
# '"" using 1:9 with lines title "45%"',
'"" using 1:10 with lines title "40%"',
# '"" using 1:11 with lines title "55%"',
'"" using 1:12 with lines title "50%"',
# '"" using 1:13 with lines title "65%"',
'"" using 1:14 with lines title "70%"',
# '"" using 1:15 with lines title "75%"',
'"" using 1:16 with lines title "80%"',
# '"" using 1:17 with lines title "85%"',
'"" using 1:18 with lines title "90%"',
# '"" using 1:19 with lines title "95%"',
'"' + label + '_percentiles.dat" using 1:20 with lines title "100%"',
]))
def gnuplot_file(self, output_dir, prefix, plotcmd):
with open(os.path.join(output_dir, prefix + '.gnuplot'), 'w') as fd:
print('set terminal postscript eps enhanced color', file=fd)
print('set output "%s"' % (prefix + '.eps'), file=fd)
print('set ylabel "Execution Time (seconds)"', file=fd)
print('set xlabel "Autotuning Time (seconds)"', file=fd)
print('plot', ',\\\n'.join(plotcmd), file=fd)
try:
subprocess.call(['gnuplot', prefix + '.gnuplot'], cwd=output_dir, stdin=None)
except OSError:
log.error("command gnuplot not found")
def gnuplot_summary_file(self, output_dir, prefix, plotcmd):
with open(os.path.join(output_dir, prefix + '.gnuplot'), 'w') as fd:
print('set terminal postscript eps enhanced color', file=fd)
print('set output "%s"' % (prefix + '.eps'), file=fd)
print('''
set boxwidth 0.9
set style fill solid 1.00 border 0
set style histogram errorbars gap 2 lw 1
set style data histograms
set xtics rotate by -45
set bars 0.5
set yrange [0:20]
set yrange [0:10]
set key out vert top left
set size 1.5,1
set ytics 1
''', file=fd)
print('plot', ',\\\n'.join(plotcmd), file=fd)
subprocess.call(['gnuplot', prefix + '.gnuplot'], cwd=output_dir, stdin=None)
def stats_over_time(self,
session,
run,
extract_fn,
combine_fn,
no_data=None):
"""
return reduce(combine_fn, map(extract_fn, data)) for each quanta of the
tuning run
"""
value_by_quanta = [no_data]
start_date = run.start_date
subq = (session.query(Result.id)
.filter_by(tuning_run=run, was_new_best=True, state='OK'))
q = (session.query(DesiredResult)
.join(Result)
.filter(DesiredResult.state == 'COMPLETE',
DesiredResult.tuning_run == run,
DesiredResult.result_id.in_(subq.subquery()))
.order_by(DesiredResult.request_date))
first_id = None
for dr in q:
if first_id is None:
first_id = dr.id
td = (dr.request_date - start_date)
duration = td.seconds + (td.days * 24 * 3600.0)
if self.args.by_request_count:
quanta = dr.id - first_id
else:
quanta = int(old_div(duration, self.args.stats_quanta))
while len(value_by_quanta) <= quanta:
value_by_quanta.append(value_by_quanta[-1])
if value_by_quanta[-1] is no_data:
value_by_quanta[-1] = extract_fn(dr)
else:
value_by_quanta[-1] = combine_fn(value_by_quanta[-1], extract_fn(dr))
return value_by_quanta
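# Example of the quantization performed by stats_over_time() above: with the
# default --stats-quanta of 10 seconds, a best-result update at t=37s lands in
# quanta 3; the value list is padded forward, so later quanta keep the best
# value seen so far (or `no_data` until the first update arrives).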
if __name__ == '__main__':
opentuner.tuningrunmain.init_logging()
sys.exit(StatsMain(argparser.parse_args()).main())
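# Example invocation (paths and file name are illustrative): assuming the
# per-run sqlite databases live under ./opentuner.db/, something like
#   python stats.py --stats-input opentuner.db --stats-dir stats --min-runs 3
# writes the percentile/mean .dat files and gnuplot scripts under ./stats/.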
|
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Low level code generation for parameter parsing.
"""
from nuitka.utils.Utils import python_version
from .ConstantCodes import getConstantCode
from .Indentation import indented
from .templates.CodeTemplatesParameterParsing import (
template_argparse_assign_from_dict_finding,
template_argparse_assign_from_dict_parameter_quick_path,
template_argparse_assign_from_dict_parameter_quick_path_kw_only,
template_argparse_assign_from_dict_parameter_slow_path,
template_argparse_assign_from_dict_parameter_slow_path_kw_only,
template_argparse_assign_from_dict_parameters,
template_argparse_nested_argument,
template_argparse_plain_argument,
template_parameter_dparser_entry_point,
template_parameter_function_entry_point,
template_parameter_function_refuses,
template_parse_argument_check_counts_without_list_star_arg,
template_parse_argument_check_dict_parameter_with_star_dict,
template_parse_argument_copy_list_star_args,
template_parse_argument_dict_star_copy,
template_parse_argument_nested_argument_assign,
template_parse_argument_nested_argument_unpack,
template_parse_argument_usable_count,
template_parse_arguments_check,
template_parse_kwonly_argument_default,
template_parse_kwonly_arguments_check
)
def getParameterEntryPointIdentifier(function_identifier):
return "fparse_" + function_identifier
def getQuickEntryPointIdentifier(function_identifier, parameters):
if parameters.hasNestedParameterVariables() or \
parameters.getKwOnlyParameterCount() > 0:
return "NULL"
else:
return "dparse_" + function_identifier
def getDirectFunctionEntryPointIdentifier(function_identifier):
return "impl_" + function_identifier
def _getParameterParsingCode(context, parameters, function_name):
# There is really no way this could be any less complex.
# pylint: disable=R0912,R0914
# First, declare all parameter objects as variables.
parameter_parsing_code = "".join(
[
"PyObject *_python_par_" + variable.getCodeName() + " = NULL;\n"
for variable in
parameters.getAllVariables()
]
)
top_level_parameters = parameters.getTopLevelVariables()
# Max allowed number of positional arguments, all except keyword only
# arguments.
plain_possible_count = len(top_level_parameters) - \
parameters.getKwOnlyParameterCount()
if top_level_parameters:
parameter_parsing_code += "// Copy given dictionary values to the the respective variables:\n"
if parameters.getDictStarArgVariable() is not None:
        # In the case of star dict arguments, we need to check what goes into it
        # and which named arguments we have.
parameter_parsing_code += template_parse_argument_dict_star_copy % {
"dict_star_parameter_name" : parameters.getStarDictArgumentName(),
"function_name" : function_name,
}
# Check for each variable.
for variable in top_level_parameters:
if not variable.isNestedParameterVariable():
parameter_parsing_code += template_parse_argument_check_dict_parameter_with_star_dict % {
"parameter_name" : variable.getCodeName(),
"parameter_name_object" : getConstantCode(
constant = variable.getName(),
context = context
),
"dict_star_parameter_name" : parameters.getStarDictArgumentName(),
}
elif not parameters.isEmpty():
quick_path_code = ""
slow_path_code = ""
for variable in top_level_parameters:
# Only named ones can be assigned from the dict.
if variable.isNestedParameterVariable():
continue
parameter_name_object = getConstantCode(
constant = variable.getName(),
context = context
)
parameter_assign_from_kw = template_argparse_assign_from_dict_finding % {
"parameter_name" : variable.getCodeName(),
}
if variable.isParameterVariableKwOnly():
assign_quick = template_argparse_assign_from_dict_parameter_quick_path_kw_only
assign_slow = template_argparse_assign_from_dict_parameter_slow_path_kw_only
else:
assign_quick = template_argparse_assign_from_dict_parameter_quick_path
assign_slow = template_argparse_assign_from_dict_parameter_slow_path
quick_path_code += assign_quick % {
"parameter_name_object" : parameter_name_object,
"parameter_assign_from_kw" : indented(parameter_assign_from_kw)
}
slow_path_code += assign_slow % {
"parameter_name_object" : parameter_name_object,
"parameter_assign_from_kw" : indented(parameter_assign_from_kw)
}
parameter_parsing_code += template_argparse_assign_from_dict_parameters % {
"function_name" : function_name,
"parameter_quick_path" : indented(quick_path_code, 2),
"parameter_slow_path" : indented(slow_path_code, 2)
}
if parameters.isEmpty():
parameter_parsing_code += template_parameter_function_refuses % {}
elif python_version < 330:
if parameters.getListStarArgVariable() is None:
parameter_parsing_code += template_parse_argument_check_counts_without_list_star_arg % {
"top_level_parameter_count" : plain_possible_count,
}
if plain_possible_count > 0:
plain_var_names = []
parameter_parsing_code += template_parse_argument_usable_count % {}
for count, variable in enumerate(top_level_parameters):
if variable.isNestedParameterVariable():
parameter_parsing_code += template_argparse_nested_argument % {
"parameter_name" : variable.getCodeName(),
"parameter_position" : count,
"top_level_parameter_count" : plain_possible_count,
}
elif not variable.isParameterVariableKwOnly():
parameter_parsing_code += template_argparse_plain_argument % {
"parameter_name" : variable.getCodeName(),
"parameter_position" : count,
"top_level_parameter_count" : plain_possible_count,
}
plain_var_names.append("_python_par_" + variable.getCodeName())
parameter_parsing_code += template_parse_arguments_check % {
"parameter_test" : " || ".join(
"%s == NULL" % plain_var_name
for plain_var_name in
plain_var_names
),
"parameter_list" : ", ".join(plain_var_names)
}
if parameters.getListStarArgVariable() is not None:
parameter_parsing_code += template_parse_argument_copy_list_star_args % {
"list_star_parameter_name" : parameters.getStarListArgumentName(),
"top_level_parameter_count" : plain_possible_count
}
elif python_version >= 330:
parameter_parsing_code += template_parse_argument_check_counts_without_list_star_arg % {
"top_level_parameter_count" : plain_possible_count,
}
def unPackNestedParameterVariables(variables):
result = ""
for count, variable in enumerate(variables):
if variable.isNestedParameterVariable():
assign_source = "_python_par_%s" % variable.getCodeName()
unpack_code = ""
child_variables = variable.getTopLevelVariables()
for count, child_variable in enumerate(child_variables):
unpack_code += template_parse_argument_nested_argument_assign % {
"parameter_name" : child_variable.getCodeName(),
"iter_name" : variable.getName(),
"unpack_count" : count
}
result += template_parse_argument_nested_argument_unpack % {
"unpack_source_identifier" : assign_source,
"parameter_name" : variable.getCodeName(),
"unpack_code" : unpack_code
}
for variable in variables:
if variable.isNestedParameterVariable():
result += unPackNestedParameterVariables(
variables = variable.getTopLevelVariables()
)
return result
parameter_parsing_code += unPackNestedParameterVariables(
variables = top_level_parameters
)
kw_only_var_names = []
for variable in parameters.getKwOnlyVariables():
parameter_parsing_code += template_parse_kwonly_argument_default % {
"function_name" : function_name,
"parameter_name" : variable.getCodeName(),
"parameter_name_object" : getConstantCode(
constant = variable.getName(),
context = context
)
}
kw_only_var_names.append("_python_par_" + variable.getCodeName())
if kw_only_var_names:
parameter_parsing_code += template_parse_kwonly_arguments_check % {
"parameter_test" : " || ".join(
"%s == NULL" % kw_only_var_name
for kw_only_var_name in
kw_only_var_names
),
"parameter_list" : ", ".join(kw_only_var_names)
}
return indented(parameter_parsing_code)
def getParameterParsingCode(context, function_identifier, function_name,
parameters, needs_creation):
function_parameter_variables = parameters.getVariables()
if function_parameter_variables:
parameter_objects_decl = [
"PyObject *_python_par_" + variable.getCodeName()
for variable in
function_parameter_variables
]
parameter_objects_list = [
"_python_par_" + variable.getCodeName()
for variable in
function_parameter_variables
]
else:
parameter_objects_decl = []
parameter_objects_list = []
if needs_creation:
parameter_objects_decl.insert(0, "Nuitka_FunctionObject *self")
parameter_objects_list.insert(0, "self")
parameter_release_code = "".join(
[
" Py_XDECREF( _python_par_" + variable.getCodeName() + " );\n"
for variable in
parameters.getAllVariables()
if not variable.isNestedParameterVariable()
]
)
parameter_entry_point_code = template_parameter_function_entry_point % {
"parameter_parsing_code" : _getParameterParsingCode(
context = context,
function_name = function_name,
parameters = parameters,
),
"parse_function_identifier" : getParameterEntryPointIdentifier(
function_identifier = function_identifier,
),
"impl_function_identifier" : getDirectFunctionEntryPointIdentifier(
function_identifier = function_identifier
),
"parameter_objects_list" : ", ".join(parameter_objects_list),
"parameter_release_code" : parameter_release_code,
}
if not parameters.hasNestedParameterVariables() and \
not parameters.getKwOnlyParameterCount() > 0:
args_forward = []
count = -1
for count, variable in enumerate(parameters.getTopLevelVariables()):
args_forward.append(
", INCREASE_REFCOUNT( args[ %d ] )" % count
)
if parameters.getListStarArgVariable() is not None:
count += 1
args_forward.append(
", MAKE_TUPLE( &args[ %d ], size > %d ? size-%d : 0 )" % (
count, count, count
)
)
if parameters.getDictStarArgVariable() is not None:
args_forward.append(
", PyDict_New()"
)
# print args_forward
parameter_entry_point_code += template_parameter_dparser_entry_point % {
"function_identifier" : function_identifier,
"arg_count" : len(function_parameter_variables),
"args_forward" : "".join(args_forward)
}
return (
function_parameter_variables,
parameter_entry_point_code,
parameter_objects_decl
)
|
|
# $Id: ip.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
"""Internet Protocol."""
import dpkt
class IP(dpkt.Packet):
__hdr__ = (
('v_hl', 'B', (4 << 4) | (20 >> 2)),
('tos', 'B', 0),
('len', 'H', 20),
('id', 'H', 0),
('off', 'H', 0),
('ttl', 'B', 64),
('p', 'B', 0),
('sum', 'H', 0),
('src', '4s', '\x00' * 4),
('dst', '4s', '\x00' * 4)
)
_protosw = {}
opts = ''
def _get_v(self): return self.v_hl >> 4
def _set_v(self, v): self.v_hl = (v << 4) | (self.v_hl & 0xf)
v = property(_get_v, _set_v)
def _get_hl(self): return self.v_hl & 0xf
def _set_hl(self, hl): self.v_hl = (self.v_hl & 0xf0) | hl
hl = property(_get_hl, _set_hl)
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __str__(self):
if self.sum == 0:
self.sum = dpkt.in_cksum(self.pack_hdr() + self.opts)
if (self.p == 6 or self.p == 17) and \
(self.off & (IP_MF|IP_OFFMASK)) == 0 and \
isinstance(self.data, dpkt.Packet) and self.data.sum == 0:
# Set zeroed TCP and UDP checksums for non-fragments.
p = str(self.data)
s = dpkt.struct.pack('>4s4sxBH', self.src, self.dst,
self.p, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
self.data.sum = dpkt.in_cksum_done(s)
if self.p == 17 and self.data.sum == 0:
self.data.sum = 0xffff # RFC 768
# XXX - skip transports which don't need the pseudoheader
return self.pack_hdr() + self.opts + str(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self.v_hl & 0xf) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError, 'invalid header length'
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
if self.len:
buf = buf[self.__hdr_len__ + ol:self.len]
else: # very likely due to TCP segmentation offload
buf = buf[self.__hdr_len__ + ol:]
try:
self.data = self._protosw[self.p](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
set_proto = classmethod(set_proto)
def get_proto(cls, p):
return cls._protosw[p]
get_proto = classmethod(get_proto)
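# Example usage (a minimal sketch mirroring the unit tests below):
#   ip = IP(src='\x01\x02\x03\x04', dst='\x01\x02\x03\x04', p=IP_PROTO_UDP)
#   ip.data = 'payload'
#   ip.len += len(ip.data)
#   wire = str(ip)      # serializes header + options + payload, filling the checksum
#   parsed = IP(wire)   # round-trips; parsed.data holds the payload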
# Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
IP_TOS_DEFAULT = 0x00 # default
IP_TOS_LOWDELAY = 0x10 # low delay
IP_TOS_THROUGHPUT = 0x08 # high throughput
IP_TOS_RELIABILITY = 0x04 # high reliability
IP_TOS_LOWCOST = 0x02 # low monetary cost - XXX
IP_TOS_ECT = 0x02 # ECN-capable transport
IP_TOS_CE = 0x01 # congestion experienced
# IP precedence (high 3 bits of ip_tos), hopefully unused
IP_TOS_PREC_ROUTINE = 0x00
IP_TOS_PREC_PRIORITY = 0x20
IP_TOS_PREC_IMMEDIATE = 0x40
IP_TOS_PREC_FLASH = 0x60
IP_TOS_PREC_FLASHOVERRIDE = 0x80
IP_TOS_PREC_CRITIC_ECP = 0xa0
IP_TOS_PREC_INTERNETCONTROL = 0xc0
IP_TOS_PREC_NETCONTROL = 0xe0
# Fragmentation flags (ip_off)
IP_RF = 0x8000 # reserved
IP_DF = 0x4000 # don't fragment
IP_MF = 0x2000 # more fragments (not last frag)
IP_OFFMASK = 0x1fff # mask for fragment offset
# Time-to-live (ip_ttl), seconds
IP_TTL_DEFAULT = 64 # default ttl, RFC 1122, RFC 1340
IP_TTL_MAX = 255 # maximum ttl
# Protocol (ip_p) - http://www.iana.org/assignments/protocol-numbers
IP_PROTO_IP = 0 # dummy for IP
IP_PROTO_HOPOPTS = IP_PROTO_IP # IPv6 hop-by-hop options
IP_PROTO_ICMP = 1 # ICMP
IP_PROTO_IGMP = 2 # IGMP
IP_PROTO_GGP = 3 # gateway-gateway protocol
IP_PROTO_IPIP = 4 # IP in IP
IP_PROTO_ST = 5 # ST datagram mode
IP_PROTO_TCP = 6 # TCP
IP_PROTO_CBT = 7 # CBT
IP_PROTO_EGP = 8 # exterior gateway protocol
IP_PROTO_IGP = 9 # interior gateway protocol
IP_PROTO_BBNRCC = 10 # BBN RCC monitoring
IP_PROTO_NVP = 11 # Network Voice Protocol
IP_PROTO_PUP = 12 # PARC universal packet
IP_PROTO_ARGUS = 13 # ARGUS
IP_PROTO_EMCON = 14 # EMCON
IP_PROTO_XNET = 15 # Cross Net Debugger
IP_PROTO_CHAOS = 16 # Chaos
IP_PROTO_UDP = 17 # UDP
IP_PROTO_MUX = 18 # multiplexing
IP_PROTO_DCNMEAS = 19 # DCN measurement
IP_PROTO_HMP = 20 # Host Monitoring Protocol
IP_PROTO_PRM = 21 # Packet Radio Measurement
IP_PROTO_IDP = 22 # Xerox NS IDP
IP_PROTO_TRUNK1 = 23 # Trunk-1
IP_PROTO_TRUNK2 = 24 # Trunk-2
IP_PROTO_LEAF1 = 25 # Leaf-1
IP_PROTO_LEAF2 = 26 # Leaf-2
IP_PROTO_RDP = 27 # "Reliable Datagram" proto
IP_PROTO_IRTP = 28 # Inet Reliable Transaction
IP_PROTO_TP = 29 # ISO TP class 4
IP_PROTO_NETBLT = 30 # Bulk Data Transfer
IP_PROTO_MFPNSP = 31 # MFE Network Services
IP_PROTO_MERITINP = 32 # Merit Internodal Protocol
IP_PROTO_SEP = 33 # Sequential Exchange proto
IP_PROTO_3PC = 34 # Third Party Connect proto
IP_PROTO_IDPR = 35 # Interdomain Policy Route
IP_PROTO_XTP = 36 # Xpress Transfer Protocol
IP_PROTO_DDP = 37 # Datagram Delivery Proto
IP_PROTO_CMTP = 38 # IDPR Ctrl Message Trans
IP_PROTO_TPPP = 39 # TP++ Transport Protocol
IP_PROTO_IL = 40 # IL Transport Protocol
IP_PROTO_IP6 = 41 # IPv6
IP_PROTO_SDRP = 42 # Source Demand Routing
IP_PROTO_ROUTING = 43 # IPv6 routing header
IP_PROTO_FRAGMENT = 44 # IPv6 fragmentation header
IP_PROTO_RSVP = 46 # Reservation protocol
IP_PROTO_GRE = 47 # General Routing Encap
IP_PROTO_MHRP = 48 # Mobile Host Routing
IP_PROTO_ENA = 49 # ENA
IP_PROTO_ESP = 50 # Encap Security Payload
IP_PROTO_AH = 51 # Authentication Header
IP_PROTO_INLSP = 52 # Integrated Net Layer Sec
IP_PROTO_SWIPE = 53 # SWIPE
IP_PROTO_NARP = 54 # NBMA Address Resolution
IP_PROTO_MOBILE = 55 # Mobile IP, RFC 2004
IP_PROTO_TLSP = 56 # Transport Layer Security
IP_PROTO_SKIP = 57 # SKIP
IP_PROTO_ICMP6 = 58 # ICMP for IPv6
IP_PROTO_NONE = 59 # IPv6 no next header
IP_PROTO_DSTOPTS = 60 # IPv6 destination options
IP_PROTO_ANYHOST = 61 # any host internal proto
IP_PROTO_CFTP = 62 # CFTP
IP_PROTO_ANYNET = 63 # any local network
IP_PROTO_EXPAK = 64 # SATNET and Backroom EXPAK
IP_PROTO_KRYPTOLAN = 65 # Kryptolan
IP_PROTO_RVD = 66 # MIT Remote Virtual Disk
IP_PROTO_IPPC = 67 # Inet Pluribus Packet Core
IP_PROTO_DISTFS = 68 # any distributed fs
IP_PROTO_SATMON = 69 # SATNET Monitoring
IP_PROTO_VISA = 70 # VISA Protocol
IP_PROTO_IPCV = 71 # Inet Packet Core Utility
IP_PROTO_CPNX = 72 # Comp Proto Net Executive
IP_PROTO_CPHB = 73 # Comp Protocol Heart Beat
IP_PROTO_WSN = 74 # Wang Span Network
IP_PROTO_PVP = 75 # Packet Video Protocol
IP_PROTO_BRSATMON = 76 # Backroom SATNET Monitor
IP_PROTO_SUNND = 77 # SUN ND Protocol
IP_PROTO_WBMON = 78 # WIDEBAND Monitoring
IP_PROTO_WBEXPAK = 79 # WIDEBAND EXPAK
IP_PROTO_EON = 80 # ISO CNLP
IP_PROTO_VMTP = 81 # Versatile Msg Transport
IP_PROTO_SVMTP = 82 # Secure VMTP
IP_PROTO_VINES = 83 # VINES
IP_PROTO_TTP = 84 # TTP
IP_PROTO_NSFIGP = 85 # NSFNET-IGP
IP_PROTO_DGP = 86 # Dissimilar Gateway Proto
IP_PROTO_TCF = 87 # TCF
IP_PROTO_EIGRP = 88 # EIGRP
IP_PROTO_OSPF = 89 # Open Shortest Path First
IP_PROTO_SPRITERPC = 90 # Sprite RPC Protocol
IP_PROTO_LARP = 91 # Locus Address Resolution
IP_PROTO_MTP = 92 # Multicast Transport Proto
IP_PROTO_AX25 = 93 # AX.25 Frames
IP_PROTO_IPIPENCAP = 94 # yet-another IP encap
IP_PROTO_MICP = 95 # Mobile Internet Ctrl
IP_PROTO_SCCSP = 96 # Semaphore Comm Sec Proto
IP_PROTO_ETHERIP = 97 # Ethernet in IPv4
IP_PROTO_ENCAP = 98 # encapsulation header
IP_PROTO_ANYENC = 99 # private encryption scheme
IP_PROTO_GMTP = 100 # GMTP
IP_PROTO_IFMP = 101 # Ipsilon Flow Mgmt Proto
IP_PROTO_PNNI = 102 # PNNI over IP
IP_PROTO_PIM = 103 # Protocol Indep Multicast
IP_PROTO_ARIS = 104 # ARIS
IP_PROTO_SCPS = 105 # SCPS
IP_PROTO_QNX = 106 # QNX
IP_PROTO_AN = 107 # Active Networks
IP_PROTO_IPCOMP = 108 # IP Payload Compression
IP_PROTO_SNP = 109 # Sitara Networks Protocol
IP_PROTO_COMPAQPEER = 110 # Compaq Peer Protocol
IP_PROTO_IPXIP = 111 # IPX in IP
IP_PROTO_VRRP = 112 # Virtual Router Redundancy
IP_PROTO_PGM = 113 # PGM Reliable Transport
IP_PROTO_ANY0HOP = 114 # 0-hop protocol
IP_PROTO_L2TP = 115 # Layer 2 Tunneling Proto
IP_PROTO_DDX = 116 # D-II Data Exchange (DDX)
IP_PROTO_IATP = 117 # Interactive Agent Xfer
IP_PROTO_STP = 118 # Schedule Transfer Proto
IP_PROTO_SRP = 119 # SpectraLink Radio Proto
IP_PROTO_UTI = 120 # UTI
IP_PROTO_SMP = 121 # Simple Message Protocol
IP_PROTO_SM = 122 # SM
IP_PROTO_PTP = 123 # Performance Transparency
IP_PROTO_ISIS = 124 # ISIS over IPv4
IP_PROTO_FIRE = 125 # FIRE
IP_PROTO_CRTP = 126 # Combat Radio Transport
IP_PROTO_CRUDP = 127 # Combat Radio UDP
IP_PROTO_SSCOPMCE = 128 # SSCOPMCE
IP_PROTO_IPLT = 129 # IPLT
IP_PROTO_SPS = 130 # Secure Packet Shield
IP_PROTO_PIPE = 131 # Private IP Encap in IP
IP_PROTO_SCTP = 132 # Stream Ctrl Transmission
IP_PROTO_FC = 133 # Fibre Channel
IP_PROTO_RSVPIGN = 134 # RSVP-E2E-IGNORE
IP_PROTO_RAW = 255 # Raw IP packets
IP_PROTO_RESERVED = IP_PROTO_RAW # Reserved
IP_PROTO_MAX = 255
# XXX - auto-load IP dispatch table from IP_PROTO_* definitions
def __load_protos():
g = globals()
for k, v in g.iteritems():
if k.startswith('IP_PROTO_'):
name = k[9:].lower()
try:
mod = __import__(name, g)
except ImportError:
continue
IP.set_proto(v, getattr(mod, name.upper()))
if not IP._protosw:
__load_protos()
if __name__ == '__main__':
import unittest
class IPTestCase(unittest.TestCase):
def test_IP(self):
import udp
s = 'E\x00\x00"\x00\x00\x00\x00@\x11r\xc0\x01\x02\x03\x04\x01\x02\x03\x04\x00o\x00\xde\x00\x0e\xbf5foobar'
ip = IP(id=0, src='\x01\x02\x03\x04', dst='\x01\x02\x03\x04', p=17)
u = udp.UDP(sport=111, dport=222)
u.data = 'foobar'
u.ulen += len(u.data)
ip.data = u
ip.len += len(u)
self.failUnless(str(ip) == s)
ip = IP(s)
self.failUnless(str(ip) == s)
self.failUnless(ip.udp.sport == 111)
self.failUnless(ip.udp.data == 'foobar')
def test_hl(self):
s = 'BB\x03\x00\x00\x00\x00\x00\x00\x00\xd0\x00\xec\xbc\xa5\x00\x00\x00\x03\x80\x00\x00\xd0\x01\xf2\xac\xa5"0\x01\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00'
try:
ip = IP(s)
except dpkt.UnpackError:
pass
def test_opt(self):
s = '\x4f\x00\x00\x50\xae\x08\x00\x00\x40\x06\x17\xfc\xc0\xa8\x0a\x26\xc0\xa8\x0a\x01\x07\x27\x08\x01\x02\x03\x04\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ip = IP(s)
ip.sum = 0
self.failUnless(str(ip) == s)
def test_zerolen(self):
import tcp
d = 'X' * 2048
s = 'E\x00\x00\x004\xce@\x00\x80\x06\x00\x00\x7f\x00\x00\x01\x7f\x00\x00\x01\xccN\x0c8`\xff\xc6N_\x8a\x12\x98P\x18@):\xa3\x00\x00' + d
ip = IP(s)
self.failUnless(isinstance(ip.data, tcp.TCP))
self.failUnless(ip.tcp.data == d)
unittest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Elementwise operators"""
# pylint: disable=redefined-builtin
import tvm
from tvm import te
from . import tag
from . import cpp
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def identity(x):
"""Take identity of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
# pylint: disable=unnecessary-lambda
return te.compute(x.shape, lambda *i: x(*i))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def negative(x):
"""Take negation of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
# pylint: disable=unnecessary-lambda
return te.compute(x.shape, lambda *i: -x(*i))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def exp(x):
"""Take exponential of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.exp(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def erf(x):
"""Take gauss error function of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.erf(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def tanh(x):
"""Take hyperbolic tanh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.tanh(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def tan(x):
"""Take tan of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.tan(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def cos(x):
"""Take cos of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.cos(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def cosh(x):
"""Take cosh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.cosh(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sin(x):
"""Take sin of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.sin(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sinh(x):
"""Take sinh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.sinh(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def acos(x):
"""Take arc cos of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.acos(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def acosh(x):
"""Take arc cosh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.acosh(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def asin(x):
"""Take arc sin of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.asin(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def asinh(x):
"""Take arc sinh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.asinh(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def atan(x):
"""Take atan of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.atan(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def atanh(x):
"""Take atanh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.atanh(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def floor(x):
"""Take floor of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.floor(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def ceil(x):
"""Take ceil of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.ceil(x(*i)))
def sign(x):
"""Returns -1, 0, 1 based on sign of x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.sign(x)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def trunc(x):
"""Take truncated value of the input of x, element-wise.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.trunc(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def abs(x):
"""Take absolute value of the input of x, element-wise.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.abs(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def isnan(x):
"""Check if value of x is NaN, element-wise.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.isnan(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def isfinite(x):
"""Check if value of x is finite, element-wise.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.isfinite(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def isinf(x):
"""Check if value of x is infinite, element-wise.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.isinf(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def round(x):
"""Round elements of x to nearest integer.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.round(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def log(x):
"""Take logarithm of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.log(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def log2(x):
"""Take logarithm to the base 2 of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.log2(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def log10(x):
"""Take logarithm to the base 10 of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.log10(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sqrt(x):
"""Take square root of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.sqrt(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def rsqrt(x):
"""Take inverse square root of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.rsqrt(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def sigmoid(x):
"""Take sigmoid tanh of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: te.sigmoid(x(*i)))
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def left_shift(x, n):
"""Take n bits left shift of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
n : int
Number of bits.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: x(*i) << n)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def right_shift(x, n):
"""Take n bits right shift of input x.
Parameters
----------
x : tvm.te.Tensor
Input argument.
n : int
Number of bits.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return te.compute(x.shape, lambda *i: x(*i) >> n)
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def clip(x, a_min, a_max):
"""Clip (limit) the values in an array. Given an interval, values
outside the interval are clipped to the interval edges.
Parameters
----------
x : tvm.te.Tensor
Input argument.
a_min : int or float
Minimum value.
a_max : int or float
Maximum value.
Returns
-------
y : tvm.te.Tensor
The result.
"""
def _compute(*indices):
value = x(*indices)
const_min = tvm.tir.const(a_min, value.dtype)
const_max = tvm.tir.const(a_max, value.dtype)
return tvm.te.max(tvm.te.min(value, const_max), const_min)
return te.compute(x.shape, _compute)
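# Example usage (a minimal sketch; the schedule/build step assumes an LLVM-enabled
# TVM install):
#   A = te.placeholder((64,), name="A", dtype="float32")
#   B = clip(A, 0.0, 6.0)                   # ReLU6-style clamp
#   s = te.create_schedule(B.op)
#   f = tvm.build(s, [A, B], target="llvm")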
@tvm.te.tag_scope(tag=tag.ELEMWISE)
def fixed_point_multiply(x, multiplier, shift):
"""Fixed point multiplication between data and a fixed point
constant expressed as multiplier * 2^(-shift), where multiplier
is a Q-number with 31 fractional bits
Parameters
----------
x : tvm.te.Tensor or Expr
Input argument.
multiplier : int
Multiplier of a fixed floating point number described as multiplier*2^(-shift).
shift : int
Shift of a fixed floating point number described as multiplier*2^(-shift).
Returns
-------
y : tvm.te.Tensor
The result.
"""
def _compute(*indices):
value = x(*indices)
return tvm.tir.q_multiply_shift(
value,
tvm.tir.const(multiplier, "int32"),
tvm.tir.const(31, "int32"),
tvm.tir.const(shift, "int32"),
)
return te.compute(x.shape, _compute)
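# Worked example (comment only): to scale values by roughly 0.2, choose
# multiplier = round(0.2 * 2**31) = 429496730 and shift = 0, so each element is
# multiplied by multiplier * 2**(-31 - shift) ~= 0.2 using integer arithmetic.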
def cast(x, dtype):
"""Cast input to specified data type.
Parameters
----------
x : tvm.te.Tensor or Expr
Input argument.
dtype : str
Data type.
Returns
-------
y : tvm.te.Tensor
The result.
"""
if isinstance(x, te.tensor.Tensor):
return te.compute(x.shape, lambda *i: x(*i).astype(dtype), tag=tag.ELEMWISE)
# pylint: disable=import-outside-toplevel
from tvm.tir import _ffi_api
return _ffi_api._cast(dtype, x)
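# Note: for a te.Tensor input this lowers to an element-wise astype; a plain
# PrimExpr (e.g. tvm.tir.const(1, "int32")) takes the tvm.tir FFI cast path instead.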
def reinterpret(x, dtype):
"""Reinterpret input to specified data type.
Parameters
----------
x : tvm.te.Tensor
Input argument.
dtype : str
Data type.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.reinterpret(x, dtype)
def fast_exp(x):
"""Take exponential of input x using fast_exp implementation
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.fast_exp(x, x.dtype, tag.ELEMWISE)
def fast_tanh(x):
"""Take tanhonential of input x using fast_tanh implementation
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.fast_tanh(x, x.dtype, tag.ELEMWISE)
def fast_erf(x):
"""Take gauss error function of input x using fast_erf implementation.
Parameters
----------
x : tvm.te.Tensor
Input argument.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.fast_erf(x, x.dtype, tag.ELEMWISE)
|
|
# Copyright 2022 The CLU Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perodic actions."""
import tempfile
import time
from unittest import mock
from absl.testing import parameterized
from clu import periodic_actions
import tensorflow as tf
class ReportProgressTest(tf.test.TestCase, parameterized.TestCase):
def test_every_steps(self):
hook = periodic_actions.ReportProgress(
every_steps=4, every_secs=None, num_train_steps=10)
t = time.time()
with self.assertLogs(level="INFO") as logs:
self.assertFalse(hook(1, t))
t += 0.11
self.assertFalse(hook(2, t))
t += 0.13
self.assertFalse(hook(3, t))
t += 0.12
self.assertTrue(hook(4, t))
# We did 1 step every 0.12s => 8.333 steps/s.
self.assertEqual(logs.output, [
"INFO:absl:Setting work unit notes: 8.3 steps/s, 40.0% (4/10), ETA: 0m"
])
def test_every_secs(self):
hook = periodic_actions.ReportProgress(
every_steps=None, every_secs=0.3, num_train_steps=10)
t = time.time()
with self.assertLogs(level="INFO") as logs:
self.assertFalse(hook(1, t))
t += 0.11
self.assertFalse(hook(2, t))
t += 0.13
self.assertFalse(hook(3, t))
t += 0.12
self.assertTrue(hook(4, t))
# We did 1 step every 0.12s => 8.333 steps/s.
self.assertEqual(logs.output, [
"INFO:absl:Setting work unit notes: 8.3 steps/s, 40.0% (4/10), ETA: 0m"
])
def test_without_num_train_steps(self):
report = periodic_actions.ReportProgress(every_steps=2)
t = time.time()
with self.assertLogs(level="INFO") as logs:
self.assertFalse(report(1, t))
self.assertTrue(report(2, t + 0.12))
# We did 1 step in 0.12s => 8.333 steps/s.
self.assertEqual(logs.output, [
"INFO:absl:Setting work unit notes: 8.3 steps/s"
])
def test_unknown_cardinality(self):
report = periodic_actions.ReportProgress(
every_steps=2,
num_train_steps=tf.data.UNKNOWN_CARDINALITY)
t = time.time()
with self.assertLogs(level="INFO") as logs:
self.assertFalse(report(1, t))
self.assertTrue(report(2, t + 0.12))
# We did 1 step in 0.12s => 8.333 steps/s.
self.assertEqual(logs.output, [
"INFO:absl:Setting work unit notes: 8.3 steps/s"
])
def test_called_every_step(self):
hook = periodic_actions.ReportProgress(every_steps=3, num_train_steps=10)
t = time.time()
with self.assertRaisesRegex(
ValueError, "PeriodicAction must be called after every step"):
hook(1, t)
hook(11, t) # Raises exception.
@parameterized.named_parameters(
("_nowait", False),
("_wait", True),
)
@mock.patch("time.time")
def test_named(self, wait_jax_async_dispatch, mock_time):
mock_time.return_value = 0
hook = periodic_actions.ReportProgress(
every_steps=1, every_secs=None, num_train_steps=10)
def _wait():
# Here we depend on hook._executor=ThreadPoolExecutor(max_workers=1)
hook._executor.submit(lambda: None).result()
self.assertFalse(hook(1)) # Never triggers on first execution.
with hook.timed("test1", wait_jax_async_dispatch):
_wait()
mock_time.return_value = 1
_wait()
with hook.timed("test2", wait_jax_async_dispatch):
_wait()
mock_time.return_value = 2
_wait()
with hook.timed("test1", wait_jax_async_dispatch):
_wait()
mock_time.return_value = 3
_wait()
mock_time.return_value = 4
with self.assertLogs(level="INFO") as logs:
self.assertTrue(hook(2))
self.assertEqual(logs.output, [
"INFO:absl:Setting work unit notes: 0.2 steps/s, 20.0% (2/10), ETA: 0m"
" (0m : 50.0% test1, 25.0% test2)"
])
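# Typical usage of ReportProgress in a training loop (a sketch, not part of the
# tests; train_step() stands in for the user's own update function):
#   report = periodic_actions.ReportProgress(every_steps=100, num_train_steps=10_000)
#   for step in range(1, 10_001):
#     train_step()
#     report(step)  # logs "N steps/s, X% (step/total), ETA: Nm" every 100 steps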
class DummyProfilerSession:
"""Dummy Profiler that records the steps at which sessions started/ended."""
def __init__(self):
self.step = None
self.start_session_call_steps = []
self.end_session_call_steps = []
def start_session(self):
self.start_session_call_steps.append(self.step)
def end_session_and_get_url(self, tag):
del tag
self.end_session_call_steps.append(self.step)
class ProfileTest(tf.test.TestCase):
@mock.patch.object(periodic_actions, "profiler", autospec=True)
@mock.patch("time.time")
def test_every_steps(self, mock_time, mock_profiler):
start_steps = []
stop_steps = []
step = 0
def add_start_step(logdir):
del logdir # unused
start_steps.append(step)
def add_stop_step():
stop_steps.append(step)
mock_profiler.start.side_effect = add_start_step
mock_profiler.stop.side_effect = add_stop_step
hook = periodic_actions.Profile(
logdir=tempfile.mkdtemp(),
num_profile_steps=2,
profile_duration_ms=2_000,
first_profile=3,
every_steps=7)
for step in range(1, 18):
mock_time.return_value = step - 0.5 if step == 9 else step
hook(step)
self.assertAllEqual([3, 7, 14], start_steps)
# Note: profiling 7..10 instead of 7..9 because 7..9 took only 1.5 seconds.
self.assertAllEqual([5, 10, 16], stop_steps)
class ProfileAllHostsTest(tf.test.TestCase):
@mock.patch.object(periodic_actions, "profiler", autospec=True)
def test_every_steps(self, mock_profiler):
start_steps = []
step = 0
def profile_collect(logdir, callback, hosts, duration_ms):
del logdir, callback, hosts, duration_ms # unused
start_steps.append(step)
mock_profiler.collect.side_effect = profile_collect
hook = periodic_actions.ProfileAllHosts(
logdir=tempfile.mkdtemp(),
profile_duration_ms=2_000,
first_profile=3,
every_steps=7)
for step in range(1, 18):
hook(step)
self.assertAllEqual([3, 7, 14], start_steps)
class PeriodicCallbackTest(tf.test.TestCase):
def test_every_steps(self):
callback = mock.Mock()
hook = periodic_actions.PeriodicCallback(
every_steps=2, callback_fn=callback)
for step in range(1, 10):
hook(step, 3, remainder=step % 3)
expected_calls = [
mock.call(remainder=2, step=2, t=3),
mock.call(remainder=1, step=4, t=3),
mock.call(remainder=0, step=6, t=3),
mock.call(remainder=2, step=8, t=3)
]
self.assertListEqual(expected_calls, callback.call_args_list)
@mock.patch("time.time")
def test_every_secs(self, mock_time):
callback = mock.Mock()
hook = periodic_actions.PeriodicCallback(every_secs=2, callback_fn=callback)
for step in range(1, 10):
mock_time.return_value = float(step)
hook(step, remainder=step % 5)
# Note: time will be initialized at 1 so hook runs at steps 4 & 7.
expected_calls = [
mock.call(remainder=4, step=4, t=4.0),
mock.call(remainder=2, step=7, t=7.0)
]
self.assertListEqual(expected_calls, callback.call_args_list)
def test_on_steps(self):
callback = mock.Mock()
hook = periodic_actions.PeriodicCallback(on_steps=[8], callback_fn=callback)
for step in range(1, 10):
hook(step, remainder=step % 3)
callback.assert_called_once_with(remainder=2, step=8, t=mock.ANY)
def test_async_execution(self):
out = []
def cb(step, t):
del t
out.append(step)
hook = periodic_actions.PeriodicCallback(
every_steps=1, callback_fn=cb, execute_async=True)
hook(0)
hook(1)
hook(2)
hook(3)
# Block till all the hooks have finished.
hook.get_last_callback_result().result()
# Check order of execution is preserved.
self.assertListEqual(out, [0, 1, 2, 3])
def test_error_async_is_forwarded(self):
def cb(step, t):
del step
del t
raise Exception
hook = periodic_actions.PeriodicCallback(
every_steps=1, callback_fn=cb, execute_async=True)
hook(0)
with self.assertRaises(Exception):
hook(1)
def test_function_without_step_and_time(self):
# This must be used with pass_step_and_time=False.
def cb():
return 5
hook = periodic_actions.PeriodicCallback(
every_steps=1, callback_fn=cb, pass_step_and_time=False)
hook(0)
hook(1)
self.assertEqual(hook.get_last_callback_result(), 5)
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/env python
import sys, os, argparse
from numpy import shape, reshape, \
array, zeros, zeros_like, ones, ones_like, arange, \
double, \
int8, int16, int32, int64, uint8, uint16, uint32, uint64, \
uint, \
iinfo, isscalar, \
unique, \
where, unravel_index, newaxis, \
ceil, floor, prod, cumprod, \
concatenate, \
ndarray, minimum, bincount, dot, nonzero, concatenate, \
setdiff1d, inf, flatnonzero
import itertools
import logging
import re
from collections import defaultdict, deque as queue
from scipy.ndimage import filters, grey_dilation, generate_binary_structure, \
maximum_filter, minimum_filter
from scipy.ndimage import distance_transform_cdt
from scipy.ndimage.measurements import label, find_objects
from scipy.ndimage.morphology import binary_opening, binary_closing, \
binary_dilation, grey_opening, grey_closing, \
generate_binary_structure, iterate_structure
#from scipy.spatial.distance import cityblock as manhattan_distance
import iterprogress as ip
try:
import skimage.morphology
skimage_available = True
except ImportError:
logging.warning('Unable to load skimage.')
skimage_available = False
zero3d = array([0,0,0])
def manhattan_distance(a, b):
return sum(abs(a-b))
def diamond_se(radius, dimension):
se = generate_binary_structure(dimension, 1)
return iterate_structure(se, radius)
def complement(a):
return a.max()-a
def morphological_reconstruction(marker, mask, connectivity=1):
"""Perform morphological reconstruction of the marker into the mask.
See the Matlab image processing toolbox documentation for details:
http://www.mathworks.com/help/toolbox/images/f18-16264.html
"""
sel = generate_binary_structure(marker.ndim, connectivity)
diff = True
while diff:
markernew = grey_dilation(marker, footprint=sel)
markernew = minimum(markernew, mask)
diff = (markernew-marker).max() > 0
marker = markernew
return marker
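# Example (a minimal sketch): grow a single marked peak under a mask.
#   mask = array([[0, 0, 0, 0],
#                 [0, 2, 1, 0],
#                 [0, 1, 3, 0],
#                 [0, 0, 0, 0]])
#   marker = zeros_like(mask)
#   marker[1, 1] = 2
#   morphological_reconstruction(marker, mask)
#   # the marker is dilated repeatedly and clipped by the mask until it stops changing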
def hminima(a, thresh):
"""Suppress all minima that are shallower than thresh."""
maxval = a.max()
ainv = maxval-a
return maxval - morphological_reconstruction(ainv-thresh, ainv)
imhmin = hminima
def remove_small_connected_components(a, min_size=64, in_place=False):
original_dtype = a.dtype
if a.dtype == bool:
a = label(a)[0]
elif not in_place:
a = a.copy()
if min_size == 0: # shortcut for efficiency
return a
component_sizes = bincount(a.ravel())
too_small = component_sizes < min_size
too_small_locations = too_small[a]
a[too_small_locations] = 0
return a.astype(original_dtype)
def regional_minima(a, connectivity=1):
"""Find the regional minima in an ndarray."""
values = unique(a)
delta = (values - minimum_filter(values, footprint=ones(3)))[1:].min()
marker = complement(a)
mask = marker+delta
return marker == morphological_reconstruction(marker, mask, connectivity)
def impose_minima(a, minima, connectivity=1):
"""Transform 'a' so that its only regional minima are those in 'minima'.
Parameters:
'a': an ndarray
'minima': a boolean array of same shape as 'a'
'connectivity': the connectivity of the structuring element used in
morphological reconstruction.
Value:
an ndarray of same shape as a with unmarked local minima paved over.
"""
m = a.max()
mask = m - a
marker = zeros_like(mask)
minima = minima.astype(bool)
marker[minima] = mask[minima]
return m - morphological_reconstruction(marker, mask, connectivity)
def refined_seeding(a, maximum_height=0, grey_close_radius=1,
binary_open_radius=1, binary_close_radius=1, minimum_size=0):
"""Perform morphological operations to get good segmentation seeds."""
if grey_close_radius > 0:
strel = diamond_se(grey_close_radius, a.ndim)
a = grey_closing(a, footprint=strel)
s = (a <= maximum_height)
if binary_open_radius > 0:
strel = diamond_se(binary_open_radius, s.ndim)
s = binary_opening(s, structure=strel)
if binary_close_radius > 0:
strel = diamond_se(binary_close_radius, s.ndim)
s = binary_closing(s, structure=strel)
s = remove_small_connected_components(s, minimum_size)
return label(s)[0]
def minimum_seeds(current_seeds, min_seed_coordinates, connectivity=1):
"""Ensure that each point in given coordinates has its own seed."""
seeds = current_seeds.copy()
sel = generate_binary_structure(seeds.ndim, connectivity)
if seeds.dtype == bool:
seeds = label(seeds, sel)[0]
new_seeds = grey_dilation(seeds, footprint=sel)
overlap = new_seeds[min_seed_coordinates]
seed_overlap_counts = bincount(concatenate((overlap, unique(seeds)))) - 1
seeds_to_delete = (seed_overlap_counts > 1)[seeds]
seeds[seeds_to_delete] = 0
seeds_to_add = [m[overlap==0] for m in min_seed_coordinates]
start = seeds.max() + 1
num_seeds = len(seeds_to_add[0])
seeds[seeds_to_add] = arange(start, start + num_seeds)
return seeds
def split_exclusions(image, labels, exclusions, dilation=0, connectivity=1):
"""Ensure that no segment in 'labels' overlaps more than one exclusion."""
labels = labels.copy()
cur_label = labels.max() + 1
dilated_exclusions = exclusions.copy()
foot = generate_binary_structure(exclusions.ndim, connectivity)
for i in range(dilation):
        dilated_exclusions = grey_dilation(dilated_exclusions, footprint=foot)
while True:
hashed = labels * (exclusions.max() + 1) + exclusions
hashed[exclusions == 0] = 0
violations = bincount(hashed.ravel()) > 1
violations[0] = False
if sum(violations) == 0:
break
offending_label = labels[violations[hashed]][0]
offended_exclusion = exclusions[violations[hashed]][0]
mask = labels == offending_label
seeds, n = label(mask * (dilated_exclusions == offended_exclusion))
seeds[seeds > 1] += cur_label
cur_label += n-1
seeds[seeds == 1] = offending_label
labels[mask] = watershed(image, seeds, connectivity, mask)[mask]
return labels
def watershed(a, seeds=None, connectivity=1, mask=None, smooth_thresh=0.0,
smooth_seeds=False, minimum_seed_size=0, dams=False,
override_skimage=False, show_progress=False):
"""Perform the watershed algorithm of Vincent & Soille (1991)."""
seeded = seeds is not None
sel = generate_binary_structure(a.ndim, connectivity)
b = a
if not seeded:
seeds = regional_minima(a, connectivity)
if seeds.dtype == bool:
seeds = label(seeds, sel)[0]
if smooth_seeds:
seeds = binary_opening(seeds, sel)
if smooth_thresh > 0.0:
b = hminima(a, smooth_thresh)
if skimage_available and not override_skimage and not dams:
return skimage.morphology.watershed(b, seeds, sel, None, mask)
elif seeded:
b = impose_minima(a, seeds.astype(bool), connectivity)
levels = unique(b)
a = pad(a, a.max()+1)
b = pad(b, b.max()+1)
ar = a.ravel()
br = b.ravel()
ws = pad(seeds, 0)
wsr = ws.ravel()
current_label = 0
neighbors = build_neighbors_array(a, connectivity)
level_pixels = build_levels_dict(b)
if show_progress: wspbar = ip.StandardProgressBar('Watershed...')
else: wspbar = ip.NoProgressBar()
for i, level in ip.with_progress(enumerate(levels),
pbar=wspbar, length=len(levels)):
idxs_adjacent_to_labels = queue([idx for idx in level_pixels[level] if
any(wsr[neighbors[idx]])])
while len(idxs_adjacent_to_labels) > 0:
idx = idxs_adjacent_to_labels.popleft()
if wsr[idx] > 0: continue # in case we already processed it
nidxs = neighbors[idx] # neighbors
lnidxs = nidxs[(wsr[nidxs] != 0).astype(bool)] # labeled neighbors
adj_labels = unique(wsr[lnidxs])
if len(adj_labels) == 1 or len(adj_labels) > 1 and not dams:
# assign a label
wsr[idx] = wsr[lnidxs][ar[lnidxs].argmin()]
idxs_adjacent_to_labels.extend(nidxs[((wsr[nidxs] == 0) *
(br[nidxs] == level)).astype(bool) ])
return juicy_center(ws)
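# Example (a sketch; exact labels depend on whether skimage and dams are used):
#   elevation = array([[1, 1, 1, 1, 1],
#                      [1, 0, 2, 0, 1],
#                      [1, 1, 1, 1, 1]])
#   seeds = array([[0, 0, 0, 0, 0],
#                  [0, 1, 0, 2, 0],
#                  [0, 0, 0, 0, 0]])
#   watershed(elevation, seeds)  # floods from the two seeds; basins meet at the ridge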
def manual_split(probs, seg, body, seeds, connectivity=1, boundary_seeds=None):
"""Manually split a body from a segmentation using seeded watershed.
Input:
- probs: the probability of boundary in the volume given.
- seg: the current segmentation.
- body: the label to be split.
- seeds: the seeds for the splitting (should be just two labels).
[-connectivity: the connectivity to use for watershed.]
[-boundary_seeds: if not None, these locations become inf in probs.]
Value:
- the segmentation with the selected body split.
"""
struct = generate_binary_structure(seg.ndim, connectivity)
body_pixels = seg == body
bbox = find_objects(body_pixels)[0]
body_pixels = body_pixels[bbox]
body_boundary = binary_dilation(body_pixels, struct) - body_pixels
non_body_pixels = True - body_pixels - body_boundary
probs = probs.copy()[bbox]
probs[non_body_pixels] = probs.min()-1
if boundary_seeds is not None:
probs[boundary_seeds[bbox]] = probs.max()+1
probs[body_boundary] = probs.max()+1
seeds = label(seeds.astype(bool)[bbox], struct)[0]
outer_seed = seeds.max()+1 # should be 3
seeds[non_body_pixels] = outer_seed
seg_new = watershed(probs, seeds,
dams=(seg==0).any(), connectivity=connectivity, show_progress=True)
seg = seg.copy()
new_seeds = unique(seeds)[:-1]
for new_seed, new_label in zip(new_seeds, [0, body, seg.max()+1]):
seg[bbox][seg_new == new_seed] = new_label
return seg
def smallest_int_dtype(number, signed=False, mindtype=None):
if number < 0: signed = True
if not signed:
if number <= iinfo(uint8).max:
return uint8
if number <= iinfo(uint16).max:
return uint16
if number <= iinfo(uint32).max:
return uint32
if number <= iinfo(uint64).max:
return uint64
else:
if iinfo(int8).min <= number <= iinfo(int8).max:
return int8
if iinfo(int16).min <= number <= iinfo(int16).max:
return int16
if iinfo(int32).min <= number <= iinfo(int32).max:
return int32
if iinfo(int64).min <= number <= iinfo(int64).max:
return int64
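# e.g. smallest_int_dtype(300) -> uint16, smallest_int_dtype(-1) -> int8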
def _is_container(a):
try:
n = len(a)
return True
except TypeError:
return False
def pad(ar, vals, axes=None):
    """Pad 'ar' with the given border value(s) along the given axes (default: all).

    'vals' may be a scalar or a sequence; each entry adds one layer of padding,
    with the first value lying closest to the original array. The output dtype
    is widened when necessary to hold the padding values.
    """
if ar.size == 0:
return ar
if axes is None:
axes = range(ar.ndim)
if not _is_container(vals):
vals = [vals]
if not _is_container(axes):
axes = [axes]
padding_thickness = len(vals)
newshape = array(ar.shape)
for ax in axes:
newshape[ax] += 2
vals = array(vals)
if ar.dtype == double or ar.dtype == float:
new_dtype = double
elif ar.dtype == bool:
new_dtype = bool
else:
maxval = max([vals.max(), ar.max()])
minval = min([vals.min(), ar.min()])
if abs(minval) > maxval:
signed = True
extremeval = minval
else:
if minval < 0:
signed = True
else:
signed = False
extremeval = maxval
new_dtype = max([smallest_int_dtype(extremeval, signed), ar.dtype])
ar2 = zeros(newshape, dtype=new_dtype)
center = ones(newshape, dtype=bool)
for ax in axes:
ar2.swapaxes(0,ax)[0,...] = vals[0]
ar2.swapaxes(0,ax)[-1,...] = vals[0]
center.swapaxes(0,ax)[0,...] = False
center.swapaxes(0,ax)[-1,...] = False
ar2[center] = ar.ravel()
if padding_thickness == 1:
return ar2
else:
return pad(ar2, vals[1:], axes)
def juicy_center(ar, skinsize=1):
for i in xrange(ar.ndim):
ar = ar.swapaxes(0,i)
ar = ar[skinsize:-skinsize]
ar = ar.swapaxes(0,i)
return ar.copy()
def surfaces(ar, skinsize=1):
s = []
for i in xrange(ar.ndim):
ar = ar.swapaxes(0, i)
s.append(ar[0:skinsize].copy())
s.append(ar[-skinsize:].copy())
ar = ar.swapaxes(0, i)
return s
def hollowed(ar, skinsize=1):
"""Return a copy of ar with the center zeroed out.
'skinsize' determines how thick of a crust to leave untouched.
"""
slices = (slice(skinsize, -skinsize),)*ar.ndim
ar_out = ar.copy()
ar_out[slices] = 0
return ar_out
def build_levels_dict(a):
d = defaultdict(list)
for loc,val in enumerate(a.ravel()):
d[val].append(loc)
return d
def build_neighbors_array(ar, connectivity=1):
idxs = arange(ar.size, dtype=uint32)
return get_neighbor_idxs(ar, idxs, connectivity)
def get_neighbor_idxs(ar, idxs, connectivity=1):
if isscalar(idxs): # in case only a single idx is given
idxs = [idxs]
idxs = array(idxs) # in case a list or other array-like is given
strides = array(ar.strides)/ar.itemsize
if connectivity == 1:
steps = (strides, -strides)
else:
steps = []
for i in range(1,connectivity+1):
prod = array(list(itertools.product(*([[1,-1]]*i))))
i_strides = array(list(itertools.combinations(strides,i))).T
steps.append(prod.dot(i_strides).ravel())
return idxs[:,newaxis] + concatenate(steps)
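# For connectivity 1 the offsets are +/- one stride per axis (face neighbours);
# for higher connectivity, every +/-1 combination over up to `connectivity` axes
# is added, so connectivity == ndim yields the full box of neighbours.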
def orphans(a):
"""Find all the segments that do not touch the volume boundary.
This function differs from agglo.Rag.orphans() in that it does not use the
graph, but rather computes orphans directly from a volume.
"""
return setdiff1d(
unique(a), unique(concatenate([s.ravel() for s in surfaces(a)]))
)
def non_traversing_segments(a):
"""Find segments that enter the volume but do not leave it elsewhere."""
if a.all():
a = damify(a)
surface = hollowed(a)
surface_ccs = label(surface)[0]
idxs = flatnonzero(surface)
pairs = unique(zip(surface.ravel()[idxs], surface_ccs.ravel()[idxs]))
return flatnonzero(bincount(pairs.astype(int)[:,0])==1)
def damify(a, in_place=False):
"""Add dams to a borderless segmentation."""
if not in_place:
b = a.copy()
b[seg_to_bdry(a)] = 0
return b
def seg_to_bdry(seg, connectivity=1):
"""Given a borderless segmentation, return the boundary map."""
strel = generate_binary_structure(seg.ndim, connectivity)
return maximum_filter(seg, footprint=strel) != \
minimum_filter(seg, footprint=strel)
def undam(seg):
""" Assign zero-dams to nearest non-zero region. """
bdrymap = seg==0
k = distance_transform_cdt(bdrymap, return_indices=True)
ind = nonzero(bdrymap.ravel())[0]
closest_sub = concatenate([i.ravel()[:,newaxis] for i in k[1]],axis=1)
closest_sub = closest_sub[ind,:]
closest_ind = [
dot(bdrymap.strides, i)/bdrymap.itemsize for i in closest_sub]
sp = seg.shape
seg = seg.ravel()
seg[ind] = seg[closest_ind]
seg = reshape(seg, sp)
return seg
if __name__ == '__main__':
pass
|
|
from flask import *
import pymongo
import bson
import json
import jsonschema
import yawparser
import datetime
import traceback
import uuid
import os
import werkzeug
from functools import wraps
DB_URI = 'mongodb://localhost:27017/'
DB_NAME = 'weatherapp_db'
WEATHER_UPDATE_PERIOD = datetime.timedelta(minutes=1)
db_client = pymongo.MongoClient(DB_URI)
db = db_client[DB_NAME]
app = Flask(__name__)
###
def request_wants_json():
best = request.accept_mimetypes.best_match(['application/json', 'text/html'])
return best == 'application/json' and request.accept_mimetypes[best] > request.accept_mimetypes['text/html']
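# e.g. "Accept: application/json" -> True, while a browser's default
# "Accept: text/html,application/xhtml+xml,..." -> False.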
### Error handlers
@app.errorhandler(500)
def page_not_found(e):
print "\n\n",traceback.format_exc(),"\n\n"
return 'Internal server error', 500
### Main part of API
@app.route('/api/v0/weather',methods=['GET'])
def weather1():
city_id = request.args.get('city_id',None)
return weather(city_id=city_id)
@app.route('/api/v0/weather/<int:city_id>',methods=['GET'])
def weather(city_id=None):
if city_id == None:
abort(418)
city_id = int(city_id)
weather_state = None
weather_state_old = None
weather = db.weather_cache.find_one({'city_id':city_id})
if weather == None:
weather = {'city_id':city_id}
elif 'updated' in weather:
if (datetime.datetime.now() - weather['updated']) < WEATHER_UPDATE_PERIOD:
weather_state = weather.get('state',None)
else:
weather_state_old = weather.get('state',None)
if weather_state == None:
print '--- Downloading weather info for city',str(city_id),'...'
weather_state = yawparser.parse_weather_info(city_id=city_id)
if weather_state == None:
if weather_state_old != None:
weather_state = weather_state_old
weather_state['outOfDate'] = True
else:
abort(404)
else:
weather['updated'] = datetime.datetime.now()
weather['state'] = weather_state
db.weather_cache.save(weather)
return jsonify(weather_state)
@app.route('/api/v0/cloth/items/<item_id>',methods=['GET'])
def cloth_getitem(item_id=''):
item = db.cloth_items.find_one({'_id':bson.ObjectId(item_id)})
if item == None:
return Response('{}',mimetype='application/json',status=404)
del item['_id']
return jsonify(item)
def check_item_condition(cond,context):
value = context.get(str(cond.get('value',cond.get('variable',''))),'0')
cond_ok = True
if 'is' in cond:
cond_ok = cond_ok and (str(value) == cond['is'])
if 'from' in cond:
cond_ok = cond_ok and (float(value) >= float(cond['from']))
if 'to' in cond:
cond_ok = cond_ok and (float(value) < float(cond['to']))
return cond_ok
def calc_item_weight(item,context):
conditions = item.get('conditions',[])
weight = 0.0
for cond in conditions:
weight += cond.get('weight',0.0) * (1.0 if check_item_condition(cond,context) else 0.0)
return weight
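# Worked example (hypothetical data, not taken from the DB):
#   context = {'temperature': -5.0, 'windVelocity': 3.0, 'season': 'winter'}
#   cond = {'variable': 'temperature', 'from': -30, 'to': 0, 'weight': 2.0}
#   check_item_condition(cond, context)                # True: -30 <= -5 < 0
#   calc_item_weight({'conditions': [cond]}, context)  # 2.0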
@app.route('/api/v0/cloth/choose',methods=['GET'])
def cloth_choose():
context = {}
context['temperature'] = float(request.args.get('temperature',0))
context['windVelocity'] = float(request.args.get('windVelocity',0))
context['season'] = request.args.get('season','')
itemgroups = {}
for item in db.cloth_items.find():
item = dict(item)
del item['_id']
group = item.get('description',{}).get('group','')
weight = calc_item_weight(item,context)
item['weight'] = weight
if group not in itemgroups:
itemgroups[group] = item
else:
weight = calc_item_weight(item,context)
if itemgroups[group]['weight'] < weight:
itemgroups[group] = item
choosen = []
for k, v in itemgroups.items():
if v['weight'] > 0.01:
            choosen += [v.get('description',{'error':'Achtung! Invalid record in DB.'})]
print '>>>> ',str(choosen)
return jsonify({'choise':choosen})
### Cities DB
@app.route('/api/v0/cities',methods=['GET'])
def cities_getlist():
cities = db.clities_list.find_one()
return jsonify(cities)
### API Calls for debugging/administration
def get_collection_scheme(collection,version='v0'):
meta = db[collection+'.meta'].find_one({'version':version})
schema = meta['schema']
schema['$schema'] = meta['metaschema']
return schema
def requiresAuthentication(view):
@wraps(view)
def wrapper(*args,**kwargs):
cookie = request.cookies.get('session_id','')
if db.sessions.find_one({'session_id':cookie}) == None:
abort(401)
return view(*args,**kwargs)
return wrapper
def new_session(username):
session_id = str(uuid.uuid4())
db.sessions.insert({
'username':username,
'session_id':session_id,
'created':datetime.datetime.now()})
return session_id
@app.route('/api/v0/login',methods=['POST'])
def login():
ct = request.headers.get('Content-Type',None)
if ct in ('application/x-www-form-urlencoded','multipart/form-data'):
return login_form( )
if ct == 'application/json':
return login_json( )
abort(400)
def login_form():
username = request.form['login']
password = request.form['password']
redirect_to = request.form.get('from','/')
resp = redirect(redirect_to)
if db.users.find_one({'name':username,'password':password}) != None:
resp.set_cookie('session_id',new_session(username))
return resp
def login_json():
data = request.get_json()
username = data.get('username','')
password = data.get('password','')
if db.users.find_one({'name':username,'password':password}) != None:
session_id = new_session(username)
resp = jsonify({'status':'OK'})
resp.set_cookie('session_id',session_id)
return resp
abort(401)
@app.route('/api/v0/logout',methods=['POST'])
@requiresAuthentication
def logout():
db.sessions.remove({'session_id':request.cookies['session_id']})
resp = redirect(request.form.get('from','/'))
resp.set_cookie('session_id','',expires=0)
return resp
@app.route('/api/v0/cloth/items',methods=['POST'])
def cloth_post_new():
if request.headers['Content-Type'] in ('application/x-www-form-urlencoded','multipart/form-data'):
return cloth_postitem_form(None)
return cloth_putitem(None)
@app.route('/api/v0/cloth/items/<item_id>',methods=['PUT'])
@requiresAuthentication
def cloth_putitem(item_id=None):
if request.headers['Content-Type'] not in ('application/json',):
abort(401)
jsobj = request.get_json()
jsobj['_id'] = None
jsscheme = get_collection_scheme('cloth_items')
try:
jsonschema.validate(jsobj,jsscheme);
except Exception as e:
return Response(json.dumps({'status':'Validation failed','exception':str(e)}),status=400,mimetype='application/json')
if item_id != None:
jsobj['_id'] = bson.ObjectId(item_id)
db.cloth_items.save(jsobj)
else:
if '_id' in jsobj:
del jsobj['_id']
jsobj['_id'] = db.cloth_items.insert(jsobj)
resp = jsonify({'status':'OK'})
resp.headers['Location'] = '/api/v0/cloth/items/' + str(jsobj['_id'])
return resp
@app.route('/api/v0/cloth/items.schema',methods=['GET'])
def cloth_item_schema():
return jsonify(get_collection_scheme('cloth_items'))
@app.route('/api/v0/cloth/items/<item_id>',methods=['POST'])
@requiresAuthentication
def cloth_postitem_form(item_id=None):
postitem = {'description':{}}
if item_id != None:
postitem['_id'] = bson.ObjectId(item_id)
postitem['description']['name'] = request.form['description.name']
postitem['description']['description'] = request.form['description.description']
postitem['description']['group'] = request.form['description.group']
postitem['description']['img'] = request.form['description.img']
postitem['conditions'] = json.loads(request.form['conditions'])
db.cloth_items.save(postitem)
if request_wants_json():
return jsonify({'status':'OK'})
else:
return redirect(request.form.get('from','/'))
@app.route('/api/v0/cloth/items/<item_id>',methods=['DELETE'])
@requiresAuthentication
def cloth_delitem(item_id):
_id = bson.ObjectId(item_id)
res = db.cloth_items.remove({'_id':_id})
if res.get('n',0) == 0:
abort(404)
return jsonify({'status':'OK'})
@app.route('/api/v0/weather',methods=['POST'])
def weathercache_post_new():
return weathercache_post(int(request.form['city_id']))
@app.route('/api/v0/weather/<int:city_id>',methods=['POST'])
@requiresAuthentication
def weathercache_post(city_id=0):
postrecord={'state':{}}
postrecord['city_id'] = city_id
postrecord['updated'] = datetime.datetime.strptime(request.form['updated'],"%a, %d %b %Y %H:%M:%S +0000")
postrecord['state']['temperature'] = float(request.form['state.temperature'])
postrecord['state']['temperatureFeelsLike'] = float(request.form['state.temperatureFeelsLike'])
postrecord['state']['windVelocity'] = float(request.form['state.windVelocity'])
postrecord['state']['windDirection'] = request.form['state.windDirection']
postrecord['state']['weatherInWords'] = request.form['state.weatherInWords']
postrecord['state']['humidity'] = float(request.form['state.humidity'])
postrecord['state']['weatherThumbnailURL'] = request.form['state.weatherThumbnailURL']
exist_record = db.weather_cache.find_one({'city_id':city_id})
if exist_record != None:
postrecord['_id'] = exist_record['_id']
db.weather_cache.save(postrecord)
if request_wants_json():
return jsonify({'status':'OK'})
else:
return redirect(request.form.get('from','/'))
@app.route('/api/v0/cloth/items',methods=['GET'])
@requiresAuthentication
def cloth_getitems():
query = {}
if 'inname' in request.args:
query['description.name'] = {'$regex':'.*'+request.args['inname']+'.*','$options':'i'}
if 'group' in request.args:
query['description.group'] = request.args['group']
if 'indesc' in request.args:
query['description.description'] = {'$regex':'.*'+request.args['indesc']+'.*','$options':'i'}
qres = db.cloth_items.find(query)
if 'orderby' in request.args:
orderby = request.args['orderby']
if orderby == 'name':
qres = qres.sort([('description.name',1)])
elif orderby == 'group':
qres = qres.sort([('description.group',1)])
else:
abort(400)
if 'page' in request.args or 'count' in request.args:
try:
page = int(request.args.get('page',1))
if page < 1:
page = 1
count = int(request.args.get('count',10))
if count <= 0:
count = 10
if count > 100:
count = 100
qres = qres.skip(count*(page-1)).limit(count)
except:
abort(400)
items = list(qres)
for item in items:
item['_id'] = str(item['_id'])
return jsonify({'items':items})
@app.route('/api/v0/weather/cached',methods=['GET'])
@requiresAuthentication
def weathercache_get():
records = list(db.weather_cache.find())
for rec in records:
del rec['_id']
rec['updated'] = rec['updated'].strftime("%a, %d %b %Y %H:%M:%S +0000")
return jsonify({'records':records})
UPLOAD_DIRECTORY = os.environ.get('UPLOAD_DIRECTORY','../frontend/uploads')
UPLOAD_BASEURL = os.environ.get('UPLOAD_BASEURL','/uploads')
@app.route('/api/v0/uploadfile',methods=['POST'])
@requiresAuthentication
def uploadFile():
_file = request.files['file']
filename = _file.filename
filename = werkzeug.utils.secure_filename(filename)
filepath = os.path.join(UPLOAD_DIRECTORY,filename)
counter = 0
while os.path.isfile(filepath):
filepath = os.path.join(UPLOAD_DIRECTORY,'({counter}) {filename}'.format(**locals()))
counter += 1
_file.save(filepath)
fileurl = UPLOAD_BASEURL+filepath[len(UPLOAD_DIRECTORY):]
return Response('{}',mimetype='application/json',status=201,headers={'Location':fileurl})
if __name__ == '__main__':
app.run( )
|
|
#! -*- coding: utf-8; mode: python -*-
"""
ago.py: interact with an ArcGIS Portal instance
"""
import arcpy
import json
import time
import datetime
import mimetypes
import gzip
import random
import string
import getpass
import sys
import os
from io import BytesIO
import codecs
import uuid
import shutil
try:
import http.client as client
import urllib.parse as parse
from urllib.request import urlopen as urlopen
from urllib.request import Request as request
from urllib.request import HTTPError, URLError
from urllib.parse import urlencode as encode
except ImportError:
    # py2
    import httplib as client
from urllib2 import urlparse as parse
from urllib2 import urlopen as urlopen
from urllib2 import Request as request
from urllib2 import HTTPError, URLError
from urllib import urlencode as encode
unicode = str
# Valid package types on portal
ITEM_TYPES = {
".LPK": "Layer Package",
".LPKX": "Layer Package",
".MPK": "Map Package",
".MPKX": "Map Package",
".GPK": "Geoprocessing Package",
".GPKX": "Geoprocessing Package",
".RPK": "Rule Package",
".GCPK": "Locator Package",
".PPKX": "Project Package",
".APTX": "Project Template",
".TPK": "Tile Package",
".MMPK": "Mobile Map Package",
".VTPK": "Vector Tile Package"
}
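# The message constants referenced later in this module (NO_PORTAL_URL_MSG,
# EMPTY_ITEM_MSG, HTTP_ERROR_MSG, URL_ERROR_MSG) are not defined in this excerpt.
# The placeholder wordings below are assumptions added so the module stays
# importable; they are not the original texts.
NO_PORTAL_URL_MSG = "Could not determine the portal URL; set an active portal or pass portal_url."
EMPTY_ITEM_MSG = "One or more item ids are missing; a previous portal request may have failed."
HTTP_ERROR_MSG = "HTTP error communicating with"
URL_ERROR_MSG = "Unable to reach"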
class MultipartFormdataEncoder(object):
"""
Usage: request_headers, request_data =
MultipartFormdataEncoder().encodeForm(params, files)
Inputs:
params = {"f": "json", "token": token, "type": item_type,
"title": title, "tags": tags, "description": description}
files = {"file": {"filename": "some_file.sd", "content": content}}
Note: content = open(file_path, "rb").read()
"""
def __init__(self):
self.boundary = uuid.uuid4().hex
self.content_type = {
"Content-Type": "multipart/form-data; boundary={}".format(self.boundary)
}
@classmethod
def u(cls, s):
if sys.hexversion < 0x03000000 and isinstance(s, str):
s = s.decode('utf-8')
if sys.hexversion >= 0x03000000 and isinstance(s, bytes):
s = s.decode('utf-8')
return s
def iter(self, fields, files):
"""
Yield bytes for body. See class description for usage.
"""
encoder = codecs.getencoder('utf-8')
for key, value in fields.items():
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(
self.u('Content-Disposition: form-data; name="{}"\r\n').format(key))
yield encoder('\r\n')
            if isinstance(value, (int, float)):
value = str(value)
yield encoder(self.u(value))
yield encoder('\r\n')
for key, value in files.items():
if "filename" in value:
filename = value.get("filename")
content_disp = 'Content-Disposition: form-data;name=' + \
'"{}"; filename="{}"\r\n'.format(key, filename)
content_type = 'Content-Type: {}\r\n'.format(
mimetypes.guess_type(filename)[0] or 'application/octet-stream')
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(content_disp)
yield encoder(content_type)
yield encoder('\r\n')
if "content" in value:
buff = value.get("content")
yield (buff, len(buff))
yield encoder('\r\n')
yield encoder('--{}--\r\n'.format(self.boundary))
def encodeForm(self, fields, files):
body = BytesIO()
for chunk, chunk_len in self.iter(fields, files):
body.write(chunk)
self.content_type["Content-Length"] = str(len(body.getvalue()))
return self.content_type, body.getvalue()
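# Illustrative only: a minimal sketch of how MultipartFormdataEncoder is used,
# mirroring the class docstring; the token, title, and file content are made up.
def _example_encode_form():
    params = {"f": "json", "token": "<token>", "type": "Tile Package",
              "title": "demo", "tags": "demo", "description": "demo upload"}
    files = {"file": {"filename": "demo.tpk", "content": b"\x00\x01\x02"}}
    headers, body = MultipartFormdataEncoder().encodeForm(params, files)
    # 'headers' holds the boundary-aware Content-Type plus Content-Length;
    # 'body' is the ready-to-send multipart payload as bytes.
    return headers, body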
class AGOLHelper(object):
"""
Interact with an ArcGIS Portal instance, such as ArcGIS Online. Must be
initialized with either the login() method, or by reusing an existing
OAuth token via token_login(). Covers approximately 1/3 of the complete
API, primarily focused on the common operations around uploading and
managing services and web maps.
"""
def __init__(self, portal_url=None, token=None, debug=False):
if portal_url is None:
self.portal_url = arcpy.GetActivePortalURL()
else:
self.portal_url = portal_url
        # in the absence of information, default to HTTPS
        self.protocol = 'https'
self.is_arcgis_online = False
url_parts = self._parse_url(self.portal_url)
if url_parts:
if url_parts.scheme:
self.protocol = url_parts.scheme
self.host = self._normalize_host_url(url_parts)
if url_parts.netloc == 'www.arcgis.com':
self.is_arcgis_online = True
self.protocol = 'https'
else:
arcpy.AddError(NO_PORTAL_URL_MSG)
sys.exit()
self.base_url = '{}://{}/sharing/rest'.format(self.protocol, self.host)
self.secure_url = 'https://{}/sharing/rest'.format(self.host)
self.token = token
self.debug = debug
self.headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': ('ago.py -- ArcGIS portal module 0.1')
}
self.portal_name = None
self.portal_info = {}
self.username = None
self.login_method = None
self.expiration = None
self._password = None
def login(self, username=None, password=None, repeat=None):
"""
Get a sign-in token from provided credentials.
Arguments:
username -- user to sign in with
password -- password for user (default: use getpass)
Returns:
None
"""
if username:
self.username = username
else:
arcpy.AddError("Expected user name. None given.")
return
if password is None:
self._password = getpass.getpass()
else:
self._password = password
token_url = '{}/generateToken?'.format(self.secure_url)
token_parameters = {
'username': username,
'password': self._password,
'referer': "http://maps.esri.com",
'expiration': 600,
}
token_response = self.url_request(
token_url, token_parameters, 'POST', repeat=repeat)
if token_response and 'token' in token_response:
self.token = token_response['token']
self.expiration = datetime.datetime.fromtimestamp(
token_response['expires'] / 1000) - datetime.timedelta(seconds=1)
if 'ssl' in token_response:
if token_response['ssl']:
self.protocol = 'https'
else:
self.protocol = 'http'
# update base information with token
self.information()
self.login_method = 'password'
else:
arcpy.AddError("Unable to get signin token.")
return
def token_login(self):
"""
Get a sign-in token generated from ArcPy.
Arguments:
None
Returns:
None
"""
# NOTE side-effects
token_response = arcpy.GetSigninToken()
if token_response and 'token' in token_response:
self.token = token_response['token']
self.expiration = datetime.datetime.fromtimestamp(
token_response['expires']) - datetime.timedelta(seconds=1)
if self.debug:
msg = 'Received token starting with ' + \
'"{}", valid for {} minutes.'.format(
self.token[0:10], self.valid_for)
arcpy.AddMessage(msg)
# update base information with token
self.information()
self.login_method = 'token'
else:
arcpy.AddError("Unable to get signin token.")
return
@property
def valid_for(self):
"""
Length the current token is valid for, in minutes.
Returns:
An integer of minutes token remains valid
"""
valid = False
if self.expiration and isinstance(self.expiration, datetime.datetime):
valid = (self.expiration - datetime.datetime.now()).seconds / 60
return valid
def information(self):
"""
Get portal 'self' information.
Arguments:
None
Returns:
A dictionary returned from portals/self.
"""
# NOTE side-effects; do separately
url = '{}/portals/self'.format(self.base_url)
portal_info = self.url_request(url)
self.portal_info = portal_info
self.portal_name = portal_info['portalName']
url = '{}/community/self'.format(self.base_url)
user_info = self.url_request(url)
self.username = user_info['username']
return self.portal_info
def random_string(self, length):
"""
Generate a random string of ASCII letters.
Arguments:
length = number of characters
Returns:
random string
"""
alpha = string.ascii_letters
        return ''.join(random.choice(alpha) for _ in range(length))
def encode_multipart_data(self, data, files):
"""
Create multipart boundaries between file streams.
Arguments:
data -- input data
files -- input files
Returns:
A tuple containing response -- (body, headers)
"""
boundary = self.random_string(30)
def get_content_type(filename):
""" Try to determine content type based on file extension."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def encode_field(field_name):
""" Encode fields using multipart specification."""
return('--' + boundary,
'Content-Disposition: form-data; name="%s"' % field_name,
'', str(data[field_name]))
def encode_file(field_name):
""" Encode file data using multipart specification."""
filename = str(files[field_name])
return('--' + boundary,
'Content-Disposition: form-data;'
'name="{}"; filename="{}"'.format(field_name, filename),
'Content-Type: %s' % get_content_type(filename),
'', open(filename, 'rb').read())
lines = []
for name in data:
lines.extend(encode_field(name))
for name in files:
lines.extend(encode_file(name))
lines.extend(('--%s--' % boundary, ''))
body = '\r\n'.join(lines)
headers = {
'content-type': 'multipart/form-data; boundary=' + boundary,
'content-length': str(len(body))
}
return body, headers
def list_folders(self):
"""
List available user folders.
Returns:
A dictionary of folder titles to ids.
"""
folders = {}
folder_request = self.user_content()['folders']
for folder in folder_request:
folders[folder['title']] = folder['id']
return folders
def create_folder(self, name):
"""
        Create a folder item under the signed-in user's content.
Arguments:
name -- folder name to create
Returns:
folder item id.
"""
folder = None
url = '{}/content/users/{}/createFolder'.format(
self.base_url, self.username)
parameters = {'title': name}
response = self.url_request(url, parameters, 'POST')
if response is not None and 'folder' in response:
folder = response['folder']['id']
return folder
def item(self, item_id=None, repeat=None):
"""
Get back information about a particular item. Must have read
access to the item requested.
Arguments:
item_id: the portal id of the desired item.
Returns:
Dictionary from item response.
"""
results = {}
if item_id:
url = '{}/content/items/{}'.format(self.base_url, item_id)
results = self.url_request(url, repeat=repeat)
return results
def move_items(self, target_folder_id, items):
"""
Move items to a target folder.
Arguments:
target_folder_id: folder id to move items to
items: list of one or more item ids to move
Returns:
None
"""
# Test if we have a None object somewhere
# This could potentially be the case if one of the previous
# portal responses was not successful.
if None in items:
arcpy.AddError(EMPTY_ITEM_MSG)
return
url = '{}/content/users/{}/moveItems'.format(
self.base_url, self.username)
parameters = {
'folder': target_folder_id,
'items': ','.join(map(str, items))
}
move_response = self.url_request(url, parameters, request_type='POST')
if self.debug:
msg = "Moving items, using {} with parameters {}, got {}".format(
url, parameters, move_response)
arcpy.AddMessage(msg)
return move_response
def share_items(self, groups=None, everyone=False, org=False, items=None):
"""
Shares one or more items with the specified groups. Can only share
items with groups the user belongs to. Can also share with
the users' current organization, and the public.
Arguments:
groups -- a list of group IDs to share items with
everyone -- publicly share the item (default: False)
org -- share with the users' organization (default: False)
items -- a list of item IDs to update sharing properties on
Returns:
A dictionary of JSON objects, one per item containing the item,
whether sharing was successful, any groups sharing failed with,
and any errors.
"""
if (groups is None and not everyone and not org) or not items:
if self.debug:
arcpy.AddWarning("Invalid sharing options set.")
return
# If shared with everyone, have to share with Org as well
if everyone:
org = True
url = '{}/content/users/{}/shareItems'.format(
self.base_url, self.username)
parameters = {
'everyone': everyone,
'org': org,
'items': ','.join(map(str, items))
}
# sharing with specific groups is optional
if groups:
parameters['groups'] = ','.join(map(str, groups))
sharing_response = self.url_request(url, parameters, 'POST')
if self.debug:
msg = "Sharing items, using {} with parameters {}, got {}".format(
url, parameters, sharing_response)
arcpy.AddMessage(msg)
return sharing_response
def search(self, title=None, item_type=None, group=None,
owner=None, item_id=None, repeat=None, num=10, id_only=True, name=None):
"""
Search for items, a partial implementation of the
search operation of the ArcGIS REST API. Requires one of:
title, item_type, group, owner.
Arguments:
title -- item title
item_type -- item type
group -- item group
owner -- username of item owner
            item_id -- item id
            name -- item (file) name
repeat -- retry the search, up to this number of times (default: None)
num -- number of results (default: 10)
id_only -- return only IDs of results. If False, will return
full JSON results. (default: True)
Returns:
A list of search results item ids.
"""
query_types = {
'title': title,
'type': item_type,
'group': group,
            'owner': self.username,  # NOTE: always uses the signed-in user, ignoring the 'owner' argument
'id': item_id,
'name': name
}
query_parts = []
for (label, value) in list(query_types.items()):
if value:
query_parts.append('{}: "{}"'.format(label, value))
if len(query_parts) == 0:
return
elif len(query_parts) == 1:
query = query_parts[0]
else:
query = " AND ".join(query_parts)
if self.debug:
arcpy.AddMessage("Searching for '{}'".format(query))
url = '{}/search'.format(self.base_url)
parameters = {
'num': num,
'q': query
}
response_info = self.url_request(url, parameters)
results = []
if response_info and 'results' in response_info:
if response_info['total'] > 0:
for item in response_info['results']:
if 'id' in item:
if id_only:
results.append(item['id'])
else:
results.append(item)
if self.debug:
if results:
arcpy.AddMessage("Got results! Found items: {}".format(results))
else:
arcpy.AddMessage("No results found.")
# occasional timing conflicts are happening; repeat search until we
# can continue -- the result should be empty since we just deleted it.
if repeat and not results:
repeat -= 1
if repeat <= 0:
return
time.sleep(1)
results = self.search(
title=title, item_type=item_type, group=group, owner=owner,
item_id=item_id, repeat=repeat, num=num, id_only=id_only)
return results
def user(self, username=None):
"""
A user resource representing a registered user of the portal.
Arguments:
username -- user of interest
Returns:
A dictionary of the JSON response.
"""
if username is None:
username = self.username
url = '{}/community/users/{}'.format(self.base_url, username)
return self.url_request(url)
def user_content(self, username=None):
"""
User items and folders.
Arguments:
username -- user of interest
Returns:
A dictionary of user items and folders.
"""
if username is None:
username = self.username
url = '{}/content/users/{}'.format(self.base_url, username)
return self.url_request(url)
def list_groups(self, username=None):
"""
List users' groups.
Returns:
A dictionary of group titles to ids.
"""
groups = {}
if username is None:
username = self.username
groups_request = self.user(username)['groups']
for group in groups_request:
groups[group['title']] = group['id']
return groups
def add_item(self, file_to_upload, username=None, folder_id=None, itemtype=None, params=None):
"""
Adds an item to the portal.
All items are added as multipart. Once the item is added,
Add Part will be called.
Returns:
The response/item_id of the item added.
"""
if username is None:
username = self.username
url = '{}/content/users/{}/{}/addItem'.format(self.base_url, username, folder_id)
parameters = {
'multipart': 'true',
'filename': file_to_upload,
}
if params:
parameters.update(params)
if itemtype:
parameters['type'] = itemtype
else:
try:
file_name, file_ext = os.path.splitext(os.path.basename(file_to_upload))
itemtype = ITEM_TYPES[file_ext.upper()]
except KeyError:
msg = "Unable to upload file: {}, unknown type".format(
file_to_upload)
arcpy.AddError(msg)
return
details = {'filename': file_to_upload}
add_item_res = self.url_request(
url, parameters, request_type="POST", files=details)
return self._add_part(file_to_upload, add_item_res['id'], itemtype)
def _add_part(self, file_to_upload, item_id, upload_type=None):
""" Add item part to an item being uploaded."""
def read_in_chunks(file_object, chunk_size=10000000):
"""Generate file chunks (default: 10MB)"""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
url = '{}/content/users/{}/items/{}/addPart'.format(
self.base_url, self.username, item_id)
with open(file_to_upload, 'rb') as f:
for part_num, piece in enumerate(read_in_chunks(f), start=1):
title = os.path.splitext(os.path.basename(file_to_upload))[0]
files = {"file": {"filename": file_to_upload, "content": piece}}
params = {
'f': "json",
'token': self.token,
'partNum': part_num,
'title': title,
'itemType': 'file',
'type': upload_type
}
headers, data = MultipartFormdataEncoder().encodeForm(params, files)
resp = self.url_request(url, data, "MULTIPART", headers, repeat=1)
return resp
def item_status(self, item_id, username=None):
"""
Gets the status of an item.
Returns:
The item's status. (partial | processing | failed | completed)
"""
if username is None:
username = self.username
url = '{}/content/users/{}/items/{}/status'.format(
self.base_url, username, item_id)
return self.url_request(url)
def commit(self, item_id, username=None):
"""
Commits an item that was uploaded as multipart
Returns:
Result of calling commit. (success: true| false)
"""
if username is None:
username = self.username
url = '{}/content/users/{}/items/{}/commit'.format(
self.base_url, username, item_id)
return self.url_request(url)
def update_item(self, item_id, metadata, username=None, folder_id=None, title=None):
"""
Updates metadata parts of an item.
Metadata expected as a tuple
Returns:
Result of calling update. (success: true | false)
"""
if username is None:
username = self.username
url = "{}/content/users/{}/{}/items/{}/update".format(
self.base_url, username, folder_id, item_id)
parameters = {
'snippet': metadata[0],
'description': metadata[1],
'tags': metadata[2],
'accessInformation': metadata[3],
'licenseInfo': metadata[4],
'token': self.token,
'f': 'json'
}
if title:
parameters['title'] = title
if len(metadata) > 5:
parameters['thumbnail'] = metadata[5]
with open(metadata[5], 'rb') as f:
d = f.read()
files = {"thumbnail": {"filename": metadata[5], "content": d }}
headers, data = MultipartFormdataEncoder().encodeForm(parameters, files)
resp = self.url_request(url, data, "MULTIPART", headers, repeat=1)
return resp
else:
return self.url_request(url, parameters, 'POST')
def url_request(self, in_url, request_parameters=None, request_type='GET',
additional_headers=None, files=None, repeat=0):
"""
Make a request to the portal, provided a portal URL
and request parameters, returns portal response. By default,
returns a JSON response, and reuses the current token.
Arguments:
in_url -- portal url
request_parameters -- dictionary of request parameters.
request_type -- HTTP verb (default: GET)
additional_headers -- any headers to pass along with the request.
files -- any files to send.
repeat -- repeat the request up to this number of times.
Returns:
dictionary of response from portal instance.
"""
# multipart requests pre-encode the parameters
if request_type == 'MULTIPART':
parameters = request_parameters
else:
parameters = {'f': 'json'}
# if we haven't logged in yet, won't have a valid token
if self.token:
parameters['token'] = self.token
if request_parameters:
parameters.update(request_parameters)
if request_type == 'GET':
req = request('?'.join((in_url, encode(parameters))))
elif request_type == 'MULTIPART':
req = request(in_url, parameters)
elif request_type == 'WEBMAP':
if files:
req = request(in_url, *self.encode_multipart_data(parameters, files))
else:
arcpy.AddWarning("Multipart request made, but no files provided.")
return
else:
req = request(
in_url, encode(parameters).encode('UTF-8'), self.headers)
if additional_headers:
for key, value in list(additional_headers.items()):
req.add_header(key, value)
req.add_header('Accept-encoding', 'gzip')
try:
response = urlopen(req)
except HTTPError as e:
arcpy.AddWarning("{} {} -- {}".format(
HTTP_ERROR_MSG, in_url, e.code))
return
except URLError as e:
arcpy.AddWarning("{} {} -- {}".format(
URL_ERROR_MSG, in_url, e.reason))
return
if response.info().get('Content-Encoding') == 'gzip':
buf = BytesIO(response.read())
with gzip.GzipFile(fileobj=buf) as gzip_file:
response_bytes = gzip_file.read()
else:
response_bytes = response.read()
response_text = response_bytes.decode('UTF-8')
# occasional timing conflicts; repeat until we get back a valid response.
response_json = json.loads(response_text)
# Check that data returned is not an error object
if not response_json or "error" in response_json:
rerun = False
if repeat > 0:
repeat -= 1
rerun = True
# token has expired. Revalidate, then rerun request
            if response_json and response_json['error'].get('code') == 498:
if self.debug:
arcpy.AddWarning("token invalid, retrying.")
                if self.login_method == 'token':
# regenerate the token if we're logged in via the application
self.token_login()
else:
self.login(self.username, self._password, repeat=0)
# after regenerating token, we should have something long-lived
if not self.token or self.valid_for < 5:
arcpy.AddError("Unable to get signin token.")
return
rerun = True
if rerun:
time.sleep(2)
response_json = self.url_request(
in_url, request_parameters, request_type,
additional_headers, files, repeat)
return response_json
def save_file(self, url, saveFile):
"""Saves a file to a given location"""
if self.token:
url += "?token={}".format(self.token)
data = urlopen(url).read()
with open(saveFile, "wb") as out_file:
out_file.write(data)
return saveFile
def assert_json_success(self, data):
"""A function that checks that the input JSON object
is not an error object."""
success = False
obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
arcpy.AddWarning("{} {}".format("JSON object returned an error.", str(obj)))
elif 'error' in obj:
err = obj['error']
# format the error message
if 'messageCode' in err:
code = err['messageCode']
elif 'code' in err:
code = err['code']
else:
code = "No code provided."
msg = "Portal error: {}: {}".format(err['message'], code)
if 'details' in err and err['details']:
details = []
for detail in err['details']:
# only use unique detail messages
if detail is not err['message']:
details.append(detail)
if details:
msg += ". Details: {}".format("\n".join(details))
arcpy.AddWarning(msg)
else:
success = True
return success
def _parse_url(self, url=None):
""" Parse a url into components."""
results = None
if url:
results = parse.urlparse(url)
return results
def _normalize_host_url(self, parse_result):
""" Normalize a hostname to include just the validated
location and path."""
host_url = parse_result.netloc
if parse_result.path:
path = parse_result.path
if path[-1] == '/':
path = path[:-1]
host_url += path
return host_url
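# Illustrative only: a rough sketch of the upload workflow AGOLHelper exposes
# (login -> add_item, which chunks the file via _add_part -> commit -> item_status).
# The portal URL, credentials, and package path are placeholders, and error handling
# is omitted; this is not part of the original module.
def _example_upload_package(username, password, package_path,
                            portal_url='https://www.arcgis.com'):
    ago = AGOLHelper(portal_url=portal_url)
    ago.login(username, password)
    ago.add_item(package_path)
    title = os.path.splitext(os.path.basename(package_path))[0]
    item_ids = ago.search(title=title, id_only=True) or []
    for item_id in item_ids:
        ago.commit(item_id)
        ago.item_status(item_id)
    return item_ids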
|
|
# Documented in https://zulip.readthedocs.io/en/latest/subsystems/queuing.html
from typing import Any, Callable, Dict, List, Mapping, Optional, cast, TypeVar, Type
import copy
import signal
import tempfile
from functools import wraps
from threading import Timer
import smtplib
import socket
from django.conf import settings
from django.db import connection
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import \
get_client, get_system_bot, PreregistrationUser, \
get_user_profile_by_id, Message, Realm, UserMessage, UserProfile, \
Client
from zerver.lib.context_managers import lockfile
from zerver.lib.error_notify import do_report_error
from zerver.lib.feedback import handle_feedback
from zerver.lib.queue import SimpleQueueClient, queue_json_publish, retry_event
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.email_notifications import handle_missedmessage_emails
from zerver.lib.push_notifications import handle_push_notification, handle_remove_push_notification, \
initialize_push_notifications
from zerver.lib.actions import do_send_confirmation_email, \
do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
internal_send_message, internal_send_private_message, notify_realm_export, \
render_incoming_message, do_update_embedded_data, do_mark_stream_messages_as_read
from zerver.lib.url_preview import preview as url_preview
from zerver.lib.digest import handle_digest_email
from zerver.lib.send_email import send_future_email, send_email_from_dict, \
FromAddress, EmailNotDeliveredException, handle_send_email_format_changes
from zerver.lib.email_mirror import process_message as mirror_email, rate_limit_mirror_by_realm, \
is_missed_message_address, extract_and_validate
from zerver.lib.streams import access_stream_by_id
from zerver.tornado.socket import req_redis_key, respond_send_message
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.db import reset_queries
from zerver.lib.redis_utils import get_redis_client
from zerver.context_processors import common_context
from zerver.lib.outgoing_webhook import do_rest_call, get_outgoing_webhook_service_handler
from zerver.models import get_bot_services, RealmAuditLog
from zulip_bots.lib import extract_query_without_mention
from zerver.lib.bot_lib import EmbeddedBotHandler, get_bot_handler, EmbeddedBotQuitException
from zerver.lib.exceptions import RateLimited
from zerver.lib.export import export_realm_wrapper
import os
import sys
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import requests
from io import StringIO
import urllib
logger = logging.getLogger(__name__)
class WorkerDeclarationException(Exception):
pass
ConcreteQueueWorker = TypeVar('ConcreteQueueWorker', bound='QueueProcessingWorker')
def assign_queue(
queue_name: str, enabled: bool=True, queue_type: str="consumer"
) -> Callable[[Type[ConcreteQueueWorker]], Type[ConcreteQueueWorker]]:
def decorate(clazz: Type[ConcreteQueueWorker]) -> Type[ConcreteQueueWorker]:
clazz.queue_name = queue_name
if enabled:
register_worker(queue_name, clazz, queue_type)
return clazz
return decorate
worker_classes = {} # type: Dict[str, Type[QueueProcessingWorker]]
queues = {} # type: Dict[str, Dict[str, Type[QueueProcessingWorker]]]
def register_worker(queue_name: str, clazz: Type['QueueProcessingWorker'], queue_type: str) -> None:
if queue_type not in queues:
queues[queue_type] = {}
queues[queue_type][queue_name] = clazz
worker_classes[queue_name] = clazz
def get_worker(queue_name: str) -> 'QueueProcessingWorker':
return worker_classes[queue_name]()
def get_active_worker_queues(queue_type: Optional[str]=None) -> List[str]:
"""Returns all the non-test worker queues."""
if queue_type is None:
return list(worker_classes.keys())
return list(queues[queue_type].keys())
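# Illustrative only: a sketch of how the registry above is typically consumed by a
# launcher process (the real Zulip management command is not shown here).
def _example_launch(queue_name: str) -> None:
    worker = get_worker(queue_name)  # instantiate the class registered via @assign_queue
    worker.setup()                   # connect to the queue broker
    worker.start()                   # block, consuming events through consume_wrapper()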
def check_and_send_restart_signal() -> None:
try:
if not connection.is_usable():
logging.warning("*** Sending self SIGUSR1 to trigger a restart.")
os.kill(os.getpid(), signal.SIGUSR1)
except Exception:
pass
def retry_send_email_failures(
func: Callable[[ConcreteQueueWorker, Dict[str, Any]], None]
) -> Callable[['QueueProcessingWorker', Dict[str, Any]], None]:
@wraps(func)
def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
try:
func(worker, data)
except (smtplib.SMTPServerDisconnected, socket.gaierror, EmailNotDeliveredException):
def on_failure(event: Dict[str, Any]) -> None:
logging.exception("Event {} failed".format(event))
retry_event(worker.queue_name, data, on_failure)
return wrapper
class QueueProcessingWorker:
queue_name = None # type: str
def __init__(self) -> None:
self.q = None # type: SimpleQueueClient
if self.queue_name is None:
raise WorkerDeclarationException("Queue worker declared without queue_name")
def consume(self, data: Dict[str, Any]) -> None:
raise WorkerDeclarationException("No consumer defined!")
def consume_wrapper(self, data: Dict[str, Any]) -> None:
try:
self.consume(data)
except Exception:
self._log_problem()
if not os.path.exists(settings.QUEUE_ERROR_DIR):
os.mkdir(settings.QUEUE_ERROR_DIR) # nocoverage
fname = '%s.errors' % (self.queue_name,)
fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
lock_fn = fn + '.lock'
with lockfile(lock_fn):
with open(fn, 'ab') as f:
f.write(line.encode('utf-8'))
check_and_send_restart_signal()
finally:
reset_queries()
def _log_problem(self) -> None:
logging.exception("Problem handling data on queue %s" % (self.queue_name,))
def setup(self) -> None:
self.q = SimpleQueueClient()
def start(self) -> None:
self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
self.q.start_consuming()
def stop(self) -> None: # nocoverage
self.q.stop_consuming()
class LoopQueueProcessingWorker(QueueProcessingWorker):
sleep_delay = 0
def start(self) -> None: # nocoverage
while True:
# TODO: Probably it'd be better to share code with consume_wrapper()
events = self.q.drain_queue(self.queue_name, json=True)
try:
self.consume_batch(events)
finally:
reset_queries()
time.sleep(self.sleep_delay)
def consume_batch(self, event: List[Dict[str, Any]]) -> None:
raise NotImplementedError
def consume(self, event: Dict[str, Any]) -> None:
"""In LoopQueueProcessingWorker, consume is used just for automated tests"""
self.consume_batch([event])
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
def consume(self, data: Dict[str, Any]) -> None:
# TODO: This is the only implementation with Dict cf Mapping; should we simplify?
user_profile = get_user_profile_by_id(data['user_id'])
logging.info("Processing signup for user %s in realm %s" % (
user_profile.email, user_profile.realm.string_id))
if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
(settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
params = dict(data)
del params['user_id']
params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
params['status'] = 'subscribed'
r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
logging.warning("Attempted to sign up already existing email to list: %s" %
(data['email_address'],))
elif r.status_code == 400:
retry_event('signups', data, lambda e: r.raise_for_status())
else:
r.raise_for_status()
@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
def consume(self, data: Mapping[str, Any]) -> None:
if "email" in data:
# When upgrading from a version up through 1.7.1, there may be
# existing items in the queue with `email` instead of `prereg_id`.
invitee = PreregistrationUser.objects.filter(
email__iexact=data["email"].strip()).latest("invited_at")
else:
invitee = PreregistrationUser.objects.filter(id=data["prereg_id"]).first()
if invitee is None:
# The invitation could have been revoked
return
referrer = get_user_profile_by_id(data["referrer_id"])
logger.info("Sending invitation for realm %s to %s" % (referrer.realm.string_id, invitee.email))
do_send_confirmation_email(invitee, referrer)
# queue invitation reminder for two days from now.
link = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer_name': referrer.full_name,
'referrer_email': referrer.email,
'referrer_realm_name': referrer.realm.name,
})
send_future_email(
"zerver/emails/invitation_reminder",
referrer.realm,
to_emails=[invitee.email],
from_address=FromAddress.tokenized_no_reply_address(),
language=referrer.realm.default_language,
context=context,
delay=datetime.timedelta(days=2))
@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
query = event["query"]
do_update_user_activity(user_profile, client, query, log_time)
@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
user_profile = get_user_profile_by_id(event["user_profile_id"])
log_time = timestamp_to_datetime(event["time"])
do_update_user_activity_interval(user_profile, log_time)
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
logging.debug("Received presence event: %s" % (event,),)
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
status = event["status"]
do_update_user_presence(user_profile, client, log_time, status)
@assign_queue('missedmessage_emails', queue_type="loop")
class MissedMessageWorker(QueueProcessingWorker):
# Aggregate all messages received over the last BATCH_DURATION
# seconds to let someone finish sending a batch of messages and/or
# editing them before they are sent out as emails to recipients.
#
    # The timer runs whenever there are batched events waiting to go out; we poll
    # at most every TIMER_FREQUENCY seconds, to avoid excessive activity.
#
# TODO: Since this process keeps events in memory for up to 2
# minutes, it now will lose approximately BATCH_DURATION worth of
# missed_message emails whenever it is restarted as part of a
# server restart. We should probably add some sort of save/reload
# mechanism for that case.
TIMER_FREQUENCY = 5
BATCH_DURATION = 120
timer_event = None # type: Optional[Timer]
events_by_recipient = defaultdict(list) # type: Dict[int, List[Dict[str, Any]]]
batch_start_by_recipient = {} # type: Dict[int, float]
def consume(self, event: Dict[str, Any]) -> None:
logging.debug("Received missedmessage_emails event: %s" % (event,))
# When we process an event, just put it into the queue and ensure we have a timer going.
user_profile_id = event['user_profile_id']
if user_profile_id not in self.batch_start_by_recipient:
self.batch_start_by_recipient[user_profile_id] = time.time()
self.events_by_recipient[user_profile_id].append(event)
self.ensure_timer()
def ensure_timer(self) -> None:
if self.timer_event is not None:
return
self.timer_event = Timer(self.TIMER_FREQUENCY, MissedMessageWorker.maybe_send_batched_emails, [self])
self.timer_event.start()
def stop_timer(self) -> None:
if self.timer_event and self.timer_event.is_alive():
self.timer_event.cancel()
self.timer_event = None
def maybe_send_batched_emails(self) -> None:
self.stop_timer()
current_time = time.time()
for user_profile_id, timestamp in list(self.batch_start_by_recipient.items()):
if current_time - timestamp < self.BATCH_DURATION:
continue
events = self.events_by_recipient[user_profile_id]
logging.info("Batch-processing %s missedmessage_emails events for user %s" %
(len(events), user_profile_id))
handle_missedmessage_emails(user_profile_id, events)
del self.events_by_recipient[user_profile_id]
del self.batch_start_by_recipient[user_profile_id]
# By only restarting the timer if there are actually events in
# the queue, we ensure this queue processor is idle when there
# are no missed-message emails to process.
if len(self.batch_start_by_recipient) > 0:
self.ensure_timer()
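    # Worked example of the batching above (illustrative timings): an event enqueued
    # at t=0 starts that user's batch; the Timer re-invokes this method every
    # TIMER_FREQUENCY (5s), but the batch is only flushed once BATCH_DURATION (120s)
    # has elapsed, so messages sent or edited within that window are folded into a
    # single missed-message email instead of one email per message.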
@assign_queue('email_senders')
class EmailSendingWorker(QueueProcessingWorker):
@retry_send_email_failures
def consume(self, event: Dict[str, Any]) -> None:
# Copy the event, so that we don't pass the `failed_tries'
# data to send_email_from_dict (which neither takes that
# argument nor needs that data).
copied_event = copy.deepcopy(event)
if 'failed_tries' in copied_event:
del copied_event['failed_tries']
handle_send_email_format_changes(copied_event)
send_email_from_dict(copied_event)
@assign_queue('missedmessage_email_senders')
class MissedMessageSendingWorker(EmailSendingWorker): # nocoverage
"""
Note: Class decorators are not inherited.
The `missedmessage_email_senders` queue was used up through 1.7.1, so we
keep consuming from it in case we've just upgraded from an old version.
After the 1.8 release, we can delete it and tell admins to upgrade to 1.8
first.
"""
# TODO: zulip-1.8: Delete code related to missedmessage_email_senders queue.
pass
@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker): # nocoverage
def start(self) -> None:
# initialize_push_notifications doesn't strictly do anything
# beyond printing some logging warnings if push notifications
# are not available in the current configuration.
initialize_push_notifications()
super().start()
def consume(self, data: Mapping[str, Any]) -> None:
if data.get("type", "add") == "remove":
message_ids = data.get('message_ids')
if message_ids is None: # legacy task across an upgrade
message_ids = [data['message_id']]
handle_remove_push_notification(data['user_profile_id'], message_ids)
else:
handle_push_notification(data['user_profile_id'], data)
# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
logging.info("Received feedback from %s" % (event["sender_email"],))
handle_feedback(event)
@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email')))
if settings.ERROR_REPORTING:
do_report_error(event['report']['host'], event['type'], event['report'])
@assign_queue('slow_queries', queue_type="loop")
class SlowQueryWorker(LoopQueueProcessingWorker):
# Sleep 1 minute between checking the queue
sleep_delay = 60 * 1
def consume_batch(self, slow_query_events: List[Dict[str, Any]]) -> None:
for event in slow_query_events:
logging.info("Slow query: %s" % (event["query"],))
if settings.SLOW_QUERY_LOGS_STREAM is None:
return
if settings.ERROR_BOT is None:
return
if len(slow_query_events) > 0:
topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)
content = ""
for event in slow_query_events:
content += " %s\n" % (event["query"],)
error_bot_realm = get_system_bot(settings.ERROR_BOT).realm
internal_send_message(error_bot_realm, settings.ERROR_BOT,
"stream", settings.SLOW_QUERY_LOGS_STREAM, topic, content)
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
def __init__(self) -> None:
super().__init__()
self.redis_client = get_redis_client()
self.handler = BaseHandler()
self.handler.load_middleware()
def consume(self, event: Mapping[str, Any]) -> None:
server_meta = event['server_meta']
environ = {
'REQUEST_METHOD': 'SOCKET',
'SCRIPT_NAME': '',
'PATH_INFO': '/json/messages',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': 9993,
'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
'wsgi.version': (1, 0),
'wsgi.input': StringIO(),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': True,
'wsgi.run_once': False,
'zulip.emulated_method': 'POST'
}
if 'socket_user_agent' in event['request']:
environ['HTTP_USER_AGENT'] = event['request']['socket_user_agent']
del event['request']['socket_user_agent']
# We're mostly using a WSGIRequest for convenience
environ.update(server_meta['request_environ'])
request = WSGIRequest(environ)
# Note: If we ever support non-POST methods, we'll need to change this.
request._post = event['request']
request.csrf_processing_done = True
user_profile = get_user_profile_by_id(server_meta['user_id'])
request._cached_user = user_profile
resp = self.handler.get_response(request)
server_meta['time_request_finished'] = time.time()
server_meta['worker_log_data'] = request._log_data
resp_content = resp.content.decode('utf-8')
response_data = ujson.loads(resp_content)
if response_data['result'] == 'error':
check_and_send_restart_signal()
result = {'response': response_data, 'req_id': event['req_id'],
'server_meta': server_meta}
redis_key = req_redis_key(event['req_id'])
self.redis_client.hmset(redis_key, {'status': 'complete',
'response': resp_content})
queue_json_publish(server_meta['return_queue'], result,
respond_send_message)
@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker): # nocoverage
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event: Mapping[str, Any]) -> None:
logging.info("Received digest event: %s" % (event,))
handle_digest_email(event["user_profile_id"], event["cutoff"])
@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
rcpt_to = event['rcpt_to']
if not is_missed_message_address(rcpt_to):
# Missed message addresses are one-time use, so we don't need
# to worry about emails to them resulting in message spam.
recipient_realm = extract_and_validate(rcpt_to)[0].realm
try:
rate_limit_mirror_by_realm(recipient_realm)
except RateLimited:
msg = email.message_from_string(event["message"])
logger.warning("MirrorWorker: Rejecting an email from: %s "
"to realm: %s - rate limited."
% (msg['From'], recipient_realm.name))
return
mirror_email(email.message_from_string(event["message"]),
rcpt_to=rcpt_to, pre_checked=True)
@assign_queue('test', queue_type="test")
class TestWorker(QueueProcessingWorker):
# This worker allows you to test the queue worker infrastructure without
# creating significant side effects. It can be useful in development or
# for troubleshooting prod/staging. It pulls a message off the test queue
# and appends it to a file in /tmp.
def consume(self, event: Mapping[str, Any]) -> None: # nocoverage
fn = settings.ZULIP_WORKER_TEST_FILE
message = ujson.dumps(event)
logging.info("TestWorker should append this message to %s: %s" % (fn, message))
with open(fn, 'a') as f:
f.write(message + '\n')
@assign_queue('embed_links')
class FetchLinksEmbedData(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
for url in event['urls']:
url_preview.get_link_embed_data(url)
message = Message.objects.get(id=event['message_id'])
# If the message changed, we will run this task after updating the message
# in zerver.views.messages.update_message_backend
if message.content != event['message_content']:
return
if message.content is not None:
query = UserMessage.objects.filter(
message=message.id
)
message_user_ids = set(query.values_list('user_profile_id', flat=True))
# Fetch the realm whose settings we're using for rendering
realm = Realm.objects.get(id=event['message_realm_id'])
# If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(
message,
message.content,
message_user_ids,
realm)
do_update_embedded_data(
message.sender, message, message.content, rendered_content)
@assign_queue('outgoing_webhooks')
class OutgoingWebhookWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
message = event['message']
dup_event = cast(Dict[str, Any], event)
dup_event['command'] = message['content']
services = get_bot_services(event['user_profile_id'])
for service in services:
dup_event['service_name'] = str(service.name)
service_handler = get_outgoing_webhook_service_handler(service)
request_data = service_handler.build_bot_request(dup_event)
if request_data:
do_rest_call(service.base_url,
request_data,
dup_event,
service_handler)
@assign_queue('embedded_bots')
class EmbeddedBotWorker(QueueProcessingWorker):
def get_bot_api_client(self, user_profile: UserProfile) -> EmbeddedBotHandler:
return EmbeddedBotHandler(user_profile)
def consume(self, event: Mapping[str, Any]) -> None:
user_profile_id = event['user_profile_id']
user_profile = get_user_profile_by_id(user_profile_id)
message = cast(Dict[str, Any], event['message'])
# TODO: Do we actually want to allow multiple Services per bot user?
services = get_bot_services(user_profile_id)
for service in services:
bot_handler = get_bot_handler(str(service.name))
if bot_handler is None:
logging.error("Error: User %s has bot with invalid embedded bot service %s" % (
user_profile_id, service.name))
continue
try:
if hasattr(bot_handler, 'initialize'):
bot_handler.initialize(self.get_bot_api_client(user_profile))
if event['trigger'] == 'mention':
message['content'] = extract_query_without_mention(
message=message,
client=self.get_bot_api_client(user_profile),
)
assert message['content'] is not None
bot_handler.handle_message(
message=message,
bot_handler=self.get_bot_api_client(user_profile)
)
except EmbeddedBotQuitException as e:
logging.warning(str(e))
@assign_queue('deferred_work')
class DeferredWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
if event['type'] == 'mark_stream_messages_as_read':
user_profile = get_user_profile_by_id(event['user_profile_id'])
client = Client.objects.get(id=event['client_id'])
for stream_id in event['stream_ids']:
# Since the user just unsubscribed, we don't require
# an active Subscription object (otherwise, private
# streams would never be accessible)
(stream, recipient, sub) = access_stream_by_id(user_profile, stream_id,
require_active=False)
do_mark_stream_messages_as_read(user_profile, client, stream)
elif event['type'] == 'realm_export':
realm = Realm.objects.get(id=event['realm_id'])
output_dir = tempfile.mkdtemp(prefix="zulip-export-")
public_url = export_realm_wrapper(realm=realm, output_dir=output_dir,
threads=6, upload=True, public_only=True,
delete_after_upload=True)
assert public_url is not None
# Store the relative URL of the export.
export_event = RealmAuditLog.objects.get(id=event['id'])
export_event.extra_data = ujson.dumps({'export_path': urllib.parse.urlparse(public_url).path,
'deleted_timestamp': None})
export_event.save(update_fields=['extra_data'])
# Send a private message notification letting the user who
# triggered the export know the export finished.
user_profile = get_user_profile_by_id(event['user_profile_id'])
content = "Your data export is complete and has been uploaded here:\n\n%s" % (
public_url,)
internal_send_private_message(
realm=user_profile.realm,
sender=get_system_bot(settings.NOTIFICATION_BOT),
recipient_user=user_profile,
content=content
)
# For future frontend use, also notify administrator
# clients that the export happened.
notify_realm_export(user_profile)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Submit or Cancel spot instance requests.
When submit, the process will block until the request is fulfilled
or the process is killed by user(like CTRL + C),
if the process is killed, the requests will be automatically canceled.
"""
import os
import time
import pickle
import argparse
import subprocess
import yaml
import boto.ec2.blockdevicemapping as bdm
from util import mkdir_p, info, warn, error
from init_aws import get_conn, get_ec2_conf
def get_bdm(ec2_conf):
def device(d):
dev = bdm.BlockDeviceType()
if d['VirtualName'].startswith('ephemeral'):
# Instance Storage
dev.ephemeral_name = d['VirtualName']
else:
# EBS
dev.size = d['Ebs.VolumeSize']
delete = d.get('Ebs.DeleteOnTermination', None)
if delete is not None:
dev.delete_on_termination = delete
return (d['DeviceName'], dev)
devices = map(device, ec2_conf['Block_Device_Mapping'])
device_mapping = bdm.BlockDeviceMapping()
for name, dev in devices:
device_mapping[name] = dev
return device_mapping
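# Illustrative only: the shape of the entries get_bdm() expects under the
# 'Block_Device_Mapping' key of the EC2 conf; device names and sizes here are
# assumptions, not a copy of the real configuration.
EXAMPLE_BLOCK_DEVICE_MAPPING = [
    {'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},   # instance store volume
    {'DeviceName': '/dev/sdc', 'VirtualName': 'ebs',
     'Ebs.VolumeSize': 100, 'Ebs.DeleteOnTermination': True},  # EBS volume
]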
def get_init_conf():
    with open('conf/init.yml') as f:
        return yaml.safe_load(f)
class RequestFailedError(Exception): pass
def all_fulfilled(requests):
fulfilled = True
for r in requests:
if r.status.code != 'fulfilled':
fulfilled = False
if r.state == 'failed':
raise RequestFailedError(r.status.message)
if not fulfilled:
break
return fulfilled
def wait_until_fulfilled(request_ids, conn):
while True:
requests = conn.get_all_spot_instance_requests(request_ids)
if not all_fulfilled(requests):
time.sleep(1)
else:
return requests
def add_tag(host):
return '{}-{}'.format(get_ec2_conf()['Tag'], host)
def get_host(tag):
return tag.split('-')[-1]
# request_id -> tag
def request_id_to_tag(requests, masters):
ret = {}
for i, rid in enumerate([r.id for r in requests]):
# TODO(cc): This naming convention for host may need changes
if i == 0:
host = 'AlluxioMaster'
elif i < masters:
host = 'AlluxioMaster{}'.format(i + 1)
else:
host = 'AlluxioWorker{}'.format(i - masters + 1)
ret[rid] = add_tag(host)
return ret
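# Worked example of the naming scheme above: with masters=2 and four fulfilled
# requests, the generated Name tags (assuming the conf Tag is 'alluxio') would be
# alluxio-AlluxioMaster, alluxio-AlluxioMaster2, alluxio-AlluxioWorker1 and
# alluxio-AlluxioWorker2.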
def save_request_ids(request_ids):
    with open('.request_ids', 'wb') as out:
        pickle.dump(request_ids, out)
def load_request_ids():
    with open('.request_ids', 'rb') as f:
        return pickle.load(f)
def submit_request(conn, ec2_conf, masters):
# enable ssh as root without tty
user_data = "#!/bin/bash\n \
echo 'Defaults:root !requiretty' > /etc/sudoers.d/998-vagrant-cloud-init-requiretty && \
echo 'Defaults:ec2-user !requiretty' > /etc/sudoers.d/999-vagrant-cloud-init-requiretty && \
chmod 440 /etc/sudoers.d/998-vagrant-cloud-init-requiretty && chmod 440 /etc/sudoers.d/999-vagrant-cloud-init-requiretty"
requests = conn.request_spot_instances(
price = ec2_conf['Spot_Price'],
image_id = ec2_conf['AMI'],
count = get_init_conf()['MachineNumber'],
availability_zone_group = ec2_conf['Availability_Zone'],
placement = ec2_conf['Availability_Zone'], # where to put instance
key_name = ec2_conf['Keypair'],
security_groups = [ec2_conf['Security_Group']],
user_data = user_data,
instance_type = ec2_conf['Instance_Type'],
block_device_map = get_bdm(ec2_conf))
request_ids = [r.id for r in requests]
save_request_ids(request_ids)
# sleep before waiting for spot instances to be fulfilled.
time.sleep(5)
# block, waiting for all requests to be fulfilled
requests = wait_until_fulfilled(request_ids, conn)
# tag the requests and instances
rid_tag = request_id_to_tag(requests, masters)
for r in requests:
tag = rid_tag[r.id]
r.add_tag('Name', tag)
conn.create_tags([r.instance_id], {'Name': tag})
return rid_tag, requests
def cancel_request(conn):
warn('canceling spot instance requests and terminating instances...')
requests = conn.get_all_spot_instance_requests(load_request_ids())
for r in requests:
r.cancel()
instance_ids = [r.instance_id for r in requests if r.instance_id is not None]
if len(instance_ids) > 0:
conn.terminate_instances(instance_ids)
# mock the inventory file and machine id files that should have
# been generated by vagrant, so that we can keep the vagrant work flow.
def mock_vagrant_info(instance_id_to_tag_ip):
inventory_dir = '.vagrant/provisioners/ansible/inventory'
mkdir_p(inventory_dir)
inventory = open(os.path.join(inventory_dir, 'vagrant_ansible_inventory'), 'w')
for instance_id, tag_ip in instance_id_to_tag_ip.iteritems():
tag, ip = tag_ip
host = get_host(tag)
inventory.write("{} ansible_ssh_host={} ansible_ssh_port=22\n".format(host, ip))
id_dir = os.path.join('.vagrant', 'machines', host, 'aws')
mkdir_p(id_dir)
with open(os.path.join(id_dir, 'id'), 'w') as f:
f.write(instance_id)
inventory.close()
def is_ssh_ready(host):
s = subprocess.Popen(['ssh',
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'ConnectTimeout=30',
'-i', os.path.expanduser(get_ec2_conf()['Key_Path']),
'%s@%s' % ('ec2-user', host),
'true'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
s.communicate()
return s.returncode == 0
def wait_for_ssh(hosts):
while len(hosts):
hosts = [h for h in hosts if not is_ssh_ready(h)]
def parse():
parser = argparse.ArgumentParser()
grp = parser.add_mutually_exclusive_group(required=True)
grp.add_argument('-s', '--submit', action='store_true')
grp.add_argument('-c', '--cancel', action='store_true')
parser.add_argument('--masters', type=int, default=1, help='number of Alluxio masters')
return parser.parse_args()
def main(args):
ec2_conf = get_ec2_conf()
conn = get_conn()
if args.submit:
info('waiting for spot instance requests to be fulfilled, you can cancel by ctrl+c ...')
try:
rid_tag, requests = submit_request(conn, ec2_conf, args.masters)
except (KeyboardInterrupt, RequestFailedError) as e:
error(e)
exit(1)
info('spot instance requests fulfilled')
instance_id_to_tag_ip = {}
info('getting instance IPs...')
for r in requests:
instance_id = r.instance_id
info('waiting for ip to be allocated to the machine')
ip = conn.get_only_instances([instance_id])[0].ip_address
while ip is None:
time.sleep(1)
ip = conn.get_only_instances([instance_id])[0].ip_address
instance_id_to_tag_ip[instance_id] = (rid_tag[r.id], ip)
info('mocking vagrant info under .vagrant...')
mock_vagrant_info(instance_id_to_tag_ip)
info('creation of spot instances done')
info('waiting for ssh to be available...')
wait_for_ssh([ip for tag, ip in instance_id_to_tag_ip.values()])
info('ssh for all instances are ready')
elif args.cancel:
cancel_request(conn)
if __name__ == '__main__':
main(parse())
|
|
import http.server
import io
import json
import os
import pprint
import shutil
import subprocess
import tempfile
import threading
import unittest
import urllib.parse
import warnings
import yaml
try:
import docker
import requests
deps_available=True
except ImportError:
deps_available=False
import repour.validation
# Only run integration tests if able and requested
run_integration_tests = deps_available and "REPOUR_RUN_IT" in os.environ
#
# Utils
#
def wait_in_logs(client, container, target_text):
log = client.logs(
container=container,
stream=True,
)
for raw_line in log:
line = raw_line.decode("utf-8")
if target_text in line:
break
else:
raise Exception("Container exited before target text '{target_text}' was found".format(**locals()))
#
# Tests
#
if run_integration_tests:
da_url = "http://10.19.208.25:8180/da/rest/v-0.4/reports/lookup/gavs"
class TestGitoliteIntegration(unittest.TestCase):
@classmethod
def setUpClass(cls):
# current file is in test/ relative to repo root
repo_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Docker client
cls.client = docker.Client(version="1.19")
# Build images
def build_image(dockerfile, tag):
for logline in cls.client.build(
path=repo_root,
dockerfile=dockerfile,
rm=True,
forcerm=True,
tag=tag,
):
pass
assert b"Successfully built" in logline, "Build of image {tag} failed".format(**locals())
repour_it_image = "repour_integration_test"
build_image("Dockerfile", repour_it_image)
repour_it_git_image = "repour_integration_test_git"
build_image("Dockerfile.gitolite", repour_it_git_image)
# Create OSE-like Secrets volume dir
cls.config_dir = tempfile.TemporaryDirectory()
# Create key pairs
for n in ["repour", "admin"]:
key_dir = os.path.join(cls.config_dir.name, n)
os.mkdir(key_dir)
priv_key = os.path.join(key_dir, n)
subprocess.check_call(["ssh-keygen", "-q", "-f", priv_key, "-N", ""])
key_owner = os.getuid()
cls.containers = []
try:
# Create/start Git
git_container = cls.client.create_container(
image=repour_it_git_image,
detach=True,
host_config=cls.client.create_host_config(
binds={
cls.config_dir.name: {
"bind": "/mnt/secrets",
"mode": "z",
}
},
),
user=key_owner,
)["Id"]
cls.containers.append(git_container)
cls.client.start(git_container)
cls.git_container = git_container
wait_in_logs(cls.client, git_container, "==> Ready")
git_hostname = cls.client.inspect_container(git_container)["NetworkSettings"]["IPAddress"]
# Create/start Repour
repour_container = cls.client.create_container(
image=repour_it_image,
detach=True,
host_config=cls.client.create_host_config(
links={ git_container: "git" },
binds={
cls.config_dir.name: {
"bind": "/mnt/secrets",
"mode": "z",
}
},
),
# Note that the forced UID change activates au.py, so
# setting REPOUR_GITOLITE_SSH_USER isn't required (will be
# source default user git instead of gitolite3)
user=key_owner,
environment={
"REPOUR_GITOLITE_HOST": git_hostname,
"REPOUR_PME_DA_URL": da_url,
}
)["Id"]
cls.containers.append(repour_container)
cls.client.start(repour_container)
cls.repour_container = repour_container
wait_in_logs(cls.client, repour_container, "Server started on socket")
repour_hostname = cls.client.inspect_container(repour_container)["NetworkSettings"]["IPAddress"]
cls.repour_api_url="http://{repour_hostname}:7331".format(**locals())
cls.requests_session = requests.Session()
# For run(s) to activate the log dumper in tearDownClass
cls.dump_logs = set()
except Exception:
print("\n\nContainer Startup Logs:")
for container in cls.containers:
print(cls.client.logs(container).decode("utf-8"))
print()
cls.client.remove_container(
container=container,
force=True,
)
cls.config_dir.cleanup()
raise
@classmethod
def tearDownClass(cls):
for container in cls.dump_logs:
print("\n\nContainer Logs:")
print(cls.client.logs(container).decode("utf-8"))
print()
for container in cls.containers:
cls.client.remove_container(
container=container,
force=True,
)
cls.config_dir.cleanup()
def run(self, result=None):
# lots of bugs in the dependency libraries at the current versions used
warnings.filterwarnings("ignore", category=ResourceWarning)
result = super().run(result) or result
# Activate log dump if anything didn't succeed
if len(result.errors) + len(result.failures) > 0:
self.dump_logs.add(self.repour_container)
return result
def check_clone(self, url, tag, expected_files=[]):
with tempfile.TemporaryDirectory() as repo_dir:
try:
subprocess.check_output(["git", "clone", "--branch", tag, "--", url, repo_dir], stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError as e:
                    print(e.output)
                    raise
for expected_file in expected_files:
self.assertTrue(
expr=os.path.exists(os.path.join(repo_dir, expected_file)),
msg="{expected_file} does not exist in internal repository".format(**locals()),
)
def do_pull(self, body, patch=None, expect="ok_pull", expected_files=[]):
if patch is not None:
body = body.copy()
for k,v in patch.items():
if v is not None:
body[k] = v
resp = self.requests_session.post(
url=self.repour_api_url + "/pull",
json=body,
)
ret = resp.json()
try:
if expect == "ok_pull":
self.assertEqual(resp.status_code, 200)
repour.validation.success_pull(ret)
self.assertRegex(ret["branch"], "^branch-pull-[0-9a-f]+$")
self.assertRegex(ret["tag"], "^repour-[0-9a-f]+$")
self.check_clone(
url=ret["url"]["readonly"],
tag=ret["tag"],
expected_files=expected_files,
)
elif expect == "ok_adjust":
self.assertEqual(resp.status_code, 200)
repour.validation.success_pull_adjust(ret)
self.assertRegex(ret["branch"], "^branch-adjust-[0-9a-f]+$")
self.assertRegex(ret["tag"], "^repour-[0-9a-f]+$")
self.check_clone(
url=ret["url"]["readonly"],
tag=ret["tag"],
expected_files=expected_files,
)
self.check_clone(
url=ret["url"]["readonly"],
tag=ret["pull"]["tag"],
expected_files=expected_files,
)
elif expect == "validation_error":
self.assertEqual(resp.status_code, 400)
repour.validation.error_validation(ret)
elif expect == "described_error":
self.assertEqual(resp.status_code, 400)
repour.validation.error_described(ret)
elif expect == "other_error":
self.assertEqual(resp.status_code, 500)
repour.validation.error_other(ret)
else:
raise Exception("Don't know how to expect {}".format(expect))
except Exception:
print("\nResponse Body:")
print(resp.status_code)
pprint.pprint(ret)
print("")
raise
return ret
def test_pull_git(self):
for ref in ["1.5.0.Beta1", "master", None, "2d8307585e97fff3a86c34eb86c681ba81bb1811"]:
with self.subTest(ref=ref):
self.do_pull(
body={
"name": "jboss-modules-1.5.0",
"type": "git",
"url": "https://github.com/jboss-modules/jboss-modules.git",
},
patch={
"ref": ref
},
expected_files=["pom.xml"],
)
def test_name_capitals(self):
body = {
"name": "JGroups",
"type": "git",
"ref": "master",
"url": "https://github.com/belaban/JGroups.git",
}
for i in range(2):
with self.subTest(stage=i):
self.do_pull(
body=body,
expected_files=["pom.xml"],
)
with self.subTest(stage="lowercase"):
self.do_pull(
body=body,
patch={
"name": body["name"].lower(),
},
expected_files=["pom.xml"],
)
def test_pull_hg(self):
for ref in ["default", None]:
with self.subTest(ref=ref):
ret = self.do_pull(
body={
"name": "hello",
"type": "hg",
"url": "https://selenic.com/repo/hello",
},
patch={
"ref": ref,
},
expected_files=["Makefile"],
)
self.assertIn("hello", ret["url"]["readonly"])
def test_pull_svn(self):
for ref,suffix in [(None,"tags/commons-io-1.3.2"), ("1709188","trunk")]:
with self.subTest(ref=ref, suffix=suffix):
self.do_pull(
body={
"name": "apache-commons-io",
"type": "svn",
"url": "https://svn.apache.org/repos/asf/commons/proper/io/" + suffix,
},
patch={
"ref": ref,
},
expected_files=["pom.xml"],
)
def test_pull_archive(self):
for ext in [".tar.gz", ".zip"]:
with self.subTest(ext=ext):
self.do_pull(
body={
"name": "jboss-modules-1.5.0",
"type": "archive",
"url": "https://github.com/jboss-modules/jboss-modules/archive/1.4.4.Final" + ext,
},
expected_files=["pom.xml"],
)
def test_callback(self):
server_ip = self.client.inspect_container(self.repour_container)["NetworkSettings"]["Gateway"]
server_port = 8080
server_done = threading.Event()
callback_data = None
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
nonlocal callback_data
size = int(self.headers.get("content-length", 0))
callback_data = json.loads(self.rfile.read(size).decode("utf-8"))
self.send_response(200)
self.end_headers()
self.flush_headers()
server_done.set()
def log_request(self, format, *args):
pass
server = http.server.HTTPServer((server_ip, server_port), Handler)
server_thread = threading.Thread(target=server.serve_forever, daemon=True)
try:
server_thread.start()
resp = self.requests_session.post(
url=self.repour_api_url + "/pull",
json={
"name": "jboss-modules",
"type": "git",
"url": "https://github.com/jboss-modules/jboss-modules.git",
"ref": "master",
"callback": {
"url": "http://{server_ip}:{server_port}/".format(**locals()),
},
},
)
ret = resp.json()
server_done.wait(timeout=10)
finally:
server.shutdown()
server_thread.join(timeout=5)
self.assertEqual(200, callback_data["callback"]["status"])
self.assertEqual(ret["callback"]["id"], callback_data["callback"]["id"])
# TODO possibly use decorator on adjust tests to skip if PME restURL host isn't accessible
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Defines functionality for pipelined execution of interfaces
The `Workflow` class provides core functionality for batch processing.
.. testsetup::
# Change directory to provide relative paths for doctests
import os
filepath = os.path.dirname(os.path.realpath( __file__ ))
datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
os.chdir(datadir)
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
from datetime import datetime
from nipype.utils.misc import flatten, unflatten
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from copy import deepcopy
import pickle
from glob import glob
import gzip
import inspect
import os
import os.path as op
import re
import shutil
import errno
import socket
from shutil import rmtree
import sys
from tempfile import mkdtemp
from warnings import warn
from hashlib import sha1
import numpy as np
import networkx as nx
from ...utils.misc import package_check, str2bool
package_check('networkx', '1.3')
from ... import config, logging
logger = logging.getLogger('workflow')
from ...interfaces.base import (traits, InputMultiPath, CommandLine,
Undefined, TraitedSpec, DynamicTraitedSpec,
Bunch, InterfaceResult, md5, Interface,
TraitDictObject, TraitListObject, isdefined)
from ...utils.misc import (getsource, create_function_from_source,
flatten, unflatten)
from ...utils.filemanip import (save_json, FileNotFoundError,
filename_to_list, list_to_filename,
copyfiles, fnames_presuffix, loadpkl,
split_filename, load_json, savepkl,
write_rst_header, write_rst_dict,
write_rst_list)
from ...external.six import string_types
from .utils import (generate_expanded_graph, modify_paths,
export_graph, make_output_dir, write_workflow_prov,
clean_working_directory, format_dot, topological_sort,
get_print_name, merge_dict, evaluate_connect_function,
_write_inputs, format_node)
from .base import EngineBase
from .nodes import Node, MapNode
class Workflow(EngineBase):
"""Controls the setup and execution of a pipeline of processes."""
def __init__(self, name, base_dir=None):
"""Create a workflow object.
Parameters
----------
name : alphanumeric string
unique identifier for the workflow
base_dir : string, optional
path to workflow storage
"""
super(Workflow, self).__init__(name, base_dir)
self._graph = nx.DiGraph()
self.config = deepcopy(config._sections)
# PUBLIC API
def clone(self, name):
"""Clone a workflow
.. note::
Will reset attributes used for executing workflow. See
_init_runtime_fields.
Parameters
----------
name: alphanumeric name
unique name for the workflow
"""
clone = super(Workflow, self).clone(name)
clone._reset_hierarchy()
return clone
# Graph creation functions
def connect(self, *args, **kwargs):
"""Connect nodes in the pipeline.
This routine also checks if inputs and outputs are actually provided by
the nodes that are being connected.
Creates edges in the directed graph using the nodes and edges specified
in the `connection_list`. Uses the NetworkX method
DiGraph.add_edges_from.
Parameters
----------
args : list or a set of four positional arguments
Four positional arguments of the form::
connect(source, sourceoutput, dest, destinput)
source : nodewrapper node
sourceoutput : string (must be in source.outputs)
dest : nodewrapper node
destinput : string (must be in dest.inputs)
A list of 3-tuples of the following form::
[(source, target,
[('sourceoutput/attribute', 'targetinput'),
...]),
...]
Or::
[(source, target, [(('sourceoutput1', func, arg2, ...),
'targetinput'), ...]),
...]
        sourceoutput1 will always be the first argument to func,
        and func will be evaluated and its result sent to targetinput.
        Currently func needs to define all of its required imports inside
        the function body, because the inspect module is used to retrieve
        the source code and execute it remotely.
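        Examples
        --------
        A minimal sketch of both calling forms (``realign`` and ``smooth``
        are hypothetical, previously created nodes; the field names are
        purely illustrative)::
            wf.connect(realign, 'realigned_files', smooth, 'in_files')
            wf.connect([(realign, smooth,
                         [('realigned_files', 'in_files')])])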
"""
if len(args) == 1:
connection_list = args[0]
elif len(args) == 4:
connection_list = [(args[0], args[2], [(args[1], args[3])])]
else:
raise TypeError('connect() takes either 4 arguments, or 1 list of'
' connection tuples (%d args given)' % len(args))
disconnect = False
if kwargs:
disconnect = kwargs.get('disconnect', False)
if disconnect:
self.disconnect(connection_list)
return
newnodes = []
for srcnode, destnode, _ in connection_list:
if self in [srcnode, destnode]:
msg = ('Workflow connect cannot contain itself as node:'
' src[%s] dest[%s] workflow[%s]') % (srcnode,
destnode,
self.name)
raise IOError(msg)
if (srcnode not in newnodes) and not self._has_node(srcnode):
newnodes.append(srcnode)
if (destnode not in newnodes) and not self._has_node(destnode):
newnodes.append(destnode)
if newnodes:
self._check_nodes(newnodes)
for node in newnodes:
if node._hierarchy is None:
node._hierarchy = self.name
not_found = []
connected_ports = {}
for srcnode, destnode, connects in connection_list:
if destnode not in connected_ports:
connected_ports[destnode] = []
# check to see which ports of destnode are already
# connected.
if not disconnect and (destnode in self._graph.nodes()):
for edge in self._graph.in_edges_iter(destnode):
data = self._graph.get_edge_data(*edge)
for sourceinfo, destname in data['connect']:
if destname not in connected_ports[destnode]:
connected_ports[destnode] += [destname]
for source, dest in connects:
# Currently datasource/sink/grabber.io modules
# determine their inputs/outputs depending on
# connection settings. Skip these modules in the check
if dest in connected_ports[destnode]:
raise Exception("""
Trying to connect %s:%s to %s:%s but input '%s' of node '%s' is already
connected.
""" % (srcnode, source, destnode, dest, dest, destnode))
if not (hasattr(destnode, '_interface') and
'.io' in str(destnode._interface.__class__)):
if not destnode._check_inputs(dest):
not_found.append(['in', destnode.name, dest])
if not (hasattr(srcnode, '_interface') and
'.io' in str(srcnode._interface.__class__)):
if isinstance(source, tuple):
# handles the case that source is specified
# with a function
sourcename = source[0]
elif isinstance(source, string_types):
sourcename = source
else:
raise Exception(('Unknown source specification in '
'connection from output of %s') %
srcnode.name)
if sourcename and not srcnode._check_outputs(sourcename):
not_found.append(['out', srcnode.name, sourcename])
connected_ports[destnode] += [dest]
infostr = []
for info in not_found:
infostr += ["Module %s has no %sput called %s\n" % (info[1],
info[0],
info[2])]
if not_found:
raise Exception('\n'.join(['Some connections were not found'] +
infostr))
# turn functions into strings
for srcnode, destnode, connects in connection_list:
for idx, (src, dest) in enumerate(connects):
if isinstance(src, tuple) and not isinstance(src[1], string_types):
function_source = getsource(src[1])
connects[idx] = ((src[0], function_source, src[2:]), dest)
# add connections
for srcnode, destnode, connects in connection_list:
edge_data = self._graph.get_edge_data(srcnode, destnode, None)
if edge_data:
logger.debug('(%s, %s): Edge data exists: %s'
% (srcnode, destnode, str(edge_data)))
for data in connects:
if data not in edge_data['connect']:
edge_data['connect'].append(data)
if disconnect:
logger.debug('Removing connection: %s' % str(data))
edge_data['connect'].remove(data)
if edge_data['connect']:
self._graph.add_edges_from([(srcnode,
destnode,
edge_data)])
else:
# pass
logger.debug('Removing connection: %s->%s' % (srcnode,
destnode))
self._graph.remove_edges_from([(srcnode, destnode)])
elif not disconnect:
logger.debug('(%s, %s): No edge data' % (srcnode, destnode))
self._graph.add_edges_from([(srcnode, destnode,
{'connect': connects})])
edge_data = self._graph.get_edge_data(srcnode, destnode)
logger.debug('(%s, %s): new edge data: %s' % (srcnode, destnode,
str(edge_data)))
def disconnect(self, *args):
"""Disconnect nodes
See the docstring for connect for format.
"""
if len(args) == 1:
connection_list = args[0]
elif len(args) == 4:
connection_list = [(args[0], args[2], [(args[1], args[3])])]
else:
raise TypeError('disconnect() takes either 4 arguments, or 1 list '
'of connection tuples (%d args given)' % len(args))
for srcnode, dstnode, conn in connection_list:
logger.debug('disconnect(): %s->%s %s' % (srcnode, dstnode, conn))
if self in [srcnode, dstnode]:
                raise IOError(
                    'Workflow connect cannot contain itself as node: src[%s] '
                    'dest[%s] workflow[%s]' % (srcnode, dstnode, self.name))
# If node is not in the graph, not connected
if not self._has_node(srcnode) or not self._has_node(dstnode):
continue
edge_data = self._graph.get_edge_data(
srcnode, dstnode, {'connect': []})
ed_conns = [(c[0], c[1]) for c in edge_data['connect']]
remove = []
for edge in conn:
if edge in ed_conns:
idx = ed_conns.index(edge)
remove.append((edge[0], edge[1]))
logger.debug('disconnect(): remove list %s' % remove)
for el in remove:
edge_data['connect'].remove(el)
logger.debug('disconnect(): removed connection %s' % str(el))
if not edge_data['connect']:
self._graph.remove_edge(srcnode, dstnode)
else:
self._graph.add_edges_from([(srcnode, dstnode, edge_data)])
def add_nodes(self, nodes):
""" Add nodes to a workflow
Parameters
----------
nodes : list
A list of EngineBase-based objects
"""
newnodes = []
all_nodes = self._get_all_nodes()
for node in nodes:
if self._has_node(node):
raise IOError('Node %s already exists in the workflow' % node)
if isinstance(node, Workflow):
for subnode in node._get_all_nodes():
if subnode in all_nodes:
raise IOError(('Subnode %s of node %s already exists '
'in the workflow') % (subnode, node))
newnodes.append(node)
if not newnodes:
logger.debug('no new nodes to add')
return
for node in newnodes:
if not issubclass(node.__class__, EngineBase):
raise Exception('Node %s must be a subclass of EngineBase' %
str(node))
self._check_nodes(newnodes)
for node in newnodes:
if node._hierarchy is None:
node._hierarchy = self.name
self._graph.add_nodes_from(newnodes)
def remove_nodes(self, nodes):
""" Remove nodes from a workflow
Parameters
----------
nodes : list
A list of EngineBase-based objects
"""
self._graph.remove_nodes_from(nodes)
# Input-Output access
@property
def inputs(self):
return self._get_inputs()
@property
def outputs(self):
return self._get_outputs()
def get_node(self, name):
"""Return an internal node by name
"""
nodenames = name.split('.')
nodename = nodenames[0]
outnode = [node for node in self._graph.nodes() if
str(node).endswith('.' + nodename)]
if outnode:
outnode = outnode[0]
if nodenames[1:] and issubclass(outnode.__class__, Workflow):
outnode = outnode.get_node('.'.join(nodenames[1:]))
else:
outnode = None
return outnode
def list_node_names(self):
"""List names of all nodes in a workflow
"""
outlist = []
for node in nx.topological_sort(self._graph):
if isinstance(node, Workflow):
outlist.extend(['.'.join((node.name, nodename)) for nodename in
node.list_node_names()])
else:
outlist.append(node.name)
return sorted(outlist)
def write_graph(self, dotfilename='graph.dot', graph2use='hierarchical',
format="png", simple_form=True):
"""Generates a graphviz dot file and a png file
Parameters
----------
graph2use: 'orig', 'hierarchical' (default), 'flat', 'exec', 'colored'
orig - creates a top level graph without expanding internal
workflow nodes;
flat - expands workflow nodes recursively;
hierarchical - expands workflow nodes recursively with a
notion on hierarchy;
colored - expands workflow nodes recursively with a
notion on hierarchy in color;
exec - expands workflows to depict iterables
format: 'png', 'svg'
simple_form: boolean (default: True)
Determines if the node name used in the graph should be of the form
'nodename (package)' when True or 'nodename.Class.package' when
False.
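        Example
        -------
        A sketch using the parameters documented above; the file name is
        illustrative::
            wf.write_graph(dotfilename='wf_graph.dot', graph2use='flat',
                           format='svg', simple_form=True)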
"""
graphtypes = ['orig', 'flat', 'hierarchical', 'exec', 'colored']
if graph2use not in graphtypes:
raise ValueError('Unknown graph2use keyword. Must be one of: ' +
str(graphtypes))
base_dir, dotfilename = op.split(dotfilename)
if base_dir == '':
if self.base_dir:
base_dir = self.base_dir
if self.name:
base_dir = op.join(base_dir, self.name)
else:
base_dir = os.getcwd()
base_dir = make_output_dir(base_dir)
if graph2use in ['hierarchical', 'colored']:
dotfilename = op.join(base_dir, dotfilename)
self.write_hierarchical_dotfile(dotfilename=dotfilename,
colored=graph2use == "colored",
simple_form=simple_form)
format_dot(dotfilename, format=format)
else:
graph = self._graph
if graph2use in ['flat', 'exec']:
graph = self._create_flat_graph()
if graph2use == 'exec':
graph = generate_expanded_graph(deepcopy(graph))
export_graph(graph, base_dir, dotfilename=dotfilename,
format=format, simple_form=simple_form)
def write_hierarchical_dotfile(self, dotfilename=None, colored=False,
simple_form=True):
dotlist = ['digraph %s{' % self.name]
dotlist.append(self._get_dot(prefix=' ', colored=colored,
simple_form=simple_form))
dotlist.append('}')
dotstr = '\n'.join(dotlist)
if dotfilename:
fp = open(dotfilename, 'wt')
fp.writelines(dotstr)
fp.close()
else:
logger.info(dotstr)
def export(self, filename=None, prefix="output", format="python",
include_config=False):
"""Export object into a different format
Parameters
----------
filename: string
file to save the code to; overrides prefix
prefix: string
prefix to use for output file
format: string
one of "python"
include_config: boolean
whether to include node and workflow config values
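        Example
        -------
        A sketch; the output file name is illustrative::
            lines = wf.export(filename='exported_workflow.py',
                              include_config=False)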
"""
formats = ["python"]
if format not in formats:
raise ValueError('format must be one of: %s' % '|'.join(formats))
flatgraph = self._create_flat_graph()
nodes = nx.topological_sort(flatgraph)
lines = ['# Workflow']
importlines = ['from nipype.pipeline.engine import Workflow, '
'Node, MapNode']
functions = {}
if format == "python":
connect_template = '%s.connect(%%s, %%s, %%s, "%%s")' % self.name
connect_template2 = '%s.connect(%%s, "%%s", %%s, "%%s")' \
% self.name
wfdef = '%s = Workflow("%s")' % (self.name, self.name)
lines.append(wfdef)
if include_config:
lines.append('%s.config = %s' % (self.name, self.config))
for idx, node in enumerate(nodes):
nodename = node.fullname.replace('.', '_')
# write nodes
nodelines = format_node(node, format='python',
include_config=include_config)
for line in nodelines:
if line.startswith('from'):
if line not in importlines:
importlines.append(line)
else:
lines.append(line)
# write connections
for u, _, d in flatgraph.in_edges_iter(nbunch=node,
data=True):
for cd in d['connect']:
if isinstance(cd[0], tuple):
args = list(cd[0])
if args[1] in functions:
funcname = functions[args[1]]
else:
func = create_function_from_source(args[1])
funcname = [name for name in func.__globals__
if name != '__builtins__'][0]
functions[args[1]] = funcname
args[1] = funcname
args = tuple([arg for arg in args if arg])
line_args = (u.fullname.replace('.', '_'),
args, nodename, cd[1])
line = connect_template % line_args
line = line.replace("'%s'" % funcname, funcname)
lines.append(line)
else:
line_args = (u.fullname.replace('.', '_'),
cd[0], nodename, cd[1])
lines.append(connect_template2 % line_args)
functionlines = ['# Functions']
for function in functions:
functionlines.append(pickle.loads(function).rstrip())
all_lines = importlines + functionlines + lines
if not filename:
filename = '%s%s.py' % (prefix, self.name)
with open(filename, 'wt') as fp:
fp.writelines('\n'.join(all_lines))
return all_lines
def run(self, plugin=None, plugin_args=None, updatehash=False):
""" Execute the workflow
Parameters
----------
plugin: plugin name or object
Plugin to use for execution. You can create your own plugins for
execution.
        plugin_args : dictionary containing arguments to be sent to the plugin
            constructor. See individual plugin docstrings for details.
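        Example
        -------
        A sketch; 'MultiProc' is one of the bundled execution plugins and
        the argument values shown are illustrative::
            execgraph = wf.run(plugin='MultiProc',
                               plugin_args={'n_procs': 4})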
"""
if plugin is None:
plugin = config.get('execution', 'plugin')
if not isinstance(plugin, string_types):
runner = plugin
else:
name = 'nipype.pipeline.plugins'
try:
__import__(name)
except ImportError:
msg = 'Could not import plugin module: %s' % name
logger.error(msg)
raise ImportError(msg)
else:
plugin_mod = getattr(sys.modules[name], '%sPlugin' % plugin)
runner = plugin_mod(plugin_args=plugin_args)
flatgraph = self._create_flat_graph()
self.config = merge_dict(deepcopy(config._sections), self.config)
if 'crashdump_dir' in self.config:
warn(("Deprecated: workflow.config['crashdump_dir']\n"
"Please use config['execution']['crashdump_dir']"))
crash_dir = self.config['crashdump_dir']
self.config['execution']['crashdump_dir'] = crash_dir
del self.config['crashdump_dir']
logger.info(str(sorted(self.config)))
self._set_needed_outputs(flatgraph)
execgraph = generate_expanded_graph(deepcopy(flatgraph))
for index, node in enumerate(execgraph.nodes()):
node.config = merge_dict(deepcopy(self.config), node.config)
node.base_dir = self.base_dir
node.index = index
if isinstance(node, MapNode):
node.use_plugin = (plugin, plugin_args)
self._configure_exec_nodes(execgraph)
if str2bool(self.config['execution']['create_report']):
self._write_report_info(self.base_dir, self.name, execgraph)
runner.run(execgraph, updatehash=updatehash, config=self.config)
datestr = datetime.utcnow().strftime('%Y%m%dT%H%M%S')
if str2bool(self.config['execution']['write_provenance']):
prov_base = op.join(self.base_dir,
'workflow_provenance_%s' % datestr)
logger.info('Provenance file prefix: %s' % prov_base)
write_workflow_prov(execgraph, prov_base, format='all')
return execgraph
# PRIVATE API AND FUNCTIONS
def _write_report_info(self, workingdir, name, graph):
if workingdir is None:
workingdir = os.getcwd()
report_dir = op.join(workingdir, name)
if not op.exists(report_dir):
os.makedirs(report_dir)
shutil.copyfile(op.join(op.dirname(__file__),
'report_template.html'),
op.join(report_dir, 'index.html'))
shutil.copyfile(op.join(op.dirname(__file__),
'..', '..', 'external', 'd3.js'),
op.join(report_dir, 'd3.js'))
nodes, groups = topological_sort(graph, depth_first=True)
graph_file = op.join(report_dir, 'graph1.json')
json_dict = {'nodes': [], 'links': [], 'groups': [], 'maxN': 0}
for i, node in enumerate(nodes):
report_file = "%s/_report/report.rst" % \
node.output_dir().replace(report_dir, '')
result_file = "%s/result_%s.pklz" % \
(node.output_dir().replace(report_dir, ''),
node.name)
json_dict['nodes'].append(dict(name='%d_%s' % (i, node.name),
report=report_file,
result=result_file,
group=groups[i]))
maxN = 0
for gid in np.unique(groups):
procs = [i for i, val in enumerate(groups) if val == gid]
N = len(procs)
if N > maxN:
maxN = N
json_dict['groups'].append(dict(procs=procs,
total=N,
name='Group_%05d' % gid))
json_dict['maxN'] = maxN
for u, v in graph.in_edges_iter():
json_dict['links'].append(dict(source=nodes.index(u),
target=nodes.index(v),
value=1))
save_json(graph_file, json_dict)
graph_file = op.join(report_dir, 'graph.json')
# Avoid RuntimeWarning: divide by zero encountered in log10
num_nodes = len(nodes)
if num_nodes > 0:
index_name = np.ceil(np.log10(num_nodes)).astype(int)
else:
index_name = 0
template = '%%0%dd_' % index_name
def getname(u, i):
name_parts = u.fullname.split('.')
# return '.'.join(name_parts[:-1] + [template % i + name_parts[-1]])
return template % i + name_parts[-1]
json_dict = []
for i, node in enumerate(nodes):
imports = []
for u, v in graph.in_edges_iter(nbunch=node):
imports.append(getname(u, nodes.index(u)))
json_dict.append(dict(name=getname(node, i),
size=1,
group=groups[i],
imports=imports))
save_json(graph_file, json_dict)
def _set_needed_outputs(self, graph):
"""Initialize node with list of which outputs are needed."""
rm_outputs = self.config['execution']['remove_unnecessary_outputs']
if not str2bool(rm_outputs):
return
for node in graph.nodes():
node.needed_outputs = []
for edge in graph.out_edges_iter(node):
data = graph.get_edge_data(*edge)
sourceinfo = [v1[0] if isinstance(v1, tuple) else v1
for v1, v2 in data['connect']]
node.needed_outputs += [v for v in sourceinfo
if v not in node.needed_outputs]
if node.needed_outputs:
node.needed_outputs = sorted(node.needed_outputs)
def _configure_exec_nodes(self, graph):
"""Ensure that each node knows where to get inputs from
"""
for node in graph.nodes():
node.input_source = {}
for edge in graph.in_edges_iter(node):
data = graph.get_edge_data(*edge)
for sourceinfo, field in data['connect']:
node.input_source[field] = \
(op.join(edge[0].output_dir(),
'result_%s.pklz' % edge[0].name),
sourceinfo)
def _check_nodes(self, nodes):
"""Checks if any of the nodes are already in the graph
"""
node_names = [node.name for node in self._graph.nodes()]
node_lineage = [node._hierarchy for node in self._graph.nodes()]
for node in nodes:
if node.name in node_names:
idx = node_names.index(node.name)
if node_lineage[idx] in [node._hierarchy, self.name]:
raise IOError('Duplicate node name %s found.' % node.name)
else:
node_names.append(node.name)
def _has_attr(self, parameter, subtype='in'):
"""Checks if a parameter is available as an input or output
"""
if subtype == 'in':
subobject = self.inputs
else:
subobject = self.outputs
attrlist = parameter.split('.')
cur_out = subobject
for attr in attrlist:
if not hasattr(cur_out, attr):
return False
cur_out = getattr(cur_out, attr)
return True
def _get_parameter_node(self, parameter, subtype='in'):
"""Returns the underlying node corresponding to an input or
output parameter
"""
if subtype == 'in':
subobject = self.inputs
else:
subobject = self.outputs
attrlist = parameter.split('.')
cur_out = subobject
for attr in attrlist[:-1]:
cur_out = getattr(cur_out, attr)
return cur_out.traits()[attrlist[-1]].node
def _check_outputs(self, parameter):
return self._has_attr(parameter, subtype='out')
def _check_inputs(self, parameter):
return self._has_attr(parameter, subtype='in')
def _get_inputs(self):
"""Returns the inputs of a workflow
This function does not return any input ports that are already
connected
"""
inputdict = TraitedSpec()
for node in self._graph.nodes():
inputdict.add_trait(node.name, traits.Instance(TraitedSpec))
if isinstance(node, Workflow):
setattr(inputdict, node.name, node.inputs)
else:
taken_inputs = []
for _, _, d in self._graph.in_edges_iter(nbunch=node,
data=True):
for cd in d['connect']:
taken_inputs.append(cd[1])
unconnectedinputs = TraitedSpec()
for key, trait in list(node.inputs.items()):
if key not in taken_inputs:
unconnectedinputs.add_trait(key,
traits.Trait(trait,
node=node))
value = getattr(node.inputs, key)
setattr(unconnectedinputs, key, value)
setattr(inputdict, node.name, unconnectedinputs)
getattr(inputdict, node.name).on_trait_change(self._set_input)
return inputdict
def _get_outputs(self):
"""Returns all possible output ports that are not already connected
"""
outputdict = TraitedSpec()
for node in self._graph.nodes():
outputdict.add_trait(node.name, traits.Instance(TraitedSpec))
if isinstance(node, Workflow):
setattr(outputdict, node.name, node.outputs)
elif node.outputs:
outputs = TraitedSpec()
for key, _ in list(node.outputs.items()):
outputs.add_trait(key, traits.Any(node=node))
setattr(outputs, key, None)
setattr(outputdict, node.name, outputs)
return outputdict
def _set_input(self, object, name, newvalue):
"""Trait callback function to update a node input
"""
object.traits()[name].node.set_input(name, newvalue)
def _set_node_input(self, node, param, source, sourceinfo):
"""Set inputs of a node given the edge connection"""
if isinstance(sourceinfo, string_types):
val = source.get_output(sourceinfo)
elif isinstance(sourceinfo, tuple):
if callable(sourceinfo[1]):
val = sourceinfo[1](source.get_output(sourceinfo[0]),
*sourceinfo[2:])
newval = val
if isinstance(val, TraitDictObject):
newval = dict(val)
if isinstance(val, TraitListObject):
newval = val[:]
logger.debug('setting node input: %s->%s', param, str(newval))
node.set_input(param, deepcopy(newval))
def _get_all_nodes(self):
allnodes = []
for node in self._graph.nodes():
if isinstance(node, Workflow):
allnodes.extend(node._get_all_nodes())
else:
allnodes.append(node)
return allnodes
def _has_node(self, wanted_node):
for node in self._graph.nodes():
if wanted_node == node:
return True
if isinstance(node, Workflow):
if node._has_node(wanted_node):
return True
return False
def _create_flat_graph(self):
"""Make a simple DAG where no node is a workflow."""
logger.debug('Creating flat graph for workflow: %s', self.name)
workflowcopy = deepcopy(self)
workflowcopy._generate_flatgraph()
return workflowcopy._graph
def _reset_hierarchy(self):
"""Reset the hierarchy on a graph
"""
for node in self._graph.nodes():
if isinstance(node, Workflow):
node._reset_hierarchy()
for innernode in node._graph.nodes():
innernode._hierarchy = '.'.join((self.name,
innernode._hierarchy))
else:
node._hierarchy = self.name
def _generate_flatgraph(self):
"""Generate a graph containing only Nodes or MapNodes
"""
logger.debug('expanding workflow: %s', self)
nodes2remove = []
if not nx.is_directed_acyclic_graph(self._graph):
raise Exception(('Workflow: %s is not a directed acyclic graph '
'(DAG)') % self.name)
nodes = nx.topological_sort(self._graph)
for node in nodes:
logger.debug('processing node: %s' % node)
if isinstance(node, Workflow):
nodes2remove.append(node)
# use in_edges instead of in_edges_iter to allow
# disconnections to take place properly. otherwise, the
# edge dict is modified.
for u, _, d in self._graph.in_edges(nbunch=node, data=True):
logger.debug('in: connections-> %s' % str(d['connect']))
for cd in deepcopy(d['connect']):
logger.debug("in: %s" % str(cd))
dstnode = node._get_parameter_node(cd[1], subtype='in')
srcnode = u
srcout = cd[0]
dstin = cd[1].split('.')[-1]
logger.debug('in edges: %s %s %s %s' %
(srcnode, srcout, dstnode, dstin))
self.disconnect(u, cd[0], node, cd[1])
self.connect(srcnode, srcout, dstnode, dstin)
# do not use out_edges_iter for reasons stated in in_edges
for _, v, d in self._graph.out_edges(nbunch=node, data=True):
logger.debug('out: connections-> %s' % str(d['connect']))
for cd in deepcopy(d['connect']):
logger.debug("out: %s" % str(cd))
dstnode = v
if isinstance(cd[0], tuple):
parameter = cd[0][0]
else:
parameter = cd[0]
srcnode = node._get_parameter_node(parameter,
subtype='out')
if isinstance(cd[0], tuple):
srcout = list(cd[0])
srcout[0] = parameter.split('.')[-1]
srcout = tuple(srcout)
else:
srcout = parameter.split('.')[-1]
dstin = cd[1]
logger.debug('out edges: %s %s %s %s' % (srcnode,
srcout,
dstnode,
dstin))
self.disconnect(node, cd[0], v, cd[1])
self.connect(srcnode, srcout, dstnode, dstin)
# expand the workflow node
# logger.debug('expanding workflow: %s', node)
node._generate_flatgraph()
for innernode in node._graph.nodes():
innernode._hierarchy = '.'.join((self.name,
innernode._hierarchy))
self._graph.add_nodes_from(node._graph.nodes())
self._graph.add_edges_from(node._graph.edges(data=True))
if nodes2remove:
self._graph.remove_nodes_from(nodes2remove)
logger.debug('finished expanding workflow: %s', self)
def _get_dot(self, prefix=None, hierarchy=None, colored=False,
simple_form=True, level=0):
"""Create a dot file with connection info
"""
if prefix is None:
prefix = ' '
if hierarchy is None:
hierarchy = []
colorset = ['#FFFFC8', '#0000FF', '#B4B4FF', '#E6E6FF', '#FF0000',
'#FFB4B4', '#FFE6E6', '#00A300', '#B4FFB4', '#E6FFE6']
dotlist = ['%slabel="%s";' % (prefix, self.name)]
for node in nx.topological_sort(self._graph):
fullname = '.'.join(hierarchy + [node.fullname])
nodename = fullname.replace('.', '_')
if not isinstance(node, Workflow):
node_class_name = get_print_name(node, simple_form=simple_form)
if not simple_form:
node_class_name = '.'.join(node_class_name.split('.')[1:])
if hasattr(node, 'iterables') and node.iterables:
dotlist.append(('%s[label="%s", shape=box3d,'
'style=filled, color=black, colorscheme'
'=greys7 fillcolor=2];') % (nodename,
node_class_name))
else:
if colored:
dotlist.append(('%s[label="%s", style=filled,'
' fillcolor="%s"];')
% (nodename, node_class_name,
colorset[level]))
else:
dotlist.append(('%s[label="%s"];')
% (nodename, node_class_name))
for node in nx.topological_sort(self._graph):
if isinstance(node, Workflow):
fullname = '.'.join(hierarchy + [node.fullname])
nodename = fullname.replace('.', '_')
dotlist.append('subgraph cluster_%s {' % nodename)
if colored:
dotlist.append(prefix + prefix + 'edge [color="%s"];' % (colorset[level + 1]))
dotlist.append(prefix + prefix + 'style=filled;')
dotlist.append(prefix + prefix + 'fillcolor="%s";' % (colorset[level + 2]))
dotlist.append(node._get_dot(prefix=prefix + prefix,
hierarchy=hierarchy + [self.name],
colored=colored,
simple_form=simple_form, level=level + 3))
dotlist.append('}')
if level == 6:
level = 2
else:
for subnode in self._graph.successors_iter(node):
if node._hierarchy != subnode._hierarchy:
continue
if not isinstance(subnode, Workflow):
nodefullname = '.'.join(hierarchy + [node.fullname])
subnodefullname = '.'.join(hierarchy +
[subnode.fullname])
nodename = nodefullname.replace('.', '_')
subnodename = subnodefullname.replace('.', '_')
for _ in self._graph.get_edge_data(node,
subnode)['connect']:
dotlist.append('%s -> %s;' % (nodename,
subnodename))
logger.debug('connection: ' + dotlist[-1])
# add between workflow connections
for u, v, d in self._graph.edges_iter(data=True):
uname = '.'.join(hierarchy + [u.fullname])
vname = '.'.join(hierarchy + [v.fullname])
for src, dest in d['connect']:
uname1 = uname
vname1 = vname
if isinstance(src, tuple):
srcname = src[0]
else:
srcname = src
if '.' in srcname:
uname1 += '.' + '.'.join(srcname.split('.')[:-1])
if '.' in dest and '@' not in dest:
if not isinstance(v, Workflow):
if 'datasink' not in \
str(v._interface.__class__).lower():
vname1 += '.' + '.'.join(dest.split('.')[:-1])
else:
vname1 += '.' + '.'.join(dest.split('.')[:-1])
if uname1.split('.')[:-1] != vname1.split('.')[:-1]:
dotlist.append('%s -> %s;' % (uname1.replace('.', '_'),
vname1.replace('.', '_')))
logger.debug('cross connection: ' + dotlist[-1])
return ('\n' + prefix).join(dotlist)
|
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
from myPersonalFunctions import *
import glob
import numpy
parser = argparse.ArgumentParser(
description="Prepare the data for run and analysis. \
Codes here only need run once")
# parser.add_argument("protein", help="the name of protein")
# parser.add_argument("template", help="the name of template file")
parser.add_argument("-t", "--test", help="test ", action="store_true", default=False)
parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("--distance", action="store_true", default=False)
parser.add_argument("--replace", action="store_true", default=False)
parser.add_argument("--summary", action="store_true", default=False)
parser.add_argument("--make_metadata", action="store_true", default=False)
parser.add_argument("--qnqc", action="store_true", default=False)
parser.add_argument("--data", action="store_true", default=False)
parser.add_argument("--continue_run", action="store_true", default=False)
parser.add_argument("-m", "--mode", type=int, default=1)
parser.add_argument("-s", "--switch", type=int, default=1)
args = parser.parse_args()
if(args.debug):
do = print
cd = print
else:
do = os.system
cd = os.chdir
# compute distance by "read dump file"
# if(args.test):
# for i in range(40):
# print(i)
# cd(str(i))
# do("read_dump_file.py")
# cd("..")
def replace(TARGET, FROM, TO):
do("sed -i.bak 's/{}/{}/g' {}".format(FROM,TO,TARGET))
def extract_data():
# do("tail -n+3 energy.log | awk '{print $NF}' > etotal")
do("awk '{print $17}' energy.dat > etotal")
do("head -n 6000 etotal | tail -n 2000 > etotal_half")
do("head -n 6000 qn | tail -n 2000 > qn_half")
do("head -n 6000 qc | tail -n 2000 > qc_half")
do("head -n 6000 qo | tail -n 2000 > qo_half")
do("paste qn qc etotal qo | tail -n 4000 > data")
do("paste qn_half qc_half etotal_half qo_half > halfdata")
server_run = """\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=02:00:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun {}
"""
if(args.test):
cd("simulation")
a = glob.glob("*")
print(a)
# for i in range(40):
# print(i)
# cd(str(i))
# do("paste qw energy | tail -n 4000 > q_e")
# # do("cp ~/opt/pulling/qo.slurm .")
# # do("sbatch qo.slurm")
# # extract_data()
# cd("..")
if(args.summary):
if(args.mode == 1):
with open("data", "w") as out:
out.write("step, qw, run, energy\n")
for i in range(40):
print(i)
with open(str(i)+"/halfdata") as f:
step = 0
for line in f:
step += 1
qn, qc, qw, energy = line.split()
out.write("{}, {}, run_{}, {}\n".format(step, qw, i, energy))
# out.write(str(n)+", "+qw+", run_"+str(i)+", "+energy+"\n"
if(args.mode == 2):
with open("data", "w") as out:
out.write("step, qw, run, energy\n")
for i in range(40):
print(i)
with open(str(i)+"/halfdata") as f:
step = 0
for line in f:
step += 1
qn, qc, energy, qw = line.split()
out.write("{}, {}, run_{}, {}\n".format(step, qw, i, energy))
# out.write(str(n)+", "+qw+", run_"+str(i)+", "+energy+"\n"
if(args.mode == 3):
cd("simulation")
with open("data", "w") as out:
out.write("step, qw, run\n")
for i in range(20):
print(i)
with open(str(i)+"/wham.dat") as f:
next(f)
for line in f:
step, qw, *rest = line.split()
out.write("{}, {}, run_{}\n".format(step, qw, i))
# out.write(str(n)+", "+qw+", run_"+str(i)+", "+energy+"\n"
if(args.mode == 4):
n = 40
with open("data", "w") as out:
out.write("step, qn, qc, dis, qw, run, energy\n")
for i in range(n):
print(i)
cd(str(i))
do("awk '{print $2}' addforce.dat > dis")
do("paste qn qc dis wham.dat| tail -n+2 | head -n 6000 > data")
# do("paste qn qc dis wham.dat| tail -n+2 > data")
cd("..")
with open(str(i)+"/data") as f:
for line in f:
qn, qc, dis, step, qw, *rest, energy = line.split()
out.write("{}, {}, {}, {}, {}, run_{}, {}\n".format(step, qn, qc, dis, qw, i, energy))
# out.write(str(n)+", "+qw+", run_"+str(i)+", "+energy+"\n"
if(args.mode == 5):
with open("data", "w") as out:
out.write("i, step, qw, target, run, energy\n")
q = 0.1
for i in range(40):
print(i)
q += 0.02
count = i*6000
with open(str(i)+"/wham.dat") as f:
next(f)
for line in f:
count += 1
step, qw, *rest, energy = line.split()
out.write("{}, {}, {}, {}, {}, {}\n".format(count, step, qw, q, i, energy))
# out.write(str(n)+", "+qw+", run_"+str(i)+", "+energy+"\n"
if(args.qnqc):
if(args.mode == 4):
n = 20
# temp_list = [300,350]
# temp_list = [250,275, 325]
# temp_list = [200]
# temp_list = [0, 1, 2]
# temp_list = [3]
# temp_list = [4, 5]
# temp_list = [6]
# temp_list = [7]
# temp_list = [8, 9]
temp_list = [2]
run_list = [0]
# run_list = [0, 1]
# temp_list = [1]
# run_list = [10, 11, 12, 13]
# # run_list = [2]
# run_list = [3, 4, 5]
# temp_list = ['300', '200', '250']
cwd = os.getcwd()
for temp in run_list:
for i in range(n):
cd("simulation/{}/{}".format(i, temp))
do("cp ../2xov.pdb .")
do("cp ~/opt/pulling/qnqc.slurm .")
do("sbatch qnqc.slurm")
# with open("server_run.slurm", "w") as f:
# f.write(server_run.format("read_dump_file.py"))
# do("sbatch server_run.slurm")
cd(cwd)
if(args.mode == 1):
n = 20
# temp_list = [300,350]
# temp_list = [250,275, 325]
# temp_list = [200]
temp_list = [135, 160, 185, 210]
# temp_list = ['300', '200', '250']
cwd = os.getcwd()
for temp in temp_list:
for i in range(n):
cd("simulation/{}/{}".format(temp, i))
do("cp 2xov_translocon.pdb 2xov.pdb")
do("cp ~/opt/pulling/qnqc.slurm .")
do("sbatch qnqc.slurm")
# with open("server_run.slurm", "w") as f:
# f.write(server_run.format("read_dump_file.py"))
# do("sbatch server_run.slurm")
cd(cwd)
if(args.mode == 2):
array = []
cwd = os.getcwd()
print(cwd)
with open('folder_list', 'r') as ins:
for line in ins:
target = line.strip('\n')
t1 = "simulation/" + target + "/"
array.append(t1)
# t2 = "simulation/" + target + "/simulation/1"
# array.append(t2)
for i in array:
os.chdir(i)
os.system("pwd")
os.system("cp ~/opt/pulling/qnqc.slurm .")
os.system("sbatch qnqc.slurm")
os.chdir(cwd)
if(args.mode == 3):
n = 40
cwd = os.getcwd()
for i in range(n):
# cd("simulation/{}".format(i))
cd("{}".format(i))
do("cp ~/opt/pulling/qnqc.slurm .")
do("sbatch qnqc.slurm")
# with open("server_run.slurm", "w") as f:
# f.write(server_run.format("read_dump_file.py"))
# do("sbatch server_run.slurm")
cd(cwd)
if(args.data):
if(args.mode == 8):
n = 20
# temp_list = [300,350]
# temp_list = [250,275, 325]
# temp_list = [200]
# run_list = [0, 1, 2]
# run_list = [3]
# run_list = [4, 5]
# run_list = [6]
run_list = [7, 8, 9]
run_list = [0, 1, 2]
run_list = [0]
# run_list = [0, 1]
# run_list = [10, 11, 12, 13]
# run_list = [2]
# run_list = [3, 4, 5]
# temp_list = ['300', '200', '250']
cwd = os.getcwd()
for run in run_list:
for i in range(n):
cd("simulation/{}/{}".format(i, run))
do("awk '{print $1}' addforce.dat > steps")
do("awk '{print $2}' addforce.dat > distance")
do("awk '{print $17}' energy.dat > energy")
do("paste -d, steps distance qn qc energy > data")
# do("paste -d, steps distance distance distance energy > data")
cd(cwd)
# with open("server_run.slurm", "w") as f:
# f.write(server_run.format("read_dump_file.py"))
# do("sbatch server_run.slurm")
if(args.mode == 7):
n = 20
temp_list = ['300', '200', '250']
cwd = os.getcwd()
for temp in temp_list:
for i in range(n):
print(str(i))
cd("simulation/{}/{}".format(temp, i))
do("awk '{print $2}' wham.dat > qw")
do("awk '{print $6}' wham.dat > energy")
do("paste qn qc qw energy | tail -n 2000 > data")
# do("tail -n 2000 data > small_data")
# do("head -n 5800 wham.dat | tail -n 4000 | awk '{print $2}' > qw")
# do("head -n 5800 wham.dat | tail -n 4000 | awk '{print $5}' > e")
# do("head -n 5800 qn | tail -n 4000 > qn_half")
# do("head -n 5800 qc | tail -n 4000 > qc_half")
# do("paste qn qc | head -n 5800 | tail -n 4000 > qnqc")
cd(cwd)
if(args.mode == 6):
n = 20
temp_list = [135, 160, 185, 210]
cwd = os.getcwd()
for temp in temp_list:
for i in range(n):
print(str(i))
cd("simulation/{}/{}".format(temp, i))
do("awk '{print $2}' wham.dat > qw")
do("awk '{print $6}' wham.dat > energy")
do("paste qn qc qw energy | tail -n 10000 > data")
do("tail -n 2000 data > small_data")
# do("head -n 5800 wham.dat | tail -n 4000 | awk '{print $2}' > qw")
# do("head -n 5800 wham.dat | tail -n 4000 | awk '{print $5}' > e")
# do("head -n 5800 qn | tail -n 4000 > qn_half")
# do("head -n 5800 qc | tail -n 4000 > qc_half")
# do("paste qn qc | head -n 5800 | tail -n 4000 > qnqc")
cd(cwd)
if(args.mode == 5):
target = "all_halfdata"
do("awk '{print $1}' %s > qn" % (target))
do("awk '{print $2}' %s > qc" % (target))
do("awk '{print $3}' %s > p_total" % (target))
do("awk '{print $4}' %s > e_total" % (target))
if(args.mode == 4):
n = 40
temp_list = [250, 275, 300, 325, 350]
cwd = os.getcwd()
target = "multi_temp"
for temp in temp_list:
cd("simulation/{}".format(temp))
do("ls */halfdata | sort -g | xargs cat > data")
do("awk '{print $1}' data > ../../%s/qn_t%i" % (target, temp))
do("awk '{print $2}' data > ../../%s/qc_t%i" % (target, temp))
do("awk '{print $3}' data > ../../%s/q_t%i" % (target, temp))
do("awk '{print $4}' data > ../../%s/energy_t%i" % (target, temp))
cd(cwd)
# cd(target)
# with open("sim_list", "w") as f:
# for temp in temp_list:
# f.write("t{}\n".format(temp))
# with open("T_list", "w") as f:
# for temp in temp_list:
# f.write("{}\n".format(temp))
# cd(cwd)
if(args.mode == 3):
n = 40
# temp_list = [250, 275, 300, 325, 350]
temp_list = [200]
temp_list = [135, 160, 185, 210]
cwd = os.getcwd()
for temp in temp_list:
for i in range(n):
print(str(i))
cd("simulation/{}/{}".format(temp, i))
do("awk '{print $2}' wham.dat > qw")
do("awk '{print $6}' wham.dat > energy")
do("paste qn qc qw energy | tail -n 4000 > halfdata")
# do("head -n 5800 wham.dat | tail -n 4000 | awk '{print $2}' > qw")
# do("head -n 5800 wham.dat | tail -n 4000 | awk '{print $5}' > e")
# do("head -n 5800 qn | tail -n 4000 > qn_half")
# do("head -n 5800 qc | tail -n 4000 > qc_half")
# do("paste qn qc | head -n 5800 | tail -n 4000 > qnqc")
cd(cwd)
if(args.mode == 1):
n = 40
cwd = os.getcwd()
for i in range(n):
cd("simulation/300/{}".format(i))
do("tail -n+3 energy.log | awk '{print $NF}' > energy")
do("paste qn qc distance energy | tail 4000 > halfdata")
cd(cwd)
if(args.mode == 2):
array = []
cwd = os.getcwd()
print(cwd)
with open('folder_list', 'r') as ins:
for line in ins:
target = line.strip('\n')
t1 = "simulation/" + target + "/"
array.append(t1)
# t2 = "simulation/" + target + "/simulation/1"
# array.append(t2)
for i in array:
os.chdir(i)
os.system("pwd")
do("tail -n+3 energy.log | awk '{print $NF}' > energy")
do("sed '/^#/ d' x.colvars.traj | awk '{print $2}' > x")
do("awk '{print $2}' wham.dat > qw")
# do("sed '/^#/ d' x.colvars.traj | awk 'NR % 10 == 1' | awk '{print $2}' > x")
do("paste qn qc x energy qw| tail -n 4000 > halfdata")
do("paste qn qc x energy qw -d ',' | tail -n 4000 > test_data")
# do("sed -i '1iqn,qc,x,energy' test_data")
os.chdir(cwd)
if(args.make_metadata):
if(args.mode == 5):
kconstant = 300 # double the k constant
q0 = 0.0
metadata = open("metadatafile", "w")
for i in range(20):
q = q0 + i*0.05
# temp_list = [135, 160, 185, 210]
temp_list = [160]
for temp in temp_list:
target = "../simulation/{}/".format(temp) + str(i) + "/small_data {} {} {:.2f}\n".format(temp, kconstant, q)
metadata.write(target)
metadata.close()
if(args.mode == 4):
kconstant = 800 # double the k constant
q0 = 0.12
metadata = open("metadatafile", "w")
for i in range(40):
q = q0 + i*0.02
temp_list = [250, 275, 300, 325, 350]
for temp in temp_list:
target = "../simulation/300/" + str(i) + "/halfdata {} {} {:.2f}\n".format(temp, kconstant, q)
metadata.write(target)
metadata.close()
if(args.mode == 1):
kconstant = 2000 # double the k constant
temp = 350
q0 = 0.12
metadata = open("metadatafile", "w")
for i in range(40):
q = q0 + i*0.02
target = "../simulation/350/" + str(i) + "/halfdata {} {} {:.2f}\n".format(temp, kconstant, q)
# target = "../simulation/350/" + str(i) + "/data {} {} {:.2f}\n".format(temp, kconstant, q)
metadata.write(target)
metadata.close()
elif(args.mode == 2):
kconstant = 0.02 # double the k constant
metadata = open("metadatafile", "w")
with open('../folder_list', 'r') as ins:
for line in ins:
target = line.strip(' \n')
temp = target.split("_")[1]
x = target.split("_")[3]
# print(temp)
cwd = os.getcwd()
t1 = cwd + "/../simulation/" + target + "/halfdata {} {} {}\n".format(temp, kconstant, x)
metadata.write(t1)
# elif(args.mode == 2):
# t2 = "/scratch/wl45/freeEnergy_2xov/pullingDistance/simulation/" + target + "/simulation/1/halfdata {} {} {}\n".format(temp, kconstant, x)
# metadata.write(t2)
metadata.close()
elif(args.mode == 3):
kconstant = 0.04 # double the k constant
metadata = open("metadatafile", "w")
with open('../folder_list', 'r') as ins:
for line in ins:
target = line.strip(' \n')
temp = target.split("_")[1]
x = target.split("_")[3]
cwd = os.getcwd()
t1 = cwd + "/../simulation/" + target + "/halfdata {} {} {}\n".format(temp, kconstant, x)
metadata.write(t1)
# elif(args.mode == 2):
# t2 = "/scratch/wl45/freeEnergy_2xov/pullingDistance/simulation/" + target + "/simulation/1/halfdata {} {} {}\n".format(temp, kconstant, x)
# metadata.write(t2)
metadata.close()
if(args.replace):
target = "2xov.in"
replace(target, "TEMPERATURE", "300")
replace(target, "RANDOM", str(randint(1, 10**6)))
if(args.distance):
do("read_dump_file.py")
if(args.continue_run):
folder_name = "continue_simulation_2"
folder_name = "continue_simulation"
do("mkdir {}".format(folder_name))
# do("mkdir continue_simulation")
cd(folder_name)
n = 40
simulation_steps = 6*1000*1000
protein_name = "2xov"
for i in range(n):
do("mkdir {}".format(i))
cd(str(i))
do("cp -r ../../2xov/* .")
# do("cp ../../continue_simulation/{0}/restart.12000000 .".format(i))
do("cp ../../simulation/{0}/restart.6000000 .".format(i))
do("cp ~/opt/pulling/2xov_continue_run.in 2xov.in")
# do(
# "sed -i.bak 's/START_FROM/'" +
# "12000000" +
# "'/g' "+protein_name+".in")
do(
"sed -i.bak 's/START_FROM/'" +
"6000000" +
"'/g' "+protein_name+".in")
seed(datetime.now())
        do(  # replace RANDOM with a random number
"sed -i.bak 's/RANDOM/'" +
str(randint(1, 10**6)) +
"'/g' "+protein_name+".in")
do( # replace SIMULATION_STEPS with specific steps
"sed -i.bak 's/SIMULATION_STEPS/'" +
str(simulation_steps) +
"'/g' "+protein_name+".in")
cd("..")
|
|
# -*- coding: utf-8 -*-
# dcf
# ---
# A Python library for generating discounted cashflows.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.4, copyright Saturday, 10 October 2020
# Website: https://github.com/sonntagsgesicht/dcf
# License: Apache License 2.0 (see LICENSE file)
from abc import ABC
from sys import float_info
from .curve import RateCurve
from .compounding import continuous_compounding, continuous_rate
from .interpolation import constant, linear, logconstantrate, loglinearrate, neglogconstant, negloglinear
from . import dyn_scheme
class CreditCurve(RateCurve, ABC):
""" generic curve for default probabilities (under construction) """
_forward_tenor = '1Y'
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
if isinstance(domain, RateCurve):
# if argument is a curve add extra curve points to domain for better approximation
if data:
raise TypeError("If first argument is %s, data argument must not be given." % domain.__class__.__name__)
data = domain
domain = sorted(set(list(data.domain) + [max(data.domain) + '1y']))
super(CreditCurve, self).__init__(domain, data, interpolation, origin, day_count, forward_tenor)
def get_survival_prob(self, start, stop=None): # aka get_discount_factor
if stop is None:
return self.get_survival_prob(self.origin, start)
return self._get_compounding_factor(start, stop)
def get_flat_intensity(self, start, stop=None): # aka get_zero_rate
if stop is None:
return self.get_flat_intensity(self.origin, start)
return self._get_compounding_rate(start, stop)
def get_hazard_rate(self, start): # aka get_short_rate
if start < min(self.domain):
return self.get_hazard_rate(min(self.domain))
if max(self.domain) <= start:
return self.get_hazard_rate(max(self.domain) - self.__class__._time_shift)
previous = max(d for d in self.domain if d <= start)
follow = min(d for d in self.domain if start < d)
if not previous <= start <= follow:
raise AssertionError()
if not previous < follow:
raise AssertionError(list(map(str, (previous, start, follow))))
return self.get_flat_intensity(previous, follow)
class ProbabilityCurve(CreditCurve, ABC):
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
# validate probabilities
if not isinstance(data, RateCurve):
data = [max(float_info.min, min(d, 1. - float_info.min)) for d in data]
if not all(data):
                raise ValueError('Found non-positive survival probabilities.')
# if argument is a curve add extra curve points to domain for better approximation
if isinstance(domain, RateCurve):
if data:
raise TypeError("If first argument is %s, data argument must not be given." % domain.__class__.__name__)
data = domain
origin = data.origin if origin is None else origin
domain = sorted(set(list(data.domain) + [origin + '1d', max(data.domain) + '1y']))
super(ProbabilityCurve, self).__init__(domain, data, interpolation, origin, day_count, forward_tenor)
class SurvivalProbabilityCurve(ProbabilityCurve):
_interpolation = dyn_scheme(logconstantrate, loglinearrate, logconstantrate)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(curve.origin, x)
def _get_compounding_factor(self, start, stop):
if start is self.origin:
return self(stop)
if start == stop:
return 1. if 2*float_info.min <= self(start) else 0.
return self(stop) / self(start)
def _get_compounding_rate(self, start, stop):
if start == stop == self.origin:
            # intensity proxy at origin
stop = min(d for d in self.domain if self.origin < d)
# todo: calc left extrapolation (for linear zero rate interpolation)
return super(SurvivalProbabilityCurve, self)._get_compounding_rate(start, stop)
class DefaultProbabilityCurve(SurvivalProbabilityCurve):
""" wrapper of SurvivalProbabilityCurve """
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(curve.origin, x)
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
if not isinstance(data, RateCurve):
data = [1. - d for d in data]
super(DefaultProbabilityCurve, self).__init__(domain, data, interpolation, origin, day_count, forward_tenor)
class FlatIntensityCurve(CreditCurve):
_interpolation = dyn_scheme(constant, linear, constant)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_flat_intensity(curve.origin, x)
def _get_compounding_rate(self, start, stop):
if start == stop == self.origin:
return self(self.origin)
if start is self.origin:
return self(stop)
if start == stop:
return self._get_compounding_rate(start, start + self.__class__._time_shift)
s = self(start) * self.day_count(self.origin, start)
e = self(stop) * self.day_count(self.origin, stop)
t = self.day_count(start, stop)
return (e - s) / t
class HazardRateCurve(CreditCurve):
_interpolation = dyn_scheme(constant, constant, constant)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_hazard_rate(x)
def _get_compounding_rate(self, start, stop):
if start == stop:
return self(start)
current = start
rate = 0.0
step = self.__class__._time_shift
while current + step < stop:
rate += self(current) * self.day_count(current, current + step)
current += step
rate += self(current) * self.day_count(current, stop)
return rate / self.day_count(start, stop)
def get_hazard_rate(self, start): # aka get_short_rate
return self(start)
class MarginalSurvivalProbabilityCurve(ProbabilityCurve):
_interpolation = dyn_scheme(neglogconstant, negloglinear, neglogconstant)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(x, x + curve.forward_tenor)
def _get_compounding_factor(self, start, stop):
if start == stop:
return 1. if 2*float_info.min <= self(start) else 0.
current = start
df = 1.0
step = self.forward_tenor
while current + step < stop:
df *= self(current) if 2 * float_info.min <= self(current) else 0.
current += step
if 2 * float_info.min <= self(current):
r = continuous_rate(self(current), self.day_count(current, current + step))
df *= continuous_compounding(r, self.day_count(current, stop))
else:
df *= 0.
return df
def get_hazard_rate(self, start): # aka get_short_rate
if start < min(self.domain):
return self.get_hazard_rate(min(self.domain))
if max(self.domain) <= start:
return self.get_flat_intensity(max(self.domain), max(self.domain) + self.__class__._time_shift)
previous = max(d for d in self.domain if d <= start)
follow = min(d for d in self.domain if start < d)
if not previous < follow:
raise AssertionError(list(map(str, (previous, start, follow))))
if not previous <= start <= follow:
raise AssertionError(list(map(str, (previous, start, follow))))
return self.get_flat_intensity(previous, follow)
class MarginalDefaultProbabilityCurve(MarginalSurvivalProbabilityCurve):
""" wrapper of SurvivalProbabilityCurve """
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(x, x + curve.forward_tenor)
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
if not isinstance(data, RateCurve):
data = [1. - d for d in data]
super(MarginalDefaultProbabilityCurve, self).__init__(
domain, data, interpolation, origin, day_count, forward_tenor)
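# Usage sketch (not part of the library): plain float year fractions are
# assumed as the curve domain purely for illustration; the actual domain
# type depends on how the parent RateCurve is configured (e.g. business
# dates).
#
#     intensity_curve = FlatIntensityCurve([1., 2., 5.], [0.01, 0.012, 0.015])
#     survival = intensity_curve.get_survival_prob(0., 2.)   # P(no default before t=2)
#     hazard = intensity_curve.get_hazard_rate(1.5)          # local hazard rate
#     default_curve = DefaultProbabilityCurve([1., 2.], [0.02, 0.05])
#     p_default_2y = 1. - default_curve.get_survival_prob(0., 2.)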
|
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
import datetime
import logging
import os
import sys
from urlparse import urlparse
from .framework_base import Framework_Base
from ..util.dates import naiveUTC
from ..util.dossl import _do_ssl
from ..util.handler_utils import _get_user_urn
from ..util import credparsing as credutils
from ...geni.util.urn_util import is_valid_urn, URN, string_to_urn_format
from ...sfa.util.xrn import get_leaf
# The key is a converted pkcs12 file. Start with your ProtoGENI
# encrypted.p12 file (found in the .ssl directory or downloaded
# from the emulab site web page). Then convert it to pem using
# openssl:
#
# $ openssl pkcs12 -in encrypted.p12 -out pgcert.pem -nodes
#
# That command will create a pgcert.pem file, which contains
# the private key you need. This resulting key is not password
# protected. See the openssl pkcs12 man page for more info.
class Framework(Framework_Base):
"""The ProtoGENI backend for Omni. This class defines the
interface to the Protogeni Control Framework.
"""
def __init__(self, config, opts):
Framework_Base.__init__(self, config)
fwtype = "PG"
self.fwtype = fwtype
self.opts = opts
self.logger = logging.getLogger("omni.protogeni")
config['cert'] = os.path.expanduser(config['cert'])
if not os.path.exists(config['cert']):
            sys.exit("%s Framework certfile %s doesn't exist" % (fwtype, config['cert']))
if not os.path.getsize(config['cert']) > 0:
sys.exit('%s Framework certfile %s is empty' % (fwtype, config['cert']))
config['key'] = os.path.expanduser(config['key'])
if not os.path.exists(config['key']):
            sys.exit("%s Framework keyfile %s doesn't exist" % (fwtype, config['key']))
if not os.path.getsize(config['key']) > 0:
sys.exit('%s Framework keyfile %s is empty' % (fwtype, config['key']))
if not config.has_key('verbose'):
config['verbose'] = False
else:
config['verbose'] = config['verbose'].lower().strip() in ['true', '1', 't', 'yes', 'on', 'y']
if opts.verbosessl:
self.logger.debug('Setting Verbose SSL logging based on option')
config['verbose'] = True
if config['verbose']:
self.logger.info('Verbose logging is on')
self.config = config
self.logger.debug("Configured with key file %s", config['key'])
self.logger.debug('Using clearinghouse %s', self.config['ch'])
self.ch = self.make_client(self.config['ch'], self.key, self.cert,
self.config['verbose'], opts.ssltimeout)
self.logger.debug('Using slice authority %s', self.config['sa'])
self.sa = self.make_client(self.config['sa'], self.key, self.cert,
self.config['verbose'], opts.ssltimeout)
self.user_cred = self.init_user_cred( opts )
# For now, no override aggregates.
self.aggs = None
# Hardcode the PG in ELab instance because it does not
# show up in the clearinghouse.
#self.aggs = {
# Tom's inner emulab
#'urn:publicid:IDN+elabinelab.geni.emulab.net':
# 'https://myboss.elabinelab.geni.emulab.net:443/protogeni/xmlrpc/am'
# Leigh's inner emulab
# 'urn:publicid:IDN+myelab.testbed.emulab.net':
# 'https://myboss.myelab.testbed.emulab.net:443/protogeni/xmlrpc/am'
# Utah ProtoGENI
#'urn:publicid:IDN+emulab.net':
#'https://boss.emulab.net:443/protogeni/xmlrpc/am'
#}
def _get_log_url(self, response):
url = None
if not response or not isinstance(response, dict) or not response.has_key('protogeni_error_url'):
return url
return response['protogeni_error_url']
def get_user_cred(self):
message = ""
if self.user_cred == None:
self.logger.debug("Getting user credential from %s SA %s", self.fwtype, self.config['sa'])
pg_response = dict()
# Next 2 lines for debugging only
#params = {'cert': self.config['cert']}
#(pg_response, message) = _do_ssl(self, None, ("Get %s user credential from SA %s using cert %s" % (self.fwtype, self.config['sa'], self.config['cert'])), self.sa.GetCredential, params)
(pg_response, message) = _do_ssl(self, None, ("Get %s user credential from SA %s using cert %s" % (self.fwtype, self.config['sa'], self.config['cert'])), self.sa.GetCredential)
_ = message #Appease eclipse
if pg_response is None:
self.logger.error("Failed to get your %s user credential: %s", self.fwtype, message)
# FIXME: Return error message?
return None, message
code = pg_response['code']
log = self._get_log_url(pg_response)
if code:
self.logger.error("Failed to get a %s user credential: Received error code: %d", self.fwtype, code)
output = pg_response['output']
self.logger.error("Received error message: %s", output)
if message is None or message == "":
message = output
else:
message = message + "; " + output
if log:
self.logger.error("See log: %s", log)
#return None
else:
self.user_cred = pg_response['value']
if log:
self.logger.debug("%s log url: %s", self.fwtype, log)
return self.user_cred, message
def get_slice_cred(self, urn):
mycred, message = self.get_user_cred()
if mycred is None:
self.logger.error("Cannot get %s slice %s without a user credential: %s", self.fwtype, urn, message)
return None
# Note params may be used again later in this method
params = {'credential': mycred,
'type': 'Slice',
'urn': urn}
self.logger.debug("Resolving %s at slice authority", urn)
(response, message) = _do_ssl(self, None, ("Resolve %s slice %s at SA %s" % (self.fwtype, urn, self.config['sa'])), self.sa.Resolve, params)
# response is a dict with three keys: code, value and output
self.logger.debug("Got resolve response %r", response)
if response is None:
raise Exception("Failed to find %s slice %s: %s" % (self.fwtype, urn, message))
log = self._get_log_url(response)
if log:
self.logger.debug("%s resolve slice log: %s", self.fwtype, log)
if response['code']:
# Unable to resolve, slice does not exist
raise Exception('Cannot access %s slice %s (does not exist or you are not a member).' % (self.fwtype, urn))
else:
# Slice exists, get credential and return it
self.logger.debug("Resolved slice %s, getting credential", urn)
(response, message) = _do_ssl(self, None, ("Get %s slice credential for %s from SA %s" % (self.fwtype, urn, self.config['sa'])), self.sa.GetCredential, params)
if response is None:
raise Exception("Failed to get %s slice %s credential: %s" % (self.fwtype, urn, message))
log = self._get_log_url(response)
# When the CM is busy, it returns error 14: 'slice is busy; try again later'
# FIXME: All server calls should check for that 'try again later' and retry,
# as dossl does when the AM raises that message in an XMLRPC fault
if response['code']:
if log:
self.logger.error("%s GetCredential for slice log: %s", self.fwtype, log)
raise Exception("Failed to get %s slice %s credential: Error: %d, Message: %s" % (self.fwtype, urn, response['code'], response['output']))
if not response.has_key('value'):
self.logger.debug("Got GetCredential response %r", response)
if log:
self.logger.error("%s GetCredential for slice log: %s", self.fwtype, log)
raise Exception("Failed to get valid %s slice credential for %s. Response had no value." % (self.fwtype, urn))
if not type(response['value']) is str:
self.logger.debug("Got GetCredential response %r", response)
if log:
self.logger.error("%s GetCredential for slice log: %s", self.fwtype, log)
raise Exception("Failed to get valid %s slice credential for %s. Got non string: %r" % (self.fwtype, urn, response['value']))
if log:
self.logger.debug("%s GetCredential for slice log: %s", self.fwtype, log)
return response['value']
def slice_name_to_urn(self, name):
"""Convert a slice name to a slice urn."""
#
# Sample URNs:
# urn:publicid:IDN+pgeni3.gpolab.bbn.com+slice+tom1
# urn:publicid:IDN+elabinelab.geni.emulab.net+slice+tom1
#
if name is None or name.strip() == '':
raise Exception('Empty slice name')
# Could use is_valid_urn_bytype here, or just let the SA/AM do the check
if is_valid_urn(name):
urn = URN(None, None, None, name)
if not urn.getType() == "slice":
raise Exception("Invalid Slice name: got a non Slice URN %s", name)
# if config has an authority, make sure it matches
if self.config.has_key('sa'):
url = urlparse(self.config['sa'])
sa_host = url.hostname
try:
auth = sa_host[sa_host.index('.')+1:]
except:
# funny SA?
self.logger.debug("Found no . in sa hostname. Using whole hostname")
auth = sa_host
urn_fmt_auth = string_to_urn_format(urn.getAuthority())
if urn_fmt_auth != auth:
self.logger.warn("CAREFUL: slice' authority (%s) doesn't match current configured authority (%s)" % (urn_fmt_auth, auth))
self.logger.info("This may be OK though if you are using delegated slice credentials...")
# raise Exception("Invalid slice name: slice' authority (%s) doesn't match current configured authority (%s)" % (urn_fmt_auth, auth))
return name
if not self.config.has_key('sa'):
raise Exception("Invalid configuration: no slice authority (sa) defined")
url = urlparse(self.config['sa'])
sa_host = url.hostname
try:
auth = sa_host[sa_host.index('.')+1:]
except:
# Funny SA
self.logger.debug("Found no . in sa hostname. Using whole hostname")
auth = sa_host
return URN(auth, "slice", name).urn_string()
def create_slice(self, urn):
"""Create a slice at the PG Slice Authority.
If the slice exists, just return a credential for the existing slice.
If the slice does not exist, create it and return a credential.
"""
mycred, message = self.get_user_cred()
if mycred is None:
self.logger.error("Cannot create a %s slice without a valid user credential: %s", self.fwtype, message)
return None
# Note: params is used again below through either code path.
params = {'credential': mycred,
'type': 'Slice',
'urn': urn}
self.logger.debug("Resolving %s at slice authority", urn)
(response, message) = _do_ssl(self, None, ("Look up slice %s at %s slice authority %s" % (urn, self.fwtype, self.config['sa'])), self.sa.Resolve, params)
# response is a dict with three keys: code, value and output
self.logger.debug("Got resolve response %r", response)
if response is None:
#exception trying to resolve the slice is not the same as a PG error
self.logger.error("Failed to resolve slice %s at %s slice authority: %s" % (urn, self.fwtype, message))
# FIXME: Return error message?
return None
elif response['code']:
# Unable to resolve, create a new slice
self.logger.debug("Creating new slice %s", urn)
(response, message) = _do_ssl(self, None, ("Create %s slice %s at SA %s" % (self.fwtype, urn, self.config['sa'])), self.sa.Register, params)
self.logger.debug("Got register response %r", response)
if response is None:
self.logger.error("Failed to create new %s slice %s: %s", self.fwtype, urn, message)
# FIXME: Return an error message?
return None
log = self._get_log_url(response)
if response['code']:
if response['code'] == 3 and 'Unknown project' in response['output']:
self.logger.error("Unknown project in slice URN '%s'. Project names are case sensitive. Did you mis-type or mis-configure Omni?" % urn)
self.logger.debug('Failed to create new %s slice %s: %s (code %d)', self.fwtype, urn, response['output'], response['code'])
elif response['code'] == 5 or \
response['output'].startswith("[DUPLICATE] DUPLICATE_ERROR"):
self.logger.error("Failed to create slice '%s' because a similarly named slice already exists. Slice names are case insensitive at creation time.", urn)
self.logger.debug('Failed to create new %s slice %s: %s (code %d)', self.fwtype, urn, response['output'], response['code'])
else:
self.logger.error('Failed to create new %s slice %s: %s (code %d)', self.fwtype, urn, response['output'], response['code'])
if log:
self.logger.info("%s log url: %s", self.fwtype, log)
elif log:
self.logger.debug("%s log url: %s", self.fwtype, log)
return response['value']
else:
# Slice exists, get credential and return it
self.logger.debug("Resolved slice %s, getting credential", urn)
(response, message) = _do_ssl(self, None, ("Get %s slice %s credential from SA %s" % (self.fwtype, urn, self.config['sa'])), self.sa.GetCredential, params)
if response is None:
self.logger.error("Failed to get credential for existing %s slice %s", self.fwtype, urn)
# FIXME: Return an error message?
return None
log = self._get_log_url(response)
if response['code']:
self.logger.error('Failed to get credential for existing %s slice %s: %s (code %d)', self.fwtype, urn, response['output'], response['code'])
if log:
self.logger.info("%s log url: %s", self.fwtype, log)
elif log:
self.logger.debug("%s log url: %s", self.fwtype, log)
if not response.has_key('value'):
self.logger.debug("Got GetCredential response %r", response)
raise Exception("Failed to get valid %s slice credential for %s. Response had no value." % (self.fwtype, urn))
if not type(response['value']) is str:
self.logger.debug("Got GetCredential response %r", response)
raise Exception("Failed to get valid %s slice credential for %s. Got non string: %r" % (self.fwtype, urn, response['value']))
return response['value']
def delete_slice(self, urn):
"""Delete the PG Slice. PG doesn't do this though, so instead we
return a string including the slice expiration time.
"""
mycred, message = self.get_user_cred()
_ = message # Appease eclipse
if mycred is None:
prtStr = "Cannot get a valid user credential. Regardless, %s slices cannot be deleted - they expire automatically." % self.fwtype
self.logger.error(prtStr)
return prtStr
# Note: params is used again below through either code path.
params = {'credential': mycred,
'type': 'Slice',
'urn': urn}
(response, message) = _do_ssl(self, None, ("Get %s Slice %s credential from SA %s" % (self.fwtype, urn, self.config['sa'])), self.sa.GetCredential, params)
if response is None or response['code']:
msg = "Cannot confirm %s slice exists. Regardless, %s slices cannot be deleted - they expire automatically. Unable to get slice credential for slice %s: %s"
if response is None:
msg = msg % (self.fwtype, self.fwtype, urn, message)
else:
msg = msg % (self.fwtype, self.fwtype, urn, response['output'])
self.logger.warning(msg)
return msg
else:
slice_cred = response['value']
# If we get here the slice exists and we have the credential
slicecred_exp = credutils.get_cred_exp(self.logger, slice_cred)
return '%s does not support deleting slices. Slice %s will be automatically removed when it expires at %s UTC.' % (self.fwtype, urn, slicecred_exp)
def list_my_slices(self, user):
slice_list = self._list_my_slices( user )
return slice_list
def list_ssh_keys(self, username=None):
if username is not None and username.strip() != "":
name = get_leaf(_get_user_urn(self.logger, self.config))
if name != get_leaf(username):
return None, "%s can get SSH keys for current user (%s) only, not %s" % (self.fwtype, name, username)
key_list, message = self._list_ssh_keys()
return key_list, message
def list_aggregates(self):
if self.aggs:
return self.aggs
cm_dicts = self._get_components()
if cm_dicts is None:
cm_dicts = []
am_dicts = self._find_geni_ams(cm_dicts)
if am_dicts is None:
am_dicts = []
result = dict()
for am_dict in am_dicts:
self.logger.debug("Keys: %r", am_dict.keys())
result[am_dict['urn']] = am_dict['am_url']
for key, value in result.items():
self.logger.debug('Found aggregate %r: %r', key, value)
return result
def renew_slice(self, urn, expiration_dt):
"""See framework_base for doc.
"""
mycred, message = self.get_user_cred()
if mycred is None:
self.logger.error("Cannot renew slice %s without a valid user credential: %s", urn, message)
return None
# Note: params is used again below through either code path.
params = {'credential': mycred,
'type': 'Slice',
'urn': urn}
(response, message) = _do_ssl(self, None, ("Get %s Slice %s credential from SA %s" % (self.fwtype, urn, self.config['sa'])), self.sa.GetCredential, params)
if response is None or response['code']:
msg = "Cannot renew slice. Unable to get slice credential for slice %s: %s"
if response is None:
msg = msg % (urn, message)
else:
log = self._get_log_url(response)
if log:
msg = msg % (urn, response['output'] + (". %s log url: %s" % (self.fwtype, log)))
else:
msg = msg % (urn, response['output'])
self.logger.warning(msg)
return None
else:
log = self._get_log_url(response)
if log:
self.logger.debug("%s slice GetCredential log: %s", self.fwtype, log)
slice_cred = response['value']
expiration = naiveUTC(expiration_dt).isoformat()
self.logger.info('Requesting new slice expiration %r', expiration)
params = {'credential': slice_cred,
'expiration': expiration}
(response, message) = _do_ssl(self, None, ("Renew slice %s at SA %s" % (urn, self.config['sa'])), self.sa.RenewSlice, params)
if response is None or response['code']:
# request failed, print a message and return None
msg = "Failed to renew slice %s: %s"
if response is None:
msg = msg % (urn, message)
else:
log = self._get_log_url(response)
if log:
msg = msg % (urn, ("%s SA said: " % self.fwtype) + response['output'] + (". %s log url: %s" % (self.fwtype, log)))
else:
msg = msg % (urn, ("%s SA said: " % self.fwtype) + response['output'])
self.logger.warning(msg)
return None
else:
# Success. requested expiration worked, return it.
log = self._get_log_url(response)
if log:
self.logger.debug("%s RenewSlice log: %s", self.fwtype, log)
# response['value'] is the new slice
# cred. parse the new expiration date out of
# that and return that
sliceexp = naiveUTC(credutils.get_cred_exp(self.logger, response['value']))
# If request is diff from sliceexp then log a warning
if abs(sliceexp - naiveUTC(expiration_dt)) > datetime.timedelta.resolution:
self.logger.warn("Renewed %s slice %s expiration %s different than request %s", self.fwtype, urn, sliceexp, expiration_dt)
return sliceexp
# def _get_slices(self):
# """Gets the ProtoGENI slices from the ProtoGENI
# Clearinghouse. Returns a list of dictionaries as documented
# in https://www.protogeni.net/trac/protogeni/wiki/ClearingHouseAPI2#List
# """
# cred, message = self.get_user_cred()
# if not cred:
# raise Exception("No user credential available. %s" % message)
# pg_response = self.ch.List({'credential': cred, 'type': 'Slices'})
# code = pg_response['code']
# if code:
# self.logger.error("Received error code: %d", code)
# output = pg_response['output']
# self.logger.error("Received error message: %s", output)
# # Return an empty list.
# return list()
# # value is a list of dicts, each containing info about an aggregate
# return pg_response['value']
def _list_my_slices(self, user):
"""Gets the ProtoGENI slices from the ProtoGENI Slice Authority. """
cred, message = self.get_user_cred()
if not cred:
raise Exception("No user credential available. %s" % message)
(pg_response, message) = _do_ssl(self, None, "Resolve user %s at %s SA %s" % (user, self.fwtype, self.config['sa']), self.sa.Resolve, {'credential': cred, 'type': 'User', 'hrn': user})
if pg_response is None:
self.logger.error("Cannot list slices: %s", message)
raise Exception(message)
# return list()
log = self._get_log_url(pg_response)
code = pg_response['code']
if code:
self.logger.error("Received error code: %d", code)
output = pg_response['output']
self.logger.error("Received error message from %s: %s", self.fwtype, output)
msg = "Error %d: %s" % (code, output)
if log:
self.logger.error("%s log url: %s", self.fwtype, log)
raise Exception(msg)
# # Return an empty list.
# return list()
# Resolve keys include uuid, slices, urn, subauthorities, name, hrn, gid, pubkeys, email, uid
# value is a dict, containing a list of slice URNs
return pg_response['value']['slices']
def _list_ssh_keys(self, userurn = None):
"""Gets the ProtoGENI stored SSH public keys from the ProtoGENI Slice Authority. """
cred, message = self.get_user_cred()
if not cred:
raise Exception("No user credential available. %s" % message)
usr = 'current user'
if userurn is not None:
usr = 'user ' + get_leaf(userurn)
(pg_response, message) = _do_ssl(self, None, "Get %s SSH Keys at %s SA %s" % (usr, self.fwtype, self.config['sa']), self.sa.GetKeys, {'credential': cred, 'member_urn': userurn})
else:
(pg_response, message) = _do_ssl(self, None, "Get %s SSH Keys at %s SA %s" % (usr, self.fwtype, self.config['sa']), self.sa.GetKeys, {'credential': cred})
if pg_response is None:
msg = "Cannot get %s's public SSH keys: %s" % (usr, message)
self.logger.error(msg)
return list(), msg
log = self._get_log_url(pg_response)
code = pg_response['code']
if code:
output = pg_response['output']
msg = "%s Server error %d: %s" % (self.fwtype, code, output)
if log:
msg += " (log url: %s)" % log
self.logger.error(msg)
# Return an empty list.
return list(), msg
# value is an array. For each entry, type=ssh, key=<key>
if not isinstance(pg_response['value'], list):
self.logger.error("Non list response for value: %r" % pg_response['value']);
return pg_response['value'], None
keys = list()
for key in pg_response['value']:
if not key.has_key('key'):
self.logger.error("GetKeys list missing key value?");
continue
keys.append({'public_key': key['key']})
return keys, None
def _get_components(self):
"""Gets the ProtoGENI component managers from the ProtoGENI
Clearinghouse. Returns a list of dictionaries as documented
in https://www.protogeni.net/trac/protogeni/wiki/ClearingHouseAPI2#ListComponents
"""
cred, message = self.get_user_cred()
if not cred:
raise Exception("Cannot get %s components - no user credential available. %s" % (self.fwtype, message))
(pg_response, message) = _do_ssl(self, None, "List Components at %s CH %s" % (self.fwtype, self.config['ch']), self.ch.ListComponents, {'credential': cred})
if (pg_response is None) or (pg_response['code']):
self.logger.error("Cannot list %s components: %s", self.fwtype, message)
if pg_response:
self.logger.error("Received error code: %d", pg_response['code'])
output = pg_response['output']
self.logger.error("Received error message: %s", output)
log = self._get_log_url(pg_response)
if log:
self.logger.error("%s log url: %s", self.fwtype, log)
# Return an empty list.
return list()
# value is a list of dicts, each containing info about an aggregate
return pg_response['value']
def _find_geni_ams(self, cm_dicts):
"""Finds ComponentManagers that also support the GENI AM API.
Returns a list of dicts containing those CMs that implement the AM API.
The AM URL is included in the dict in the key 'am_url'.
"""
result = list()
for cm_dict in cm_dicts:
if cm_dict.has_key("url"):
cm_url = cm_dict['url']
else:
self.logger.error("Missing url key for CM %s", cm_dict)
continue
if not cm_dict.has_key("urn"):
self.logger.error("Missing urn key for CM %s", cm_dict)
cm_dict["urn"] = ''
self.logger.debug('Checking for AM at %s', cm_url)
am_url = self._cm_to_am(cm_url)
self.logger.debug('AM URL = %s', am_url)
# Test the am_url...
# timeout is in seconds
client = self.make_client(am_url, self.key, self.cert,
self.config['verbose'],
timeout=5)
# This refactoring means we print verbose errors for 404 Not Found messages like
# we get when there is no AM for the CM
# Old version skipped xmlrpclib.ProtocolError,
# ssl.SSLError, socket.error
            (version, message) = _do_ssl(self, ("404 Not Found", "Name or service not known", "timed out"), "Test PG AM for GENI API compatibility at %s" % am_url, client.GetVersion)
# FIXME: look at the message and say something re-assuring
# on OK errors?
_ = message #Appease eclipse
self.logger.debug('version = %r', version)
if version is not None:
if version.has_key('geni_api'):
cm_dict['am_url'] = am_url
result.append(cm_dict)
return result
def _cm_to_am(self, url):
"""Convert a CM url to an AM url."""
# Replace the trailing "cm" with "am"
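        # e.g. 'https://boss.emulab.net:443/protogeni/xmlrpc/cm'
        #  ->  'https://boss.emulab.net:443/protogeni/xmlrpc/am'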
if url.endswith('/protogeni/xmlrpc/cm'):
return url[:-2] + 'am'
else:
return url
def get_version(self):
# Here we call getversion at the CH, then append the getversion at the SA
pg_response = dict()
versionstruct = dict()
(pg_response, message) = _do_ssl(self, None, ("GetVersion of %s CH %s using cert %s" % (self.fwtype, self.config['ch'], self.config['cert'])), self.ch.GetVersion)
_ = message #Appease eclipse
if pg_response is None:
self.logger.error("Failed to get version of %s CH: %s", self.fwtype, message)
# FIXME: Return error message?
return None, message
code = pg_response['code']
log = self._get_log_url(pg_response)
if code:
self.logger.error("Failed to get version of %s CH: Received error code: %d", self.fwtype, code)
output = pg_response['output']
self.logger.error("Received error message: %s", output)
if log:
self.logger.error("See log: %s", log)
#return None
else:
versionstruct = pg_response['value']
if log:
self.logger.debug("%s log url: %s", self.fwtype, log)
sa_response = None
(sa_response, message2) = _do_ssl(self, None, ("GetVersion of %s SA %s using cert %s" % (self.fwtype, self.config['sa'], self.config['cert'])), self.sa.GetVersion)
_ = message2 #Appease eclipse
if sa_response is not None:
if isinstance(sa_response, dict) and sa_response.has_key('value'):
versionstruct['sa-version'] = sa_response['value']
else:
versionstruct['sa-version'] = sa_response
return versionstruct, message
|
|
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
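# e.g. which('wine') returns the executable's full path (such as the
# hypothetical '/usr/bin/wine'), or None if it is not found on PATH.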
def get_os_version():
""" returns the underlying OS as string: either win, linux or mac """
ver = sys.platform.lower()
if ver.startswith('java'):
import java.lang
ver = java.lang.System.getProperty("os.name").lower()
return ver
_OS = get_os_version()
# _OS should be in [ 'win', 'linux', 'mac os x']
default_wine_path = which('wine')
prop_dir = os.getenv('USERHOME_DOT_TOPSPIN', "not defined")
environment_var = os.environ.copy()
def read_config_file():
""" read config file and returns a dictionnary
If no default declared, first entry is default
"""
config = dict()
config_file = os.path.join(prop_dir,'JTutils','dmfit.path')
if not os.path.exists(config_file):
return config
with open(config_file, 'r') as f:
key = None
for line in f:
line = line.strip()
if line.startswith('['):
key = line.lstrip('[')
key = key.rstrip(']')
config[key] = dict()
continue
if key is not None:
for keyword in ['DMFITPATH', 'WINEPATH', 'WINEPREFIX', 'DEFAULT']:
if line.startswith(keyword):
_, config[key][keyword] = line.split('=')
config[key][keyword] = config[key][keyword].strip()
for key in config.keys():
if 'DEFAULT' not in config[key].keys():
config[key]['DEFAULT'] = False
else:
config[key]['DEFAULT'] = (config[key]['DEFAULT'] == '1')
return config
def write_config_file(config):
""" Write a configuration file for dmfit given a dictionnary """
config_dir = os.path.join(prop_dir,'JTutils')
# test if JTutils folder exist
if not os.path.exists(config_dir):
os.makedirs(config_dir)
config_file = os.path.join(prop_dir,'JTutils','dmfit.path')
with open(config_file, 'w') as f:
for key in config.keys():
f.write('['+key+']')
f.write('\n')
for keyword in config[key].keys():
if keyword == 'DEFAULT':
if config[key][keyword]:
f.write("=".join([keyword, '1']))
else:
f.write("=".join([keyword, '0']))
else:
f.write("=".join([keyword, config[key][keyword]]))
f.write('\n')
def check_config(config):
pass
def run_setup():
config = read_config_file()
setup_panel = ConfigPanel(config)
# dmfitpath = select_dmfitpath()
# MSG(dmfitpath)
# winepath = select_winepath()
# MSG(winepath)
# check if file already exists
# parse the file
# check for validity of each entry
# return info on validity
# ask for update (edit/add/delete)
# dmfit location
# wine exe
# wine prefix
# check for validity
# add entry
# ask for update (loop)
# write file
from java.io import File
from java.awt import BorderLayout, Dimension
from javax.swing import JFileChooser, JFrame, JPanel, JLabel, JButton, JRadioButton, ButtonGroup, BoxLayout, Box, JTextField, JDialog
class FileSelector(JFrame):
""" Opens a file selector dialog """
def __init__(self, hidden=False, dir_only=False, title='', defaultFile=''):
super(FileSelector, self).__init__()
self.file_name = None
self.initUI(hidden, dir_only, title, defaultFile)
def initUI(self, hidden, dir_only, title, defaultFile):
self.panel = JPanel()
self.panel.setLayout(BorderLayout())
chosenFile = JFileChooser()
chosenFile.setSelectedFile(File(defaultFile))
chosenFile.setDialogTitle(title)
if dir_only:
chosenFile.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY)
chosenFile.setFileHidingEnabled(hidden)
ret = chosenFile.showOpenDialog(self.panel)
if ret == JFileChooser.APPROVE_OPTION:
if dir_only:
if chosenFile.getSelectedFile().isDirectory():
self.file_name = str(chosenFile.getSelectedFile())
else:
self.file_name = str(chosenFile.getSelectedFile())
def get_file_name(self):
return self.file_name
class ConfigPanel(JFrame):
class SetKey(JDialog):
def __init__(self, button=None):
super(ConfigPanel.SetKey, self).__init__()
self.setTitle('Enter version label')
self.setModal(True)
if button is None:
self.button = JButton()
else:
self.button = button
self.initUI()
def initUI(self):
self.JP = JPanel()
self.JL = JLabel('Set version label: enter to validate')
self.JP.add(self.JL)
self.JTF = JTextField(self.button.getText(), 10)
self.JTF.actionPerformed = self.doit
self.JP.add(self.JTF)
self.add(self.JP)
self.pack()
self.setLocation(150,150)
self.setVisible(True)
def doit(self,event):
key = self.JTF.getText()
self.button.setText(self.JTF.getText())
self.dispose()
def __init__(self, config=dict()):
super(ConfigPanel, self).__init__()
self.setTitle('DMFIT setup')
self.config = config.copy()
self.param_list = ['DMFITPATH', 'WINEPATH', 'WINEPREFIX', 'DEFAULT']
self.actions_list = {'DMFITPATH': self.set_dmfitpath, 'WINEPATH': self.set_winepath, 'WINEPREFIX': self.set_wineprefix}
self.initUI(self.config)
def update_config_from_UI(self):
config = dict()
for keyUI in self.config_item_dict.keys():
key = self.config_item_dict[keyUI]['JB'].getText()
config[key] = dict()
for param in self.config_item_dict[keyUI].keys():
if param not in self.param_list:
continue
if param == 'DEFAULT':
config[key][param] = self.config_item_dict[keyUI][param]['JRB'].isSelected()
else:
print(type(self.config_item_dict[keyUI][param]))
config[key][param] = self.config_item_dict[keyUI][param]['JB'].getText()
self.config = config
print self.config
def add_entry(self, event):
# get key,
new_version = dict()
dummyJB = JButton()
ConfigPanel.SetKey(dummyJB)
key = dummyJB.getText()
del dummyJB
if key == '':
return
winepath=None
wineprefix=None
dmfitpath = select_dmfitpath()
if dmfitpath is None:
return
new_version['DMFITPATH'] = dmfitpath
if 'win' not in _OS : # then one must use WINE
winepath = select_winepath()
if winepath is None:
return
new_version['WINEPATH'] = winepath
wineprefix = self.guess_wineprefix(dmfitpath)
if wineprefix is None:
return
new_version['WINEPREFIX'] = wineprefix
new_version['DEFAULT'] = False
self.add_UI_entry(key, new_version)
self.revalidate()
def remove_entry(self,event):
selectedRB = None
for i in self.select_key_rb_group.getElements():
if i.isSelected():
selectedRB = i
break
if selectedRB is None:
return
self.select_key_rb_group.remove(selectedRB)
key = self.hash4keys[selectedRB]
self.select_default_rb_group.remove(self.config_item_dict[key]['DEFAULT']['JRB'])
self.panelEntries.remove(self.config_item_dict[key]['JP'])
self.revalidate()
del self.config_item_dict[key]
self.pack()
def save_config(self,event):
self.update_config_from_UI()
write_config_file(self.config)
self.dispose()
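    # guess_wineprefix walks up from the dmfit executable path looking for a
    # 'drive_c' component, e.g. the hypothetical path
    # '/home/user/.wine/drive_c/dmfit/dmfit.exe' yields the prefix '/home/user/.wine'.
    # If no 'drive_c' is found, the user is asked to pick the prefix explicitly.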
def guess_wineprefix(self, path):
head, tail = os.path.split(path)
while tail != 'drive_c' and tail != '':
head, tail = os.path.split(head)
if tail == '':
"ask for explicit wine prefix"
chooseUI = FileSelector(hidden=True, dir_only=True, title='Select WINEPREFIX')
wineprefix = chooseUI.get_file_name()
else :
wineprefix = head
return wineprefix
def set_dmfitpath(self, event):
button = event.getSource()
old_path = button.getText()
path = select_dmfitpath(defaultFile=old_path)
if path is None:
return
button.setText(path)
if 'win' not in _OS : # let's guess wineprefix
            wineprefix = self.guess_wineprefix(path)
key = self.hash4keys[button]
self.config_item_dict[key]['WINEPREFIX']['JB'].setText(wineprefix)
def set_wineprefix(self, event):
button = event.getSource()
old_path = button.getText()
chooseUI = FileSelector(hidden=False, dir_only=True, title='Select WINEPREFIX')
path = chooseUI.get_file_name()
if path is None:
return
button.setText(path)
def set_winepath(self, event):
button = event.getSource()
old_path = button.getText()
path = select_winepath(defaultFile=old_path)
if path is None:
return
button.setText(path)
def set_key(self, event):
button = event.getSource()
SK = ConfigPanel.SetKey(event.getSource())
def add_UI_entry(self,key, dico=dict()):
UI_key_dict = dict()
UI_key_dict['JP'] = JPanel()
UI_key_dict['JP'].setLayout(BoxLayout(UI_key_dict['JP'], BoxLayout.X_AXIS))
UI_key_dict['JRB'] = JRadioButton()
self.select_key_rb_group.add(UI_key_dict['JRB'])
self.hash4keys[UI_key_dict['JRB']] = key
UI_key_dict['JB'] = JButton(key, actionPerformed=self.set_key)
UI_key_dict['JB'].setPreferredSize(Dimension(100,25))
UI_key_dict['JPP'] = JPanel()
UI_key_dict['JP'].add(UI_key_dict['JRB'])
UI_key_dict['JP'].add(UI_key_dict['JB'])
UI_key_dict['JP'].add(Box.createRigidArea(Dimension(15, 0)))
UI_key_dict['JP'].add(UI_key_dict['JPP'])
UI_key_dict['JPP'].setLayout(BoxLayout(UI_key_dict['JPP'], BoxLayout.Y_AXIS))
self.panelEntries.add(UI_key_dict['JP'])
for param in self.param_list:
if param not in dico.keys(): continue
if param == 'DEFAULT':
UI_key_dict[param] = {'JP':JPanel(), 'JRB': JRadioButton('is Default')}
UI_key_dict[param]['JP'].setLayout(BoxLayout(
UI_key_dict[param]['JP'], BoxLayout.X_AXIS))
UI_key_dict[param]['JP'].add(UI_key_dict[param]['JRB'])
UI_key_dict[param]['JP'].add(Box.createHorizontalGlue())
self.select_default_rb_group.add(UI_key_dict[param]['JRB'])
UI_key_dict['JPP'].add(UI_key_dict[param]['JP'])
UI_key_dict[param]['JRB'].setSelected(dico[param])
self.hash4keys[UI_key_dict[param]['JRB']] = key
continue
UI_key_dict[param] = { 'JP':JPanel(), 'JL': JLabel(param+": "),
'JB': JButton(dico[param]) }
self.hash4keys[UI_key_dict[param]['JB']] = key
UI_key_dict[param]['JL'].setPreferredSize(Dimension(100,25))
UI_key_dict[param]['JB'].actionPerformed = self.actions_list[param]
UI_key_dict[param]['JP'].setLayout(BoxLayout(UI_key_dict[param]['JP'], BoxLayout.X_AXIS))
UI_key_dict[param]['JP'].add(UI_key_dict[param]['JL'])
UI_key_dict[param]['JP'].add(UI_key_dict[param]['JB'])
UI_key_dict[param]['JP'].add(Box.createHorizontalGlue())
UI_key_dict['JPP'].add(UI_key_dict[param]['JP'])
UI_key_dict['JPP'].add(Box.createRigidArea(Dimension(0, 20)))
self.config_item_dict[key]=UI_key_dict
self.pack()
pass
def initUI(self, config):
self.setLayout(BoxLayout(self.getContentPane(), BoxLayout.Y_AXIS))
self.entries = config.keys()
self.hash4keys = dict()
self.panelEntries = JPanel()
self.panelEntries.setLayout(BoxLayout(self.panelEntries,BoxLayout.Y_AXIS))
self.add(self.panelEntries)
#buttons
self.panelButtons = JPanel()
self.panelButtons.setLayout(BoxLayout(self.panelButtons,BoxLayout.X_AXIS))
#'Configuration list:')
self.addB = JButton('Add')
self.addB.actionPerformed = self.add_entry
self.removeB = JButton('Remove')
self.removeB.actionPerformed = self.remove_entry
self.saveB = JButton('Save')
self.saveB.actionPerformed = self.save_config
# pack buttons
self.add(self.panelButtons)
self.panelButtons.add(self.addB)
self.panelButtons.add(self.removeB)
self.panelButtons.add(self.saveB)
self.config_item_dict = {}
self.select_key_rb_group = ButtonGroup()
self.select_default_rb_group = ButtonGroup()
for key in self.entries:
if key == 'default':
continue
self.add_UI_entry(key, config[key])
self.pack()
self.setLocation(150,150)
self.setVisible(True)
def select_winepath(hidden=False, dir_only=False, title="Select wine executable", defaultFile=which('wine')):
chooseUI = FileSelector(hidden, dir_only, title, defaultFile)
return chooseUI.get_file_name()
def select_dmfitpath(hidden=(_OS == 'win'), dir_only=False, title='Select DMFIT executable', defaultFile=''):
chooseUI = FileSelector(hidden, dir_only, title, defaultFile)
# note : WINEPREFIX should be set from dmfitpath
return chooseUI.get_file_name()
if __name__ == "__main__" :
run_setup()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=line-too-long
"""Parameter optimizer."""
__all__ = ['Trainer']
from .. import optimizer as opt
from ..model import _create_kvstore
from .parameter import ParameterDict, Parameter
class Trainer(object):
"""Applies an `Optimizer` on a set of Parameters. Trainer should
be used together with `autograd`.
Parameters
----------
params : ParameterDict
The set of parameters to optimize.
optimizer : str or Optimizer
The optimizer to use. See
`help <http://mxnet.io/api/python/optimization/optimization.html#the-mxnet-optimizer-package>`_
on Optimizer for a list of available optimizers.
optimizer_params : dict
Key-word arguments to be passed to optimizer constructor. For example,
`{'learning_rate': 0.1}`. All optimizers accept learning_rate, wd (weight decay),
clip_gradient, and lr_scheduler. See each optimizer's
constructor for a list of additional supported arguments.
kvstore : str or KVStore
kvstore type for multi-gpu and distributed training. See help on
:any:`mxnet.kvstore.create` for more information.
compression_params : dict
Specifies type of gradient compression and additional arguments depending
on the type of compression being used. For example, 2bit compression requires a threshold.
Arguments would then be {'type':'2bit', 'threshold':0.5}
See mxnet.KVStore.set_gradient_compression method for more details on gradient compression.
Properties
----------
learning_rate : float
The current learning rate of the optimizer. Given an Optimizer object
optimizer, its learning rate can be accessed as optimizer.learning_rate.
"""
def __init__(self, params, optimizer, optimizer_params=None, kvstore='device',
compression_params=None):
if isinstance(params, (dict, ParameterDict)):
params = list(params.values())
if not isinstance(params, (list, tuple)):
raise ValueError(
"First argument must be a list or dict of Parameters, " \
"got %s."%(type(params)))
self._params = []
for param in params:
if not isinstance(param, Parameter):
raise ValueError(
"First argument must be a list or dict of Parameters, " \
"got list of %s."%(type(param)))
self._params.append(param)
self._compression_params = compression_params
optimizer_params = optimizer_params if optimizer_params else {}
self._scale = optimizer_params.get('rescale_grad', 1.0)
self._contexts = self._check_contexts()
self._init_optimizer(optimizer, optimizer_params)
self._kv_initialized = False
self._kvstore = kvstore
def _check_contexts(self):
contexts = None
for param in self._params:
ctx = param.list_ctx()
assert contexts is None or contexts == ctx, \
"All Parameters must be initialized on the same set of contexts, " \
"but Parameter %s is initialized on %s while previous Parameters " \
"are initialized on %s."%(param.name, str(ctx), str(contexts))
contexts = ctx
return contexts
def _init_optimizer(self, optimizer, optimizer_params):
param_dict = {i: param for i, param in enumerate(self._params)}
if isinstance(optimizer, opt.Optimizer):
assert not optimizer_params, \
"optimizer_params must be None if optimizer is an instance of " \
"Optimizer instead of str"
self._optimizer = optimizer
self._optimizer.param_dict = param_dict
else:
self._optimizer = opt.create(optimizer, param_dict=param_dict,
**optimizer_params)
self._updaters = [opt.get_updater(self._optimizer) \
for _ in self._contexts]
def _init_kvstore(self):
arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params}
kvstore, update_on_kvstore = _create_kvstore(self._kvstore, len(self._contexts),
arg_arrays)
if kvstore:
if self._compression_params:
kvstore.set_gradient_compression(self._compression_params)
if 'dist' in kvstore.type:
update_on_kvstore = False
for i, param in enumerate(self._params):
param_arrays = param.list_data()
kvstore.init(i, param_arrays[0])
kvstore.pull(i, param_arrays, priority=-i)
if update_on_kvstore:
kvstore.set_optimizer(self._optimizer)
self._kvstore = kvstore
self._update_on_kvstore = update_on_kvstore
else:
self._kvstore = None
self._update_on_kvstore = None
self._kv_initialized = True
@property
def learning_rate(self):
if not isinstance(self._optimizer, opt.Optimizer):
raise UserWarning("Optimizer has to be defined before its learning "
"rate can be accessed.")
else:
return self._optimizer.learning_rate
def set_learning_rate(self, lr):
"""Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
"""
if not isinstance(self._optimizer, opt.Optimizer):
raise UserWarning("Optimizer has to be defined before its learning "
"rate is mutated.")
else:
self._optimizer.set_learning_rate(lr)
def step(self, batch_size, ignore_stale_grad=False):
"""Makes one step of parameter update. Should be called after
`autograd.compute_gradient` and outside of `record()` scope.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
ignore_stale_grad : bool, optional, default=False
If true, ignores Parameters with stale gradient (gradient that has not
been updated by `backward` after last step) and skip update.
"""
if not self._kv_initialized:
self._init_kvstore()
self._optimizer.rescale_grad = self._scale / batch_size
for i, param in enumerate(self._params):
if param.grad_req == 'null':
continue
if not ignore_stale_grad:
for data in param.list_data():
if not data._fresh_grad:
raise UserWarning(
"Gradient of Parameter `%s` on context %s has not been updated "
"by backward since last `step`. This could mean a bug in your "
"model that maked it only use a subset of the Parameters (Blocks) "
"for this iteration. If you are intentionally only using a subset, "
"call step with ignore_stale_grad=True to suppress this "
"warning and skip updating of Parameters with stale gradient" \
%(param.name, str(data.context)))
if self._kvstore:
self._kvstore.push(i, param.list_grad(), priority=-i)
if self._update_on_kvstore:
self._kvstore.pull(i, param.list_data(), priority=-i)
continue
else:
self._kvstore.pull(i, param.list_grad(), priority=-i)
for upd, arr, grad in zip(self._updaters, param.list_data(), param.list_grad()):
if not ignore_stale_grad or arr._fresh_grad:
upd(i, grad, arr)
arr._fresh_grad = False
def save_states(self, fname):
"""Saves trainer states (e.g. optimizer, momentum) to a file.
Parameters
----------
fname : str
Path to output states file.
"""
assert self._optimizer is not None
if self._update_on_kvstore:
self._kvstore.save_optimizer_states(fname, dump_optimizer=True)
else:
with open(fname, 'wb') as fout:
fout.write(self._updaters[0].get_states(dump_optimizer=True))
def load_states(self, fname):
"""Loads trainer states (e.g. optimizer, momentum) from a file.
Parameters
----------
fname : str
Path to input states file.
"""
if not self._kv_initialized:
self._init_kvstore()
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(fname)
self._optimizer = self._kvstore._updater.optimizer
else:
with open(fname, 'rb') as f:
states = f.read()
for updater in self._updaters:
updater.set_states(states)
updater.optimizer = self._updaters[0].optimizer
self._optimizer = self._updaters[0].optimizer
|
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
import py
import pytest
import qisys.script
import qisys.sh
import qisrc.git
import qisrc.sync
from qisrc.test.conftest import TestGitWorkTree, TestGit
from qibuild.test.conftest import TestBuildWorkTree
import qibuild.build_config
import qibuild.config
import qibuild.profile
def test_sync_clones_new_repos(qisrc_action, git_server):
git_server.create_repo("foo.git")
git_server.create_repo("bar.git")
qisrc_action("init", git_server.manifest_url)
# pylint: disable-msg=E1101
cwd = py.path.local(os.getcwd())
assert not cwd.join("foo").join("README").check(file=True)
git_server.push_file("foo.git", "README", "This is foo\n")
qisys.script.run_action("qisrc.actions.sync")
assert cwd.join("foo").join("README").check(file=True)
def test_sync_skips_unconfigured_projects(qisrc_action, git_server, test_git):
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
git_worktree = TestGitWorkTree()
# pylint: disable-msg=E1101
cwd = py.path.local(os.getcwd())
new_proj = cwd.mkdir("new_proj")
git = test_git(new_proj.strpath)
git.initialize()
git_worktree.add_git_project(new_proj.strpath)
rc = qisrc_action("sync", retcode=True)
assert rc != 0
def test_clone_new_repos(qisrc_action, git_server):
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
git_server.create_repo("bar.git")
qisrc_action("sync")
git_worktree = TestGitWorkTree()
assert git_worktree.get_git_project("bar")
def test_configure_new_repos(qisrc_action, git_server):
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
qisrc_action("sync")
git_server.create_repo("bar.git")
qisrc_action("sync", "foo") # Sync only foo, but expect to clone bar
git_worktree = TestGitWorkTree()
bar = git_worktree.get_git_project("bar")
assert bar.default_remote
def test_creates_required_subdirs(qisrc_action, git_server):
git_server.create_repo("foo/bar.git")
qisrc_action("init", git_server.manifest_url)
qisrc_action("sync")
git_worktree = TestGitWorkTree()
assert git_worktree.get_git_project("foo/bar")
def test_uses_build_deps_by_default(qisrc_action, git_server):
git_server.add_qibuild_test_project("world")
git_server.add_qibuild_test_project("hello")
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
    # Create some changes in foo and world
git_server.push_file("foo.git", "foo.txt", "unrelated changes")
git_server.push_file("world.git", "world.txt", "dependency has been updated")
# Sync hello
qisrc_action.chdir("hello")
qisrc_action("sync")
qisrc_action.chdir(qisrc_action.root)
git_worktree = TestGitWorkTree()
# foo is not a dep, should not have changed:
foo_proj = git_worktree.get_git_project("foo")
foo_txt = os.path.join(foo_proj.path, "foo.txt")
assert not os.path.exists(foo_txt)
# World is a dep of hello:
world_proj = git_worktree.get_git_project("world")
world_txt = os.path.join(world_proj.path, "world.txt")
assert os.path.exists(world_txt)
def test_sync_build_profiles(qisrc_action, git_server):
git_server.add_build_profile("foo", [("WITH_FOO", "ON")])
qisrc_action("init", git_server.manifest_url)
build_worktree = TestBuildWorkTree()
build_config = qibuild.build_config.CMakeBuildConfig(build_worktree)
qibuild.config.add_build_config("foo", profiles=["foo"])
build_config.set_active_config("foo")
assert build_config.cmake_args == ["-DCMAKE_BUILD_TYPE=Debug", "-DWITH_FOO=ON"]
git_server.add_build_profile("foo", [("WITH_FOO", "ON"), ("WITH_BAR", "ON")])
qisrc_action("sync")
assert build_config.cmake_args == ["-DCMAKE_BUILD_TYPE=Debug",
"-DWITH_FOO=ON", "-DWITH_BAR=ON"]
def test_sync_branch_devel(qisrc_action, git_server, test_git):
# This tests the case where everything goes smoothly
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
git_server.push_file("foo.git", "foo.txt", "a super change")
git_server.push_file("foo.git", "bar.txt", "a super bugfix")
git_worktree = TestGitWorkTree()
foo = git_worktree.get_git_project("foo")
test_git = TestGit(foo.path)
test_git.call("checkout", "-b", "devel")
test_git.commit_file("developing.txt", "like a boss")
git_server.push_file("foo.git", "foobar.txt", "some other change")
git_server.push_file("foo.git", "bigchange.txt", "some huge change")
qisrc_action("sync", "--rebase-devel")
test_git.call("checkout", "master")
# Check that master is fast-forwarded
bigchange_txt = os.path.join(foo.path, "bigchange.txt")
assert os.path.exists(bigchange_txt)
# Check rebase is done smoothly
test_git.call("checkout", "devel")
test_git.call("rebase", "master")
assert os.path.exists(bigchange_txt)
developing_txt = os.path.join(foo.path, "developing.txt")
assert os.path.exists(developing_txt)
def test_sync_branch_devel_unclean(qisrc_action, git_server, test_git):
# Case where the worktree isn't clean
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
git_server.push_file("foo.git", "foo.txt", "a super change")
git_server.push_file("foo.git", "bar.txt", "a super bugfix")
git_worktree = TestGitWorkTree()
foo = git_worktree.get_git_project("foo")
test_git = TestGit(foo.path)
test_git.call("checkout", "-b", "devel")
test_git.commit_file("developing.txt", "like a boss")
git_server.push_file("foo.git", "foobar.txt", "some other change")
wip_txt = os.path.join(foo.path, "wip.txt")
open(wip_txt, 'w').close()
qisys.script.run_action("qisrc.actions.sync", ["--rebase-devel"])
# Master has been fast-forwarded and I haven't lost my WIP
assert os.path.exists(wip_txt)
def test_sync_branch_devel_no_ff(qisrc_action, git_server, test_git):
# Case where master can't be fast-forwarded, does nothing except warning
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
git_server.push_file("foo.git", "foo.txt", "a super change")
git_worktree = TestGitWorkTree()
foo = git_worktree.get_git_project("foo")
test_git = TestGit(foo.path)
test_git.commit_file("foo.git", "div.txt", "diverging from master")
master_sha1 = test_git.get_ref_sha1("refs/heads/master")
test_git.call("checkout", "-b", "devel")
test_git.commit_file("developing.txt", "like a boss")
git_server.push_file("foo.git", "foobar.txt", "some other change")
qisrc_action("sync", "--rebase-devel")
# Master HEAD is untouched
assert test_git.get_ref_sha1("refs/heads/master") == master_sha1
def test_sync_dash_g(qisrc_action, git_server):
git_server.create_group("mygroup", ["a", "b"])
git_server.create_repo("other")
git_server.push_file("other", "other.txt", "change 1")
qisrc_action("init", git_server.manifest_url)
git_server.push_file("other", "other.txt", "change 2")
qisrc_action("sync", "--group", "mygroup")
git_worktree = TestGitWorkTree()
other_proj = git_worktree.get_git_project("other")
other_git = TestGit(other_proj.path)
assert other_git.read_file("other.txt") == "change 1"
def test_incorrect_branch_still_fetches(qisrc_action, git_server):
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
qisrc_action("sync")
git_worktree = TestGitWorkTree()
foo = git_worktree.get_git_project("foo")
test_git = TestGit(foo.path)
test_git.checkout("-b", "wip")
git_server.push_file("foo.git", "foo.txt", "some change")
previous_sha1 = test_git.get_ref_sha1("refs/remotes/origin/master")
foo.sync()
new_sha1 = test_git.get_ref_sha1("refs/remotes/origin/master")
assert previous_sha1 != new_sha1
def test_keeps_staged_changes(qisrc_action, git_server):
git_server.create_repo("foo.git")
qisrc_action("init", git_server.manifest_url)
qisrc_action("sync")
git_worktree = TestGitWorkTree()
foo = git_worktree.get_git_project("foo")
test_git = TestGit(foo.path)
staged_file = os.path.join(foo.path, "staged")
with open(staged_file, "w") as f:
f.write("I'm going to stage stuff")
test_git.add(staged_file)
foo.sync()
assert os.path.exists(staged_file)
def test_new_project_under_gitorious(git_worktree, git_server):
git_server.create_repo("foo", review=False)
manifest_url = git_server.manifest_url
worktree_syncer = qisrc.sync.WorkTreeSyncer(git_worktree)
worktree_syncer.configure_manifest(manifest_url)
foo = git_worktree.get_git_project("foo")
git_server.use_gitorious("foo")
worktree_syncer.sync()
foo = git_worktree.get_git_project("foo")
assert len(foo.remotes) == 1
assert foo.default_remote.name == "gitorious"
def test_removing_forked_project(qisrc_action, git_server):
git_server.create_repo("booz")
git_server.switch_manifest_branch("devel")
git_server.change_branch("booz", "devel")
qisrc_action("init", git_server.manifest_url, "--branch", "devel")
git_worktree = TestGitWorkTree()
booz_proj = git_worktree.get_git_project("booz")
git = qisrc.git.Git(booz_proj.path)
assert git.get_current_branch() == "devel"
git_server.change_branch("booz", "master")
qisrc_action("sync", "-a", retcode=True)
qisrc_action("checkout", "devel")
assert git.get_current_branch() == "master"
def test_sync_reset(qisrc_action, git_server):
git_server.create_repo("bar")
git_server.create_repo("baz")
qisrc_action("init", git_server.manifest_url)
git_worktree = TestGitWorkTree()
bar_proj = git_worktree.get_git_project("bar")
baz_proj = git_worktree.get_git_project("baz")
bar_git = TestGit(bar_proj.path)
baz_git = TestGit(baz_proj.path)
bar_git.checkout("-B", "devel")
baz_git.commit_file("unrelated.txt", "unrelated\n")
git_server.push_file("bar", "bar.txt", "this is bar\n")
qisrc_action("sync", "--reset")
assert bar_git.get_current_branch() == "master"
assert bar_git.read_file("bar.txt") == "this is bar\n"
# pylint: disable-msg=E1101
with pytest.raises(Exception):
baz_git.read_file("unrelated.txt")
def test_retcode_when_skipping(qisrc_action, git_server):
git_server.create_repo("bar")
qisrc_action("init", git_server.manifest_url)
git_worktree = TestGitWorkTree()
bar_proj = git_worktree.get_git_project("bar")
git = TestGit(bar_proj.path)
git.checkout("-b", "devel")
rc = qisrc_action("sync", retcode=True)
assert rc != 0
def test_do_not_sync_when_clone_fails(qisrc_action, git_server, record_messages):
git_server.create_repo("bar.git")
qisrc_action("init", git_server.manifest_url)
git_server.create_repo("baz.git")
git_server.srv.join("baz.git").remove()
rc = qisrc_action("sync", retcode=True)
assert rc != 0
assert not record_messages.find("Success")
|
|
from __future__ import absolute_import, unicode_literals
import json
import warnings
from django.db import models
from django.utils.crypto import get_random_string
from django.utils.six.moves.urllib.parse import urlparse
from elasticsearch import Elasticsearch, NotFoundError
from elasticsearch.helpers import bulk
from wagtail.utils.deprecation import RemovedInWagtail18Warning
from wagtail.wagtailsearch.backends.base import (
BaseSearchBackend, BaseSearchQuery, BaseSearchResults)
from wagtail.wagtailsearch.index import FilterField, RelatedFields, SearchField, class_is_indexed
class ElasticsearchMapping(object):
type_map = {
'AutoField': 'integer',
'BinaryField': 'binary',
'BooleanField': 'boolean',
'CharField': 'string',
'CommaSeparatedIntegerField': 'string',
'DateField': 'date',
'DateTimeField': 'date',
'DecimalField': 'double',
'FileField': 'string',
'FilePathField': 'string',
'FloatField': 'double',
'IntegerField': 'integer',
'BigIntegerField': 'long',
'IPAddressField': 'string',
'GenericIPAddressField': 'string',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'integer',
'SlugField': 'string',
'SmallIntegerField': 'integer',
'TextField': 'string',
'TimeField': 'date',
}
def __init__(self, model):
self.model = model
def get_document_type(self):
return self.model.indexed_get_content_type()
def get_field_column_name(self, field):
if isinstance(field, FilterField):
return field.get_attname(self.model) + '_filter'
elif isinstance(field, SearchField):
return field.get_attname(self.model)
elif isinstance(field, RelatedFields):
return field.field_name
def get_field_mapping(self, field):
if isinstance(field, RelatedFields):
mapping = {'type': 'nested', 'properties': {}}
nested_model = field.get_field(self.model).related_model
nested_mapping = type(self)(nested_model)
for sub_field in field.fields:
sub_field_name, sub_field_mapping = nested_mapping.get_field_mapping(sub_field)
mapping['properties'][sub_field_name] = sub_field_mapping
return self.get_field_column_name(field), mapping
else:
mapping = {'type': self.type_map.get(field.get_type(self.model), 'string')}
if isinstance(field, SearchField):
if field.boost:
mapping['boost'] = field.boost
if field.partial_match:
mapping['index_analyzer'] = 'edgengram_analyzer'
mapping['include_in_all'] = True
elif isinstance(field, FilterField):
mapping['index'] = 'not_analyzed'
mapping['include_in_all'] = False
if 'es_extra' in field.kwargs:
for key, value in field.kwargs['es_extra'].items():
mapping[key] = value
return self.get_field_column_name(field), mapping
def get_mapping(self):
# Make field list
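        # 'pk' and 'content_type' identify the indexed object; '_partials'
        # collects the values of partial-match SearchFields (filled in by
        # get_document below) and uses the edgengram analyzer so prefix and
        # partial matching work.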
fields = {
'pk': dict(type='string', index='not_analyzed', store='yes', include_in_all=False),
'content_type': dict(type='string', index='not_analyzed', include_in_all=False),
'_partials': dict(type='string', index_analyzer='edgengram_analyzer', include_in_all=False),
}
fields.update(dict(
self.get_field_mapping(field) for field in self.model.get_search_fields()
))
return {
self.get_document_type(): {
'properties': fields,
}
}
def get_document_id(self, obj):
return obj.indexed_get_toplevel_content_type() + ':' + str(obj.pk)
def _get_nested_document(self, fields, obj):
doc = {}
partials = []
model = type(obj)
mapping = type(self)(model)
for field in fields:
value = field.get_value(obj)
doc[mapping.get_field_column_name(field)] = value
# Check if this field should be added into _partials
if isinstance(field, SearchField) and field.partial_match:
partials.append(value)
return doc, partials
def get_document(self, obj):
# Build document
doc = dict(pk=str(obj.pk), content_type=self.model.indexed_get_content_type())
partials = []
for field in self.model.get_search_fields():
value = field.get_value(obj)
if isinstance(field, RelatedFields):
if isinstance(value, models.Manager):
nested_docs = []
for nested_obj in value.all():
nested_doc, extra_partials = self._get_nested_document(field.fields, nested_obj)
nested_docs.append(nested_doc)
partials.extend(extra_partials)
value = nested_docs
elif isinstance(value, models.Model):
value, extra_partials = self._get_nested_document(field.fields, value)
partials.extend(extra_partials)
doc[self.get_field_column_name(field)] = value
# Check if this field should be added into _partials
if isinstance(field, SearchField) and field.partial_match:
partials.append(value)
# Add partials to document
doc['_partials'] = partials
return doc
def __repr__(self):
return '<ElasticsearchMapping: %s>' % (self.model.__name__, )
class ElasticsearchSearchQuery(BaseSearchQuery):
mapping_class = ElasticsearchMapping
DEFAULT_OPERATOR = 'or'
def __init__(self, *args, **kwargs):
super(ElasticsearchSearchQuery, self).__init__(*args, **kwargs)
self.mapping = self.mapping_class(self.queryset.model)
# Convert field names into index column names
if self.fields:
fields = []
searchable_fields = {f.field_name: f for f in self.queryset.model.get_searchable_search_fields()}
for field_name in self.fields:
if field_name in searchable_fields:
field_name = self.mapping.get_field_column_name(searchable_fields[field_name])
fields.append(field_name)
self.fields = fields
def _process_lookup(self, field, lookup, value):
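        # Translate a Django-style lookup (exact, isnull, startswith/prefix,
        # gt/gte/lt/lte, range, in) into the equivalent Elasticsearch filter clause.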
column_name = self.mapping.get_field_column_name(field)
if lookup == 'exact':
if value is None:
return {
'missing': {
'field': column_name,
}
}
else:
return {
'term': {
column_name: value,
}
}
if lookup == 'isnull':
if value:
return {
'missing': {
'field': column_name,
}
}
else:
return {
'not': {
'missing': {
'field': column_name,
}
}
}
if lookup in ['startswith', 'prefix']:
return {
'prefix': {
column_name: value,
}
}
if lookup in ['gt', 'gte', 'lt', 'lte']:
return {
'range': {
column_name: {
lookup: value,
}
}
}
if lookup == 'range':
lower, upper = value
return {
'range': {
column_name: {
'gte': lower,
'lte': upper,
}
}
}
if lookup == 'in':
return {
'terms': {
column_name: list(value),
}
}
def _connect_filters(self, filters, connector, negated):
if filters:
if len(filters) == 1:
filter_out = filters[0]
else:
filter_out = {
connector.lower(): [
fil for fil in filters if fil is not None
]
}
if negated:
filter_out = {
'not': filter_out
}
return filter_out
def get_inner_query(self):
if self.query_string is not None:
fields = self.fields or ['_all', '_partials']
if len(fields) == 1:
if self.operator == 'or':
query = {
'match': {
fields[0]: self.query_string,
}
}
else:
query = {
'match': {
fields[0]: {
'query': self.query_string,
'operator': self.operator,
}
}
}
else:
query = {
'multi_match': {
'query': self.query_string,
'fields': fields,
}
}
if self.operator != 'or':
query['multi_match']['operator'] = self.operator
else:
query = {
'match_all': {}
}
return query
def get_filters(self):
filters = []
# Filter by content type
filters.append({
'prefix': {
'content_type': self.queryset.model.indexed_get_content_type()
}
})
# Apply filters from queryset
queryset_filters = self._get_filters_from_queryset()
if queryset_filters:
filters.append(queryset_filters)
return filters
def get_query(self):
inner_query = self.get_inner_query()
filters = self.get_filters()
if len(filters) == 1:
return {
'filtered': {
'query': inner_query,
'filter': filters[0],
}
}
elif len(filters) > 1:
return {
'filtered': {
'query': inner_query,
'filter': {
'and': filters,
}
}
}
else:
return inner_query
def get_sort(self):
# Ordering by relevance is the default in Elasticsearch
if self.order_by_relevance:
return
        # Get queryset and make sure it's ordered
if self.queryset.ordered:
order_by_fields = self.queryset.query.order_by
sort = []
for order_by_field in order_by_fields:
reverse = False
field_name = order_by_field
if order_by_field.startswith('-'):
reverse = True
field_name = order_by_field[1:]
field = self._get_filterable_field(field_name)
column_name = self.mapping.get_field_column_name(field)
sort.append({
column_name: 'desc' if reverse else 'asc'
})
return sort
else:
# Order by pk field
return ['pk']
def __repr__(self):
return json.dumps(self.get_query())
class ElasticsearchSearchResults(BaseSearchResults):
def _get_es_body(self, for_count=False):
body = {
'query': self.query.get_query()
}
if not for_count:
sort = self.query.get_sort()
if sort is not None:
body['sort'] = sort
return body
def _do_search(self):
# Params for elasticsearch query
params = dict(
index=self.backend.get_index_for_model(self.query.queryset.model).name,
body=self._get_es_body(),
_source=False,
fields='pk',
from_=self.start,
)
# Add size if set
if self.stop is not None:
params['size'] = self.stop - self.start
# Send to Elasticsearch
hits = self.backend.es.search(**params)
# Get pks from results
pks = [hit['fields']['pk'][0] for hit in hits['hits']['hits']]
# Initialise results dictionary
results = dict((str(pk), None) for pk in pks)
# Find objects in database and add them to dict
queryset = self.query.queryset.filter(pk__in=pks)
for obj in queryset:
results[str(obj.pk)] = obj
# Return results in order given by Elasticsearch
return [results[str(pk)] for pk in pks if results[str(pk)]]
def _do_count(self):
# Get count
hit_count = self.backend.es.count(
index=self.backend.get_index_for_model(self.query.queryset.model).name,
body=self._get_es_body(for_count=True),
)['count']
# Add limits
hit_count -= self.start
if self.stop is not None:
hit_count = min(hit_count, self.stop - self.start)
return max(hit_count, 0)
class ElasticsearchIndex(object):
def __init__(self, backend, name):
self.backend = backend
self.es = backend.es
self.mapping_class = backend.mapping_class
self.name = name
def put(self):
self.es.indices.create(self.name, self.backend.settings)
def delete(self):
try:
self.es.indices.delete(self.name)
except NotFoundError:
pass
def exists(self):
return self.es.indices.exists(self.name)
def is_alias(self):
return self.es.indices.exists_alias(self.name)
def aliased_indices(self):
"""
If this index object represents an alias (which appear the same in the
Elasticsearch API), this method can be used to fetch the list of indices
the alias points to.
Use the is_alias method if you need to find out if this an alias. This
returns an empty list if called on an index.
"""
return [
self.backend.index_class(self.backend, index_name)
for index_name in self.es.indices.get_alias(name=self.name).keys()
]
def put_alias(self, name):
"""
Creates a new alias to this index. If the alias already exists it will
be repointed to this index.
"""
self.es.indices.put_alias(name=name, index=self.name)
def add_model(self, model):
# Get mapping
mapping = self.mapping_class(model)
# Put mapping
self.es.indices.put_mapping(
index=self.name, doc_type=mapping.get_document_type(), body=mapping.get_mapping()
)
def add_item(self, item):
# Make sure the object can be indexed
if not class_is_indexed(item.__class__):
return
# Get mapping
mapping = self.mapping_class(item.__class__)
# Add document to index
self.es.index(
self.name, mapping.get_document_type(), mapping.get_document(item), id=mapping.get_document_id(item)
)
def add_items(self, model, items):
if not class_is_indexed(model):
return
# Get mapping
mapping = self.mapping_class(model)
doc_type = mapping.get_document_type()
# Create list of actions
actions = []
for item in items:
# Create the action
action = {
'_index': self.name,
'_type': doc_type,
'_id': mapping.get_document_id(item),
}
action.update(mapping.get_document(item))
actions.append(action)
# Run the actions
bulk(self.es, actions)
def delete_item(self, item):
# Make sure the object can be indexed
if not class_is_indexed(item.__class__):
return
# Get mapping
mapping = self.mapping_class(item.__class__)
# Delete document
try:
self.es.delete(
self.name,
mapping.get_document_type(),
mapping.get_document_id(item),
)
except NotFoundError:
pass # Document doesn't exist, ignore this exception
def refresh(self):
self.es.indices.refresh(self.name)
def reset(self):
# Delete old index
self.delete()
# Create new index
self.put()
class ElasticsearchIndexRebuilder(object):
def __init__(self, index):
self.index = index
def reset_index(self):
self.index.reset()
def start(self):
# Reset the index
self.reset_index()
return self.index
def finish(self):
self.index.refresh()
class ElasticsearchAtomicIndexRebuilder(ElasticsearchIndexRebuilder):
def __init__(self, index):
self.alias = index
self.index = index.backend.index_class(
index.backend,
self.alias.name + '_' + get_random_string(7).lower()
)
def reset_index(self):
# Delete old index using the alias
# This should delete both the alias and the index
self.alias.delete()
# Create new index
self.index.put()
# Create a new alias
self.index.put_alias(self.alias.name)
def start(self):
# Create the new index
self.index.put()
return self.index
def finish(self):
self.index.refresh()
if self.alias.is_alias():
# Update existing alias, then delete the old index
            # Find the index the alias currently points to; we'll delete it
            # after updating the alias
old_index = self.alias.aliased_indices()
# Update alias to point to new index
self.index.put_alias(self.alias.name)
# Delete old index
# aliased_indices() can return multiple indices. Delete them all
for index in old_index:
if index.name != self.index.name:
index.delete()
else:
# self.alias doesn't currently refer to an alias in Elasticsearch.
# This means that either nothing exists in ES with that name or
            # there is currently an index with that name
# Run delete on the alias, just in case it is currently an index.
# This happens on the first rebuild after switching ATOMIC_REBUILD on
self.alias.delete()
# Create the alias
self.index.put_alias(self.alias.name)
class ElasticsearchSearchBackend(BaseSearchBackend):
index_class = ElasticsearchIndex
query_class = ElasticsearchSearchQuery
results_class = ElasticsearchSearchResults
mapping_class = ElasticsearchMapping
basic_rebuilder_class = ElasticsearchIndexRebuilder
atomic_rebuilder_class = ElasticsearchAtomicIndexRebuilder
settings = {
'settings': {
'analysis': {
'analyzer': {
'ngram_analyzer': {
'type': 'custom',
'tokenizer': 'lowercase',
'filter': ['asciifolding', 'ngram']
},
'edgengram_analyzer': {
'type': 'custom',
'tokenizer': 'lowercase',
'filter': ['asciifolding', 'edgengram']
}
},
'tokenizer': {
'ngram_tokenizer': {
'type': 'nGram',
'min_gram': 3,
'max_gram': 15,
},
'edgengram_tokenizer': {
'type': 'edgeNGram',
'min_gram': 2,
'max_gram': 15,
'side': 'front'
}
},
'filter': {
'ngram': {
'type': 'nGram',
'min_gram': 3,
'max_gram': 15
},
'edgengram': {
'type': 'edgeNGram',
'min_gram': 1,
'max_gram': 15
}
}
}
}
}
def __init__(self, params):
super(ElasticsearchSearchBackend, self).__init__(params)
# Get settings
self.hosts = params.pop('HOSTS', None)
self.index_name = params.pop('INDEX', 'wagtail')
self.timeout = params.pop('TIMEOUT', 10)
if params.pop('ATOMIC_REBUILD', False):
self.rebuilder_class = self.atomic_rebuilder_class
else:
self.rebuilder_class = self.basic_rebuilder_class
# If HOSTS is not set, convert URLS setting to HOSTS
es_urls = params.pop('URLS', ['http://localhost:9200'])
if self.hosts is None:
self.hosts = []
for url in es_urls:
parsed_url = urlparse(url)
use_ssl = parsed_url.scheme == 'https'
port = parsed_url.port or (443 if use_ssl else 80)
http_auth = None
if parsed_url.username is not None and parsed_url.password is not None:
http_auth = (parsed_url.username, parsed_url.password)
self.hosts.append({
'host': parsed_url.hostname,
'port': port,
'url_prefix': parsed_url.path,
'use_ssl': use_ssl,
'verify_certs': use_ssl,
'http_auth': http_auth,
})
# Get Elasticsearch interface
# Any remaining params are passed into the Elasticsearch constructor
self.es = Elasticsearch(
hosts=self.hosts,
timeout=self.timeout,
**params)
def get_index_for_model(self, model):
return self.index_class(self, self.index_name)
def get_index(self):
return self.index_class(self, self.index_name)
def get_rebuilder(self):
return self.rebuilder_class(self.get_index())
def reset_index(self):
# Use the rebuilder to reset the index
self.get_rebuilder().reset_index()
def add_type(self, model):
self.get_index_for_model(model).add_model(model)
def refresh_index(self):
self.get_index().refresh()
def add(self, obj):
self.get_index_for_model(type(obj)).add_item(obj)
def add_bulk(self, model, obj_list):
self.get_index_for_model(model).add_items(model, obj_list)
def delete(self, obj):
self.get_index_for_model(type(obj)).delete_item(obj)
class ElasticSearch(ElasticsearchSearchBackend):
def __init__(self, params):
warnings.warn(
"The wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch has "
"been moved to wagtail.wagtailsearch.backends.elasticsearch.ElasticsearchSearchBackend",
category=RemovedInWagtail18Warning, stacklevel=2
)
super(ElasticSearch, self).__init__(params)
SearchBackend = ElasticsearchSearchBackend
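# Hedged usage sketch (not part of this module): a Django project would
# normally select this backend through the WAGTAILSEARCH_BACKENDS setting;
# the URL and index name below are placeholder assumptions.
#
#   WAGTAILSEARCH_BACKENDS = {
#       'default': {
#           'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch',
#           'URLS': ['http://localhost:9200'],
#           'INDEX': 'wagtail',
#           'TIMEOUT': 10,
#           'ATOMIC_REBUILD': True,
#       },
#   }
#
# HOSTS/URLS, INDEX, TIMEOUT and ATOMIC_REBUILD are popped from ``params`` in
# ElasticsearchSearchBackend.__init__ above; any remaining keys are passed
# straight to the Elasticsearch() client constructor.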
|
|
"""Tests for the storage helper."""
import asyncio
from datetime import timedelta
import json
from unittest.mock import Mock, patch
import pytest
from homeassistant.const import (
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState
from homeassistant.helpers import storage
from homeassistant.util import dt
from tests.common import async_fire_time_changed
MOCK_VERSION = 1
MOCK_VERSION_2 = 2
MOCK_MINOR_VERSION_1 = 1
MOCK_MINOR_VERSION_2 = 2
MOCK_KEY = "storage-test"
MOCK_DATA = {"hello": "world"}
MOCK_DATA2 = {"goodbye": "cruel world"}
@pytest.fixture
def store(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(hass, MOCK_VERSION, MOCK_KEY)
@pytest.fixture
def store_v_1_1(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(
hass, MOCK_VERSION, MOCK_KEY, minor_version=MOCK_MINOR_VERSION_1
)
@pytest.fixture
def store_v_1_2(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(
hass, MOCK_VERSION, MOCK_KEY, minor_version=MOCK_MINOR_VERSION_2
)
@pytest.fixture
def store_v_2_1(hass):
"""Fixture of a store that prevents writing on Home Assistant stop."""
return storage.Store(
hass, MOCK_VERSION_2, MOCK_KEY, minor_version=MOCK_MINOR_VERSION_1
)
async def test_loading(hass, store):
"""Test we can save and load data."""
await store.async_save(MOCK_DATA)
data = await store.async_load()
assert data == MOCK_DATA
async def test_custom_encoder(hass):
"""Test we can save and load data."""
class JSONEncoder(json.JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY, encoder=JSONEncoder)
await store.async_save(Mock())
data = await store.async_load()
assert data == "9"
async def test_loading_non_existing(hass, store):
"""Test we can save and load data."""
with patch("homeassistant.util.json.open", side_effect=FileNotFoundError):
data = await store.async_load()
assert data is None
async def test_loading_parallel(hass, store, hass_storage, caplog):
"""Test we can save and load data."""
hass_storage[store.key] = {"version": MOCK_VERSION, "data": MOCK_DATA}
results = await asyncio.gather(store.async_load(), store.async_load())
assert results[0] == MOCK_DATA
assert results[0] is results[1]
assert caplog.text.count(f"Loading data for {store.key}")
async def test_saving_with_delay(hass, store, hass_storage):
"""Test saving data after a delay."""
store.async_delay_save(lambda: MOCK_DATA, 1)
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_saving_on_final_write(hass, hass_storage):
"""Test delayed saves trigger when we quit Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 5)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.state = CoreState.stopping
await hass.async_block_till_done()
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": MOCK_DATA,
}
async def test_not_delayed_saving_while_stopping(hass, hass_storage):
"""Test delayed saves don't write after the stop event has fired."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
hass.state = CoreState.stopping
store.async_delay_save(lambda: MOCK_DATA, 1)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
assert store.key not in hass_storage
async def test_not_delayed_saving_after_stopping(hass, hass_storage):
"""Test delayed saves don't write after stop if issued before stopping Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
store.async_delay_save(lambda: MOCK_DATA, 10)
assert store.key not in hass_storage
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.state = CoreState.stopping
await hass.async_block_till_done()
assert store.key not in hass_storage
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=15))
await hass.async_block_till_done()
assert store.key not in hass_storage
async def test_not_saving_while_stopping(hass, hass_storage):
"""Test saves don't write when stopping Home Assistant."""
store = storage.Store(hass, MOCK_VERSION, MOCK_KEY)
hass.state = CoreState.stopping
await store.async_save(MOCK_DATA)
assert store.key not in hass_storage
async def test_loading_while_delay(hass, store, hass_storage):
"""Test we load new data even if not written yet."""
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "yes"}
async def test_writing_while_writing_delay(hass, store, hass_storage):
"""Test a write while a write with delay is active."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_multiple_delay_save_calls(hass, store, hass_storage):
"""Test a write while a write with changing delays."""
store.async_delay_save(lambda: {"delay": "yes"}, 1)
store.async_delay_save(lambda: {"delay": "yes"}, 2)
store.async_delay_save(lambda: {"delay": "yes"}, 3)
assert store.key not in hass_storage
await store.async_save({"delay": "no"})
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"delay": "no"},
}
data = await store.async_load()
assert data == {"delay": "no"}
async def test_multiple_save_calls(hass, store, hass_storage):
"""Test multiple write tasks."""
assert store.key not in hass_storage
tasks = [store.async_save({"savecount": savecount}) for savecount in range(6)]
await asyncio.gather(*tasks)
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"savecount": 5},
}
data = await store.async_load()
assert data == {"savecount": 5}
async def test_migrator_no_existing_config(hass, store, hass_storage):
"""Test migrator with no existing config."""
with patch("os.path.isfile", return_value=False), patch.object(
store, "async_load", return_value={"cur": "config"}
):
data = await storage.async_migrator(hass, "old-path", store)
assert data == {"cur": "config"}
assert store.key not in hass_storage
async def test_migrator_existing_config(hass, store, hass_storage):
"""Test migrating existing config."""
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass, "old-path", store, old_conf_load_func=lambda _: {"old": "config"}
)
assert len(mock_remove.mock_calls) == 1
assert data == {"old": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": 1,
"data": data,
}
async def test_migrator_transforming_config(hass, store, hass_storage):
"""Test migrating config to new format."""
async def old_conf_migrate_func(old_config):
"""Migrate old config to new format."""
return {"new": old_config["old"]}
with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
data = await storage.async_migrator(
hass,
"old-path",
store,
old_conf_migrate_func=old_conf_migrate_func,
old_conf_load_func=lambda _: {"old": "config"},
)
assert len(mock_remove.mock_calls) == 1
assert data == {"new": "config"}
assert hass_storage[store.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": 1,
"data": data,
}
async def test_minor_version_default(hass, store, hass_storage):
"""Test minor version default."""
await store.async_save(MOCK_DATA)
assert hass_storage[store.key]["minor_version"] == 1
async def test_minor_version(hass, store_v_1_2, hass_storage):
"""Test minor version."""
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key]["minor_version"] == MOCK_MINOR_VERSION_2
async def test_migrate_major_not_implemented_raises(hass, store, store_v_2_1):
"""Test migrating between major versions fails if not implemented."""
await store_v_2_1.async_save(MOCK_DATA)
with pytest.raises(NotImplementedError):
await store.async_load()
async def test_migrate_minor_not_implemented(
hass, hass_storage, store_v_1_1, store_v_1_2
):
"""Test migrating between minor versions does not fail if not implemented."""
assert store_v_1_1.key == store_v_1_2.key
await store_v_1_1.async_save(MOCK_DATA)
assert hass_storage[store_v_1_1.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_1,
"data": MOCK_DATA,
}
data = await store_v_1_2.async_load()
assert hass_storage[store_v_1_1.key]["data"] == data
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_2,
"data": MOCK_DATA,
}
async def test_migration(hass, hass_storage, store_v_1_2):
"""Test migration."""
calls = 0
class CustomStore(storage.Store):
async def _async_migrate_func(
self, old_major_version, old_minor_version, old_data: dict
):
nonlocal calls
calls += 1
assert old_major_version == store_v_1_2.version
assert old_minor_version == store_v_1_2.minor_version
return old_data
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_2,
"data": MOCK_DATA,
}
assert calls == 0
legacy_store = CustomStore(hass, 2, store_v_1_2.key, minor_version=1)
data = await legacy_store.async_load()
assert calls == 1
assert hass_storage[store_v_1_2.key]["data"] == data
await legacy_store.async_save(MOCK_DATA)
assert hass_storage[legacy_store.key] == {
"key": MOCK_KEY,
"version": 2,
"minor_version": 1,
"data": MOCK_DATA,
}
async def test_legacy_migration(hass, hass_storage, store_v_1_2):
"""Test legacy migration method signature."""
calls = 0
class LegacyStore(storage.Store):
async def _async_migrate_func(self, old_version, old_data: dict):
nonlocal calls
calls += 1
assert old_version == store_v_1_2.version
return old_data
await store_v_1_2.async_save(MOCK_DATA)
assert hass_storage[store_v_1_2.key] == {
"key": MOCK_KEY,
"version": MOCK_VERSION,
"minor_version": MOCK_MINOR_VERSION_2,
"data": MOCK_DATA,
}
assert calls == 0
legacy_store = LegacyStore(hass, 2, store_v_1_2.key, minor_version=1)
data = await legacy_store.async_load()
assert calls == 1
assert hass_storage[store_v_1_2.key]["data"] == data
await legacy_store.async_save(MOCK_DATA)
assert hass_storage[legacy_store.key] == {
"key": MOCK_KEY,
"version": 2,
"minor_version": 1,
"data": MOCK_DATA,
}
async def test_changing_delayed_written_data(hass, store, hass_storage):
"""Test changing data that is written with delay."""
data_to_store = {"hello": "world"}
store.async_delay_save(lambda: data_to_store, 1)
assert store.key not in hass_storage
loaded_data = await store.async_load()
assert loaded_data == data_to_store
assert loaded_data is not data_to_store
loaded_data["hello"] = "earth"
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
assert hass_storage[store.key] == {
"version": MOCK_VERSION,
"minor_version": 1,
"key": MOCK_KEY,
"data": {"hello": "world"},
}
|
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import str
import sys
import pmagpy.pmag as pmag
def main(command_line=True, **kwargs):
"""
NAME
jr6_magic.py
DESCRIPTION
converts JR6 format files to magic_measurements format files
SYNTAX
jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
# -Fsa: specify er_samples format file for appending, default is new er_samples.txt
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
        -ncn NCON: specify sample naming convention (convention 6 not yet implemented)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXXX
[5] site name same as sample
[6] site is entered under a separate column NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
or e-mail ltauxe@ucsd.edu for help.
INPUT
JR6 .txt format file
"""
# initialize some stuff
noave=0
samp_con,Z='1',""
missing=1
demag="N"
er_location_name="unknown"
citation='This study'
args=sys.argv
meth_code="LP-NO"
specnum=1
MagRecs=[]
version_num=pmag.get_version()
Samps=[] # keeps track of sample orientations
user=""
mag_file=""
dir_path='.'
ErSamps=[]
SampOuts=[]
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind=args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file= args[ind+1]
if "-spc" in args:
ind = args.index("-spc")
specnum = int(args[ind+1])
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-A" in args:
noave=1
if "-mcd" in args:
ind=args.index("-mcd")
meth_code=args[ind+1]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
specnum = kwargs.get('specnum', 1)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
# format variables
mag_file = input_dir_path+"/" + mag_file
meas_file = output_dir_path+"/" + meas_file
samp_file = output_dir_path+"/" + samp_file
if specnum!=0:
specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False
else:
Z=samp_con.split("-")[1]
samp_con="7"
ErSampRec,ErSiteRec={},{}
# parse data
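    # The loop below walks each JR6 record as plain text: a header line with
    # the specimen name, demag level and date; a line with the specimen
    # orientation angles; a 'MEAN' line with the Mx/My/Mz components, followed
    # by the intensity and precision; a 'SPEC.' line with the specimen
    # declination/inclination; and finally the geographic dec/inc.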
data=open(mag_file,'r')
line=data.readline()
line=data.readline()
line=data.readline()
while line !='':
parsedLine=line.split()
sampleName=parsedLine[0]
demagLevel=parsedLine[2]
date=parsedLine[3]
line=data.readline()
line=data.readline()
line=data.readline()
line=data.readline()
parsedLine=line.split()
specimenAngleDec=parsedLine[1]
specimenAngleInc=parsedLine[2]
while parsedLine[0] != 'MEAN' :
line=data.readline()
parsedLine=line.split()
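            # blank lines split to an empty list; substitute a placeholder
            # token so the loop condition above can be re-checked safely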
if len(parsedLine) == 0:
parsedLine=["Hello"]
Mx=parsedLine[1]
My=parsedLine[2]
Mz=parsedLine[3]
line=data.readline()
line=data.readline()
parsedLine=line.split()
splitExp = parsedLine[2].split('A')
intensityStr=parsedLine[1] + splitExp[0]
intensity = float(intensityStr)
        # check and see if the precision value is too big and messes with the parsing.
precisionStr=''
if len(parsedLine) == 6: #normal line
precisionStr=parsedLine[5][0:-1]
else:
precisionStr=parsedLine[4][0:-1]
precisionPer = float(precisionStr)
precision=intensity*precisionPer/100
while parsedLine[0] != 'SPEC.' :
line=data.readline()
parsedLine=line.split()
if len(parsedLine) == 0:
parsedLine=["Hello"]
specimenDec=parsedLine[2]
specimenInc=parsedLine[3]
line=data.readline()
line=data.readline()
parsedLine=line.split()
geographicDec=parsedLine[1]
geographicInc=parsedLine[2]
# Add data to various MagIC data tables.
er_specimen_name = sampleName
if specnum!=0:
er_sample_name=er_specimen_name[:specnum]
else:
er_sample_name=er_specimen_name
if int(samp_con) in [1, 2, 3, 4, 5, 7]:
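            # e.g. under the default convention [1], sample 'TG001a' yields site 'TG001'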
er_site_name=pmag.parse_site(er_sample_name,samp_con,Z)
# else:
# if 'er_site_name' in ErSampRec.keys():er_site_name=ErSampRec['er_site_name']
# if 'er_location_name' in ErSampRec.keys():er_location_name=ErSampRec['er_location_name']
        # check the sample list (SampOuts) to see if this sample already exists before adding new sample info
sampleFlag=0
for sampRec in SampOuts:
if sampRec['er_sample_name'] == er_sample_name:
sampleFlag=1
break
if sampleFlag == 0:
ErSampRec['er_sample_name']=er_sample_name
ErSampRec['sample_azimuth']=specimenAngleDec
ErSampRec['sample_dip']=specimenAngleInc
ErSampRec['magic_method_codes']=meth_code
ErSampRec['er_location_name']=er_location_name
ErSampRec['er_site_name']=er_site_name
ErSampRec['er_citation_names']='This study'
SampOuts.append(ErSampRec.copy())
MagRec={}
MagRec['measurement_description']='Date: '+date
MagRec["er_citation_names"]="This study"
MagRec['er_location_name']=er_location_name
MagRec['er_site_name']=er_site_name
MagRec['er_sample_name']=er_sample_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=er_specimen_name
MagRec["treatment_ac_field"]='0'
if demagLevel == 'NRM':
meas_type="LT-NO"
elif demagLevel[0] == 'A':
meas_type="LT-AF-Z"
treat=float(demagLevel[1:])
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
elif demagLevel[0] == 'T':
meas_type="LT-T-Z"
treat=float(demagLevel[1:])
MagRec["treatment_temp"]='%8.3e' % (treat+273.) # temp in kelvin
else:
print("measurement type unknown")
return False
# X=[float(Mx),float(My),float(Mz)]
# Vec=pmag.cart2dir(X)
# MagRec["measurement_magn_moment"]='%10.3e'% (Vec[2]) # Am^2
MagRec["measurement_magn_moment"]=str(intensity*0.025*0.025*0.025) # Am^2 assume 2.5cm cube sample
MagRec["measurement_magn_volume"]=intensityStr
MagRec["measurement_dec"]=specimenDec
MagRec["measurement_inc"]=specimenInc
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec.copy())
        # read lines till end of record
line=data.readline()
line=data.readline()
line=data.readline()
line=data.readline()
line=data.readline()
        # read all the rest of the special characters. Some data files are not consistently formatted.
while (len(line) <=3 and line!=''):
line=data.readline()
#end of data while loop
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(samp_file,SampOuts,'er_samples')
print("sample orientations put in ",samp_file)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
return True
def do_help():
return main.__doc__
if __name__ == "__main__":
main()
|
|
"""
Test helper functions from numba.numpy_support.
"""
from __future__ import print_function
import sys
import numpy as np
import numba.unittest_support as unittest
from numba import config, numpy_support, types
from .support import TestCase, skip_on_numpy_16
class TestFromDtype(TestCase):
def test_number_types(self):
"""
Test from_dtype() and as_dtype() with the various scalar number types.
"""
f = numpy_support.from_dtype
def check(typechar, numba_type):
# Only native ordering and alignment is supported
dtype = np.dtype(typechar)
self.assertIs(f(dtype), numba_type)
self.assertIs(f(np.dtype('=' + typechar)), numba_type)
self.assertEqual(dtype, numpy_support.as_dtype(numba_type))
check('?', types.bool_)
check('f', types.float32)
check('f4', types.float32)
check('d', types.float64)
check('f8', types.float64)
check('F', types.complex64)
check('c8', types.complex64)
check('D', types.complex128)
check('c16', types.complex128)
check('b', types.int8)
check('i1', types.int8)
check('B', types.uint8)
check('u1', types.uint8)
check('h', types.int16)
check('i2', types.int16)
check('H', types.uint16)
check('u2', types.uint16)
check('i', types.int32)
check('i4', types.int32)
check('I', types.uint32)
check('u4', types.uint32)
check('q', types.int64)
check('Q', types.uint64)
for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
'int64', 'uint64', 'intp', 'uintp'):
self.assertIs(f(np.dtype(name)), getattr(types, name))
# Non-native alignments are unsupported (except for 1-byte types)
foreign_align = '>' if sys.byteorder == 'little' else '<'
for letter in 'hHiIlLqQfdFD':
self.assertRaises(NotImplementedError, f,
np.dtype(foreign_align + letter))
def test_string_types(self):
"""
Test from_dtype() and as_dtype() with the character string types.
"""
def check(typestring, numba_type):
# Only native ordering and alignment is supported
dtype = np.dtype(typestring)
self.assertEqual(numpy_support.from_dtype(dtype), numba_type)
self.assertEqual(dtype, numpy_support.as_dtype(numba_type))
check('S10', types.CharSeq(10))
check('a11', types.CharSeq(11))
check('U12', types.UnicodeCharSeq(12))
@skip_on_numpy_16
def check_datetime_types(self, letter, nb_class):
def check(dtype, numba_type, code):
tp = numpy_support.from_dtype(dtype)
self.assertEqual(tp, numba_type)
self.assertEqual(tp.unit_code, code)
self.assertEqual(numpy_support.as_dtype(numba_type), dtype)
self.assertEqual(numpy_support.as_dtype(tp), dtype)
# Unit-less ("generic") type
check(np.dtype(letter), nb_class(''), 14)
def test_datetime_types(self):
"""
Test from_dtype() and as_dtype() with the datetime types.
"""
self.check_datetime_types('M', types.NPDatetime)
def test_timedelta_types(self):
"""
Test from_dtype() and as_dtype() with the timedelta types.
"""
self.check_datetime_types('m', types.NPTimedelta)
def test_struct_types(self):
def check(dtype, fields, size, aligned):
tp = numpy_support.from_dtype(dtype)
self.assertIsInstance(tp, types.Record)
# Only check for dtype equality, as the Numba type may be interned
self.assertEqual(tp.dtype, dtype)
self.assertEqual(tp.fields, fields)
self.assertEqual(tp.size, size)
self.assertEqual(tp.aligned, aligned)
dtype = np.dtype([('a', np.int16), ('b', np.int32)])
check(dtype,
fields={'a': (types.int16, 0),
'b': (types.int32, 2)},
size=6, aligned=False)
dtype = np.dtype([('a', np.int16), ('b', np.int32)], align=True)
check(dtype,
fields={'a': (types.int16, 0),
'b': (types.int32, 4)},
size=8, aligned=True)
dtype = np.dtype([('m', np.int32), ('n', 'S5')])
check(dtype,
fields={'m': (types.int32, 0),
'n': (types.CharSeq(5), 4)},
size=9, aligned=False)
class ValueTypingTestBase(object):
"""
Common tests for the typing of values. Also used by test_special.
"""
def check_number_values(self, func):
"""
Test *func*() with scalar numeric values.
"""
f = func
# Standard Python types get inferred by numpy
self.assertIn(f(1), (types.int32, types.int64))
self.assertIs(f(1.0), types.float64)
self.assertIs(f(1.0j), types.complex128)
self.assertIs(f(True), types.bool_)
self.assertIs(f(False), types.bool_)
# Numpy scalar types get converted by from_dtype()
for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
'int64', 'uint64', 'intc', 'uintc', 'intp', 'uintp',
'float32', 'float64', 'complex64', 'complex128',
'bool_'):
val = getattr(np, name)()
self.assertIs(f(val), getattr(types, name))
def _base_check_datetime_values(self, func, np_type, nb_type):
f = func
for unit in [
'', 'Y', 'M', 'D', 'h', 'm', 's',
'ms', 'us', 'ns', 'ps', 'fs', 'as']:
if unit:
t = np_type(3, unit)
else:
# "generic" datetime / timedelta
t = np_type('Nat')
tp = f(t)
# This ensures the unit hasn't been lost
self.assertEqual(tp, nb_type(unit))
@skip_on_numpy_16
def check_datetime_values(self, func):
"""
Test *func*() with np.datetime64 values.
"""
self._base_check_datetime_values(func, np.datetime64, types.NPDatetime)
@skip_on_numpy_16
def check_timedelta_values(self, func):
"""
Test *func*() with np.timedelta64 values.
"""
self._base_check_datetime_values(func, np.timedelta64, types.NPTimedelta)
class TestArrayScalars(ValueTypingTestBase, TestCase):
def test_number_values(self):
"""
Test map_arrayscalar_type() with scalar number values.
"""
self.check_number_values(numpy_support.map_arrayscalar_type)
def test_datetime_values(self):
"""
Test map_arrayscalar_type() with np.datetime64 values.
"""
f = numpy_support.map_arrayscalar_type
self.check_datetime_values(f)
# datetime64s with a non-one factor shouldn't be supported
t = np.datetime64('2014', '10Y')
with self.assertRaises(NotImplementedError):
f(t)
def test_timedelta_values(self):
"""
Test map_arrayscalar_type() with np.timedelta64 values.
"""
f = numpy_support.map_arrayscalar_type
self.check_timedelta_values(f)
# timedelta64s with a non-one factor shouldn't be supported
t = np.timedelta64(10, '10Y')
with self.assertRaises(NotImplementedError):
f(t)
class FakeUFunc(object):
__slots__ = ('nin', 'nout', 'types', 'ntypes')
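    # Minimal stand-in for a real np.ufunc: it exposes just the nin/nout/
    # types/ntypes attributes that ufunc_find_matching_loop() needs.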
def __init__(self, types):
self.types = types
in_, out = self.types[0].split('->')
self.nin = len(in_)
self.nout = len(out)
self.ntypes = len(types)
for tp in types:
            in_, out = tp.split('->')
assert len(in_) == self.nin
assert len(out) == self.nout
# Typical types for np.add, np.multiply
_add_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I',
'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d',
'gg->g', 'FF->F', 'DD->D', 'GG->G', 'Mm->M', 'mm->m', 'mM->M',
'OO->O']
_mul_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I',
'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d',
'gg->g', 'FF->F', 'DD->D', 'GG->G', 'mq->m', 'qm->m', 'md->m',
'dm->m', 'OO->O']
class TestUFuncs(TestCase):
"""
Test ufunc helpers.
"""
def test_ufunc_find_matching_loop(self):
f = numpy_support.ufunc_find_matching_loop
np_add = FakeUFunc(_add_types)
np_mul = FakeUFunc(_mul_types)
def check(ufunc, input_types, sigs, output_types=()):
"""
Check that ufunc_find_matching_loop() finds one of the given
*sigs* for *ufunc*, *input_types* and optional *output_types*.
"""
loop = f(ufunc, input_types + output_types)
self.assertTrue(loop)
if isinstance(sigs, str):
sigs = (sigs,)
self.assertIn(loop.ufunc_sig, sigs)
self.assertEqual(len(loop.numpy_inputs), len(loop.inputs))
self.assertEqual(len(loop.numpy_outputs), len(loop.outputs))
if not output_types:
# Add explicit outputs and check the result is the same
loop_explicit = f(ufunc, list(input_types) + loop.outputs)
self.assertEqual(loop_explicit, loop)
else:
self.assertEqual(loop.outputs, list(output_types))
# Round-tripping inputs and outputs
loop_rt = f(ufunc, loop.inputs + loop.outputs)
self.assertEqual(loop_rt, loop)
return loop
def check_exact(ufunc, input_types, sigs, output_types=()):
loop = check(ufunc, input_types, sigs, output_types)
self.assertEqual(loop.inputs, list(input_types))
def check_no_match(ufunc, input_types):
loop = f(ufunc, input_types)
self.assertIs(loop, None)
# Exact matching for number types
check_exact(np_add, (types.bool_, types.bool_), '??->?')
check_exact(np_add, (types.int8, types.int8), 'bb->b')
check_exact(np_add, (types.uint8, types.uint8), 'BB->B')
check_exact(np_add, (types.int64, types.int64), ('ll->l', 'qq->q'))
check_exact(np_add, (types.uint64, types.uint64), ('LL->L', 'QQ->Q'))
check_exact(np_add, (types.float32, types.float32), 'ff->f')
check_exact(np_add, (types.float64, types.float64), 'dd->d')
check_exact(np_add, (types.complex64, types.complex64), 'FF->F')
check_exact(np_add, (types.complex128, types.complex128), 'DD->D')
# Exact matching for datetime64 and timedelta64 types
check_exact(np_add, (types.NPTimedelta('s'), types.NPTimedelta('s')),
'mm->m', output_types=(types.NPTimedelta('s'),))
check_exact(np_add, (types.NPTimedelta('ms'), types.NPDatetime('s')),
'mM->M', output_types=(types.NPDatetime('ms'),))
check_exact(np_add, (types.NPDatetime('s'), types.NPTimedelta('s')),
'Mm->M', output_types=(types.NPDatetime('s'),))
check_exact(np_mul, (types.NPTimedelta('s'), types.int64),
'mq->m', output_types=(types.NPTimedelta('s'),))
check_exact(np_mul, (types.float64, types.NPTimedelta('s')),
'dm->m', output_types=(types.NPTimedelta('s'),))
# Mix and match number types, with casting
check(np_add, (types.bool_, types.int8), 'bb->b')
check(np_add, (types.uint8, types.bool_), 'BB->B')
check(np_add, (types.int16, types.uint16), 'ii->i')
check(np_add, (types.complex64, types.float64), 'DD->D')
check(np_add, (types.float64, types.complex64), 'DD->D')
# With some timedelta64 arguments as well
check(np_mul, (types.NPTimedelta('s'), types.int32),
'mq->m', output_types=(types.NPTimedelta('s'),))
check(np_mul, (types.NPTimedelta('s'), types.uint32),
'mq->m', output_types=(types.NPTimedelta('s'),))
check(np_mul, (types.NPTimedelta('s'), types.float32),
'md->m', output_types=(types.NPTimedelta('s'),))
check(np_mul, (types.float32, types.NPTimedelta('s')),
'dm->m', output_types=(types.NPTimedelta('s'),))
# No match
check_no_match(np_add, (types.NPDatetime('s'), types.NPDatetime('s')))
# No implicit casting from int64 to timedelta64 (Numpy would allow
# this).
check_no_match(np_add, (types.NPTimedelta('s'), types.int64))
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import division, absolute_import, print_function
import sys
import warnings
import numpy as np
from numpy import random
from numpy.compat import asbytes
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_array_equal, assert_array_almost_equal,
suppress_warnings)
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2 ** 16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2 ** 16, dtype=np.bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
        # We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but np.bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(np.random.choice(6, s, replace=True).shape, s)
        assert_equal(np.random.choice(6, s, replace=False).shape, s)
        assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_logistic_0(self):
        assert_equal(np.random.logistic(scale=0), 0)
        assert_raises(ValueError, np.random.logistic, scale=-0.)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
        # Deliberately non-symmetric (and not positive semi-definite) covariance.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg] * 10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig] * 10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10 ** 6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(TestCase):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setUp(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
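    # Every test below re-seeds, draws three values with one parameter
    # broadcast to length 3, and expects the same three values regardless of
    # which argument supplied the length-3 sequence; bad parameter values
    # must raise ValueError in either position.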
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(TestCase):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
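    # check_function draws with RandomState(seed) once per seed in worker
    # threads and once serially, then requires both outputs to match
    # (almost-equal on 32-bit Windows, where threads can change x87 precision).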
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(TestCase):
def setUp(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
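    # Single-element array arguments (alone or mixed with scalars) should
    # yield outputs of shape (1,), not scalars.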
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
self.assertEqual(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
self.assertEqual(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
self.assertEqual(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
|
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import server_external_events \
as server_external_events_v21
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel as uuids
fake_instances = {
'00000000-0000-0000-0000-000000000001': objects.Instance(id=1,
uuid='00000000-0000-0000-0000-000000000001', host='host1'),
'00000000-0000-0000-0000-000000000002': objects.Instance(id=2,
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': objects.Instance(id=3,
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
'00000000-0000-0000-0000-000000000004': objects.Instance(id=4,
uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
fake_cells = [objects.CellMapping(uuid=uuids.cell1, database_connection="db1"),
objects.CellMapping(uuid=uuids.cell2, database_connection="db2")]
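# Alternate the fake instances between the two fake cells by instance id.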
fake_instance_mappings = [
objects.InstanceMapping(cell_mapping=fake_cells[instance.id % 2],
instance_uuid=instance.uuid)
for instance in fake_instances.values()]
@classmethod
def fake_get_by_filters(cls, context, filters, expected_attrs=None):
if expected_attrs:
# This is a regression check for bug 1645479.
expected_attrs_set = set(expected_attrs)
full_expected_attrs_set = set(instance_obj.INSTANCE_OPTIONAL_ATTRS)
assert expected_attrs_set.issubset(full_expected_attrs_set), \
('%s is not a subset of %s' % (expected_attrs_set,
full_expected_attrs_set))
l = objects.InstanceList(objects=[
inst for inst in fake_instances.values()
if inst.uuid in filters['uuid']])
return l
@classmethod
def fake_get_by_instance_uuids(cls, context, uuids):
mappings = [im for im in fake_instance_mappings
if im.instance_uuid in uuids]
return objects.InstanceMappingList(objects=mappings)
@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids',
fake_get_by_instance_uuids)
@mock.patch('nova.objects.InstanceList.get_by_filters',
fake_get_by_filters)
class ServerExternalEventsTestV21(test.NoDBTestCase):
server_external_events = server_external_events_v21
invalid_error = exception.ValidationError
wsgi_api_version = '2.1'
def setUp(self):
super(ServerExternalEventsTestV21, self).setUp()
self.api = \
self.server_external_events.ServerExternalEventsController()
self.event_1 = {'name': 'network-vif-plugged',
'tag': 'foo',
'server_uuid': fake_instance_uuids[0],
'status': 'completed'}
self.event_2 = {'name': 'network-changed',
'server_uuid': fake_instance_uuids[1]}
self.default_body = {'events': [self.event_1, self.event_2]}
self.resp_event_1 = dict(self.event_1)
self.resp_event_1['code'] = 200
self.resp_event_2 = dict(self.event_2)
self.resp_event_2['code'] = 200
self.resp_event_2['status'] = 'completed'
self.default_resp_body = {'events': [self.resp_event_1,
self.resp_event_2]}
self.req = fakes.HTTPRequest.blank('', use_admin_context=True,
version=self.wsgi_api_version)
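    # Helper: invokes the controller's create() with
    # compute_api.external_instance_event mocked out, verifies exactly which
    # instance uuids and event names reach the compute API, and returns the
    # response body and HTTP status code.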
def _assert_call(self, body, expected_uuids, expected_events):
with mock.patch.object(self.api.compute_api,
'external_instance_event') as api_method:
response = self.api.create(self.req, body=body)
result = response.obj
code = response._code
self.assertEqual(1, api_method.call_count)
call = api_method.call_args_list[0]
args = call[0]
call_instances = args[1]
call_events = args[2]
self.assertEqual(set(expected_uuids),
set([instance.uuid for instance in call_instances]))
self.assertEqual(len(expected_uuids), len(call_instances))
self.assertEqual(set(expected_events),
set([event.name for event in call_events]))
self.assertEqual(len(expected_events),
len(call_events))
return result, code
def test_create(self):
result, code = self._assert_call(self.default_body,
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
body = self.default_body
body['events'][1]['server_uuid'] = MISSING_UUID
result, code = self._assert_call(body, [fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_event_instance_has_no_host(self):
body = self.default_body
body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
# the instance without host should not be passed to the compute layer
result, code = self._assert_call(body,
[fake_instance_uuids[1]],
['network-changed'])
self.assertEqual(422, result['events'][0]['code'])
self.assertEqual('failed', result['events'][0]['status'])
self.assertEqual(200, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
body['events'][1]['server_uuid'] = MISSING_UUID
self.assertRaises(webob.exc.HTTPNotFound,
self.api.create, self.req, body=body)
def test_create_bad_status(self):
body = self.default_body
body['events'][1]['status'] = 'foo'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_extra_gorp(self):
body = self.default_body
body['events'][0]['foobar'] = 'bad stuff'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_events(self):
body = {'events': 'foo'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_body(self):
body = {'foo': 'bar'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_unknown_events(self):
        self.event_1['name'] = 'unknown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids',
fake_get_by_instance_uuids)
@mock.patch('nova.objects.InstanceList.get_by_filters',
fake_get_by_filters)
class ServerExternalEventsTestV251(ServerExternalEventsTestV21):
wsgi_api_version = '2.51'
def test_create_with_missing_tag(self):
body = self.default_body
body['events'][1]['name'] = 'volume-extended'
result, code = self._assert_call(body,
[fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual('completed', result['events'][0]['status'])
self.assertEqual(400, result['events'][1]['code'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(207, code)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import datetime
import json
import time
import types
from cStringIO import StringIO
from functools import wraps
from struct import Struct
try:
from google.appengine.api import users
from google.appengine.ext import db
except ImportError: # Allow running outside google app engine
pass
try:
import cPickle as pickle
except ImportError:
import pickle
_serializers = dict()
_ushortStruct = Struct("<H")
_intStruct = Struct("<i")
_longStruct = Struct("<q")
_longLongStruct = Struct("<q")
_doubleStruct = Struct("<d")
class CustomProperty(object):
@staticmethod
def get_serializer():
raise NotImplementedError()
@staticmethod
def get_deserializer():
raise NotImplementedError()
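# Codec registry: register() maps a Python type (or a List(t) marker) to a
# (serializer, deserializer) pair which serialize()/deserialize() look up.
# CustomProperty subclasses plug their own codec pair into the model
# (de)serializers further below.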
def register(type_, serializer, deserializer):
_serializers[type_] = (serializer, deserializer)
def serialize(type_, obj):
stream = StringIO()
_serializers[type_][0](stream, obj)
return stream.getvalue()
def deserialize(type_, stream):
if isinstance(stream, (str, unicode)):
stream = StringIO(stream)
if isinstance(stream, db.Blob):
stream = StringIO(str(stream))
return _serializers[type_][1](stream)
def get_serializer(type_):
return _serializers[type_][0]
def get_deserializer(type_):
return _serializers[type_][1]
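# The serializer/deserializer decorators add None handling to every codec:
# a single '0' byte is written for None, otherwise a '1' byte followed by
# the wrapped function's encoding of the value.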
def serializer(f):
@wraps(f)
def wrapped(stream, obj, *args, **kwargs):
if obj is None:
stream.write('0')
else:
stream.write('1')
f(stream, obj, *args, **kwargs)
return wrapped
def deserializer(f):
@wraps(f)
def wrapped(stream, *args, **kwargs):
if stream.read(1) == '0':
return None
else:
return f(stream, *args, **kwargs)
return wrapped
@serializer
def s_str(stream, obj):
stream.write(_intStruct.pack(len(obj)))
stream.write(obj)
@deserializer
def ds_str(stream):
(size,) = _intStruct.unpack(stream.read(_intStruct.size))
return stream.read(size)
register(str, s_str, ds_str)
@serializer
def s_unicode(stream, obj):
s_str(stream, obj.encode("UTF-8"))
@deserializer
def ds_unicode(stream):
s = ds_str(stream)
return None if s is None else s.decode("UTF-8")
register(unicode, s_unicode, ds_unicode)
@serializer
def s_bool(stream, obj):
stream.write('1' if obj else '0')
@deserializer
def ds_bool(stream):
return stream.read(1) == '1'
register(bool, s_bool, ds_bool)
@serializer
def s_ushort(stream, obj):
stream.write(_ushortStruct.pack(obj))
@deserializer
def ds_ushort(stream):
(value,) = _ushortStruct.unpack(stream.read(_ushortStruct.size))
return value
@serializer
def s_long(stream, obj):
stream.write(_longStruct.pack(obj))
@deserializer
def ds_long(stream):
(value,) = _longStruct.unpack(stream.read(_longStruct.size))
return value
register(int, s_long, ds_long)
@serializer
def s_long_long(stream, obj):
stream.write(_longLongStruct.pack(obj))
@deserializer
def ds_long_long(stream):
(value,) = _longLongStruct.unpack(stream.read(_longLongStruct.size))
return value
register(long, s_long_long, ds_long_long)
@serializer
def s_float(stream, obj):
stream.write(_doubleStruct.pack(obj))
@deserializer
def ds_float(stream):
(value,) = _doubleStruct.unpack(stream.read(_doubleStruct.size))
return value
register(float, s_float, ds_float)
@serializer
def s_dict(stream, obj):
s_unicode(stream, json.dumps(obj))
@deserializer
def ds_dict(stream):
return json.loads(ds_unicode(stream))
register(dict, s_dict, ds_dict)
@serializer
def s_datetime(stream, obj):
s_long(stream, int(time.mktime(obj.timetuple())))
@deserializer
def ds_datetime(stream):
return datetime.datetime.fromtimestamp(ds_long(stream))
register(datetime.datetime, s_datetime, ds_datetime)
@serializer
def s_key(stream, obj):
s_str(stream, str(obj))
@deserializer
def ds_key(stream):
return db.Key(ds_str(stream))
if 'db' in locals():
register(db.Key, s_key, ds_key)
@serializer
def s_any(stream, obj):
pickle.dump(obj, stream, protocol=pickle.HIGHEST_PROTOCOL)
@deserializer
def ds_any(stream):
return pickle.load(stream)
@serializer
def s_user(stream, obj):
s_unicode(stream, obj.email())
@deserializer
def ds_user(stream):
return users.User(ds_unicode(stream))
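# Cache the sorted property names and a hash of that list on the model class;
# the hash is written into every serialized model so that stale blobs are
# detected at load time (see model_deserializer).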
def _get_model_properties(model):
props = model.properties()
if not hasattr(model, 'MC_ATTRIBUTES_HASH'):
prop_keys = sorted(props.keys())
prop_key_hash = hash(','.join(prop_keys))
model.MC_ATTRIBUTES_HASH = prop_key_hash
model.MC_ATTRIBUTES = prop_keys
else:
prop_keys = model.MC_ATTRIBUTES
prop_key_hash = model.MC_ATTRIBUTES_HASH
return prop_key_hash, prop_keys, props
@serializer
def s_model(stream, obj, clazz=None):
if clazz is None:
clazz = obj.__class__
hash_, keys, properties = _get_model_properties(clazz)
s_long(stream, hash_)
s_str(stream, str(obj.key()))
for key in keys:
prop = properties[key]
value = getattr(obj, key)
if prop.__class__ in (db.StringProperty, db.PostalAddressProperty, db.EmailProperty, db.PhoneNumberProperty):
s_unicode(stream, value)
elif prop.__class__ in (db.IntegerProperty, db.RatingProperty):
s_long(stream, value)
elif prop.__class__ == db.DateTimeProperty:
s_datetime(stream, value)
elif prop.__class__ == db.UserProperty:
s_str(stream, value.email() if value else None)
elif prop.__class__ == db.BooleanProperty:
s_bool(stream, value)
elif prop.__class__ == db.TextProperty:
s_unicode(stream, value)
elif prop.__class__ == db.StringListProperty:
s_long(stream, len(value))
for s in value:
s_unicode(stream, s)
elif prop.__class__ == db.ListProperty and prop.item_type == bool:
s_long(stream, len(value))
for b in value:
s_bool(stream, b)
elif prop.__class__ == db.ListProperty and (prop.item_type == int or prop.item_type == long):
s_long(stream, len(value))
for i in value:
s_long(stream, i)
elif prop.__class__ == db.ListProperty and prop.item_type == users.User:
s_long(stream, len(value))
for u in value:
s_user(stream, u)
elif isinstance(prop, CustomProperty):
prop.get_serializer()(stream, value)
elif prop.__class__ == db.polymodel._ClassKeyProperty:
continue
else:
raise NotImplementedError("Can not serialize %s instances" % prop.__class__)
@deserializer
def ds_model(stream, cls):
return model_deserializer(stream, cls)
def model_deserializer(stream, cls):
hash_, keys, properties = _get_model_properties(cls)
inst_hash = ds_long(stream)
if hash_ != inst_hash:
raise SerializedObjectOutOfDateException()
kwargs = dict()
model_key = ds_str(stream)
for property_name in keys:
prop = properties[property_name]
if prop.__class__ in (db.StringProperty, db.PostalAddressProperty, db.EmailProperty, db.PhoneNumberProperty):
value = ds_unicode(stream)
elif prop.__class__ in (db.IntegerProperty, db.RatingProperty):
value = ds_long(stream)
elif prop.__class__ == db.DateTimeProperty:
value = ds_datetime(stream)
elif prop.__class__ == db.UserProperty:
value = ds_str(stream)
if value:
value = users.User(value)
elif prop.__class__ == db.BooleanProperty:
value = ds_bool(stream)
elif prop.__class__ == db.TextProperty:
value = ds_unicode(stream)
elif prop.__class__ == db.StringListProperty:
length = ds_long(stream)
value = [ds_unicode(stream) for _ in xrange(length)]
elif prop.__class__ == db.ListProperty and prop.item_type == bool:
length = ds_long(stream)
value = [ds_bool(stream) for _ in xrange(length)]
elif prop.__class__ == db.ListProperty and (prop.item_type == int or prop.item_type == long):
length = ds_long(stream)
value = [ds_long(stream) for _ in xrange(length)]
elif prop.__class__ == db.ListProperty and prop.item_type == users.User:
length = ds_long(stream)
value = [ds_user(stream) for _ in xrange(length)]
elif isinstance(prop, CustomProperty):
value = prop.get_deserializer()(stream)
elif prop.__class__ == db.polymodel._ClassKeyProperty:
continue
else:
raise NotImplementedError("Can not deserialize %s instances" % prop.__class__)
kwargs[property_name] = value
return cls(key=db.Key(model_key), **kwargs)
def get_list_serializer(func):
@serializer
def s_list(stream, obj):
if isinstance(obj, types.GeneratorType):
obj = list(obj)
stream.write(_intStruct.pack(len(obj)))
for o in obj:
func(stream, o)
return s_list
def get_list_deserializer(func, needsVersionArg=False):
if needsVersionArg:
@deserializer
def ds_list_version(stream, version):
(size,) = _intStruct.unpack(stream.read(_intStruct.size))
return [func(stream, version) for _ in xrange(size)]
return ds_list_version
else:
@deserializer
def ds_list(stream):
(size,) = _intStruct.unpack(stream.read(_intStruct.size))
return [func(stream) for _ in xrange(size)]
return ds_list
class List(object):
def __init__(self, type_):
self.type = type_
def __hash__(self):
return hash("List") + hash(self.type)
def __eq__(self, other):
return hash(self) == hash(other)
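# List(t) is a hashable marker, so typed lists can be registered in the same
# codec table as plain types, as done for the list codecs below.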
s_str_list = get_list_serializer(s_str)
ds_str_list = get_list_deserializer(ds_str)
register(List(str), s_str_list, ds_str_list)
s_unicode_list = get_list_serializer(s_unicode)
ds_unicode_list = get_list_deserializer(ds_unicode)
register(List(unicode), s_unicode_list, ds_unicode_list)
s_bool_list = get_list_serializer(s_bool)
ds_bool_list = get_list_deserializer(ds_bool)
register(List(bool), s_bool_list, ds_bool_list)
s_long_list = get_list_serializer(s_long)
ds_long_list = get_list_deserializer(ds_long)
register(List(long), s_long_list, ds_long_list)
s_float_list = get_list_serializer(s_float)
ds_float_list = get_list_deserializer(ds_float)
register(List(float), s_float_list, ds_float_list)
if 'users' in locals():
register(users.User, s_user, ds_user)
s_user_list = get_list_serializer(s_user)
ds_user_list = get_list_deserializer(ds_user)
register(List(users.User), s_user_list, ds_user_list)
class SerializedObjectOutOfDateException(Exception):
pass
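# Illustrative round-trip (not part of the module; values are arbitrary):
#
#   data = serialize(unicode, u'hello')
#   assert deserialize(unicode, data) == u'hello'
#
#   data = serialize(List(long), [1L, 2L, 3L])
#   assert deserialize(List(long), data) == [1L, 2L, 3L]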
|
|
#!/usr/bin/env python
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
from __future__ import print_function, unicode_literals
import argparse
import codecs
import difflib
import fnmatch
import io
import multiprocessing
import os
import posixpath
import signal
import subprocess
import sys
import traceback
import tempfile
from functools import partial
from lib.util import get_buildtools_executable
DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,mm'
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for f in files:
if recursive and os.path.isdir(f):
for dirpath, dnames, fnames in os.walk(f):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
dnames[:] = [
x for x in dnames
if
not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [
x for x in fpaths if not fnmatch.fnmatch(x, pattern)
]
for fp in fpaths:
                    ext = os.path.splitext(fp)[1][1:]
if ext in extensions:
out.append(fp)
else:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
return out
def make_diff(diff_file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile='a/{}'.format(diff_file),
tofile='b/{}'.format(diff_file),
n=3))
class DiffError(Exception):
def __init__(self, message, errs=None):
super(DiffError, self).__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super(UnexpectedError, self).__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file_name):
try:
ret = run_clang_format_diff(args, file_name)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError('{}: {}: {}'.format(
file_name, e.__class__.__name__, e), e)
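# Runs clang-format on a single file. With --fix the file is rewritten in
# place and no diff is returned; otherwise the formatted output is captured
# and compared with the original to build a unified diff.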
def run_clang_format_diff(args, file_name):
try:
with io.open(file_name, 'r', encoding='utf-8') as f:
original = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable, file_name]
if args.fix:
invocation.append('-i')
try:
proc = subprocess.Popen(
' '.join(invocation),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=True)
except OSError as exc:
raise DiffError(str(exc))
proc_stdout = proc.stdout
proc_stderr = proc.stderr
if sys.version_info[0] == 3:
proc_stdout = proc_stdout.detach()
proc_stderr = proc_stderr.detach()
# make the pipes compatible with Python 3,
# reading lines should output unicode
encoding = 'utf-8'
proc_stdout = codecs.getreader(encoding)(proc_stdout)
proc_stderr = codecs.getreader(encoding)(proc_stderr)
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError("clang-format exited with status {}: '{}'".format(
proc.returncode, file_name), errs)
if args.fix:
return None, errs
if sys.platform == 'win32':
file_name = file_name.replace(os.sep, posixpath.sep)
return make_diff(file_name, original, outs), errs
def bold_red(s):
return '\x1b[1m\x1b[31m' + s + '\x1b[0m'
def colorize(diff_lines):
def bold(s):
return '\x1b[1m' + s + '\x1b[0m'
def cyan(s):
return '\x1b[36m' + s + '\x1b[0m'
def green(s):
return '\x1b[32m' + s + '\x1b[0m'
def red(s):
return '\x1b[31m' + s + '\x1b[0m'
for line in diff_lines:
if line[:4] in ['--- ', '+++ ']:
yield bold(line)
elif line.startswith('@@ '):
yield cyan(line)
elif line.startswith('+'):
yield green(line)
elif line.startswith('-'):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
if sys.version_info[0] < 3:
sys.stdout.writelines((l.encode('utf-8') for l in diff_lines))
else:
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = 'error:'
if use_colors:
error_text = bold_red(error_text)
print("{}: {} {}".format(prog, error_text, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--clang-format-executable',
metavar='EXECUTABLE',
help='path to the clang-format executable',
default=get_buildtools_executable('clang-format'))
parser.add_argument(
'--extensions',
help='comma separated list of file extensions (default: {})'.format(
DEFAULT_EXTENSIONS),
default=DEFAULT_EXTENSIONS)
parser.add_argument(
'--fix',
help='if specified, reformat files in-place',
action='store_true')
parser.add_argument(
'-r',
'--recursive',
action='store_true',
help='run recursively over directories')
parser.add_argument('files', metavar='file', nargs='+')
parser.add_argument(
'-q',
'--quiet',
action='store_true')
parser.add_argument(
'-c',
'--changed',
action='store_true',
help='only run on changed files')
parser.add_argument(
'-j',
metavar='N',
type=int,
default=0,
help='run N clang-format jobs in parallel'
' (default number of cpus + 1)')
parser.add_argument(
'--color',
default='auto',
choices=['auto', 'always', 'never'],
help='show colored diff (default: auto)')
parser.add_argument(
'-e',
'--exclude',
metavar='PATTERN',
action='append',
default=[],
help='exclude paths matching the given glob-like pattern(s)'
' from recursive search')
args = parser.parse_args()
    # use default signal handling so that, like diff, we exit with the SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == 'always':
colored_stdout = True
colored_stderr = True
elif args.color == 'auto':
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
retcode = ExitStatus.SUCCESS
parse_files = []
if args.changed:
popen = subprocess.Popen(
'git diff --name-only --cached',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True
)
for line in popen.stdout:
file_name = line.rstrip()
# don't check deleted files
if os.path.isfile(file_name):
parse_files.append(file_name)
else:
parse_files = args.files
files = list_files(
parse_files,
recursive=args.recursive,
exclude=args.exclude,
extensions=args.extensions.split(','))
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if not args.fix:
patch_file = tempfile.NamedTemporaryFile(delete=False,
prefix='electron-format-')
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(
partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.fix:
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
for line in outs:
patch_file.write(line.encode('utf-8'))
patch_file.write('\n'.encode('utf-8'))
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
if not args.fix:
if patch_file.tell() == 0:
patch_file.close()
os.unlink(patch_file.name)
else:
print("\nTo patch these files, run:\n$ git apply {}\n"
.format(patch_file.name))
return retcode
if __name__ == '__main__':
sys.exit(main())
|
|
'''
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
Modified by whale, 7 January 2015
'''
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
import cPickle
# TL; DR, the main training process starts on line: 250,
# you may want to start reading the code from there
##############################################################################
# parameters #################################################################
##############################################################################
# A, paths
train = 'train.csv' # path to training file
test = 'train_day31.csv' # path to testing file
submission = 'logisticonehot3.csv' # path of the submission file to be written
# B, model
alpha = .45 # learning rate
beta = 1. # smoothing parameter for adaptive learning rate
L1 = 0. # L1 regularization, larger value means more regularized
L2 = 43. # L2 regularization, larger value means more regularized
# C, feature/hash trick
D = 636704+1 # number of weights to use
interaction = False # whether to enable poly2 feature interactions
# D, training/validation
epoch = 3 # learn training data for N passes
holdafter = None # data after date N (exclusive) are used as validation
holdout = None # use every N training instance for holdout validation
##############################################################################
# class, function, generator definitions #####################################
##############################################################################
class ftrl_proximal(object):
''' Our main algorithm: Follow the regularized leader - proximal
In short,
this is an adaptive-learning-rate sparse logistic-regression with
efficient L1-L2-regularization
Reference:
http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf
'''
def __init__(self, alpha, beta, L1, L2, D, interaction):
# parameters
self.alpha = alpha
self.beta = beta
self.L1 = L1
self.L2 = L2
# feature related parameters
self.D = D
self.interaction = interaction
# model
# n: squared sum of past gradients
# z: weights
# w: lazy weights
self.n = [0.] * D
self.z = [0.] * D
self.w = {}
def _indices(self, x):
''' A helper generator that yields the indices in x
The purpose of this generator is to make the following
code a bit cleaner when doing feature interaction.
'''
# first yield index of the bias term
yield 0
# then yield the normal indices
for index in x:
yield index
# now yield interactions (if applicable)
if self.interaction:
D = self.D
L = len(x)
x = sorted(x)
for i in xrange(L):
for j in xrange(i+1, L):
# one-hot encode interactions with hash trick
yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D
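    # Worked example (illustrative, feature indices made up): with interaction
    # enabled and x = [5, 9], this generator yields
    #   0                         (bias term)
    #   5, 9                      (the raw feature indices)
    #   abs(hash('5_9')) % D      (the hashed pairwise interaction index)
    # With interaction disabled, only the bias and the raw indices are yielded.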
def predict(self, x):
''' Get probability estimation on x
INPUT:
x: features
OUTPUT:
probability of p(y = 1 | x; w)
'''
# parameters
alpha = self.alpha
beta = self.beta
L1 = self.L1
L2 = self.L2
# model
n = self.n
z = self.z
w = {}
# wTx is the inner product of w and x
wTx = 0.
for i in self._indices(x):
sign = -1. if z[i] < 0 else 1. # get sign of z[i]
# build w on the fly using z and n, hence the name - lazy weights
            # we do this at prediction rather than update time because it
            # means we never have to store the complete w
if sign * z[i] <= L1:
# w[i] vanishes due to L1 regularization
w[i] = 0.
else:
# apply prediction time L1, L2 regularization to z and get w
w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)
wTx += w[i]
# cache the current w for update stage
self.w = w
# bounded sigmoid function, this is the probability estimation
return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))
def update(self, x, p, y):
''' Update model using x, p, y
INPUT:
x: feature, a list of indices
p: click probability prediction of our model
y: answer
MODIFIES:
self.n: increase by squared gradient
self.z: weights
'''
# parameter
alpha = self.alpha
# model
n = self.n
z = self.z
w = self.w
# gradient under logloss
g = p - y
# update z and n
for i in self._indices(x):
sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha
z[i] += g - sigma * w[i]
n[i] += g * g
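# Minimal usage sketch for the class above (illustrative only; the parameter
# values and feature indices below are made up):
#
#   learner = ftrl_proximal(alpha=.1, beta=1., L1=0., L2=1., D=2 ** 20, interaction=False)
#   x = [12, 307, 94041]          # hashed/one-hot feature indices for one instance
#   p = learner.predict(x)        # estimated probability of y == 1
#   learner.update(x, p, y=1.)    # one online gradient step under logloss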
def logloss(p, y):
''' FUNCTION: Bounded logloss
INPUT:
p: our prediction
y: real answer
OUTPUT:
logarithmic loss of p given y
'''
p = max(min(p, 1. - 10e-15), 10e-15)
return -log(p) if y == 1. else -log(1. - p)
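# Worked example: logloss(0.9, 1.) == -log(0.9) ~= 0.105, while a confidently
# wrong prediction such as logloss(0.01, 1.) == -log(0.01) ~= 4.6 is penalized
# far more heavily. The clipping above keeps the loss finite when p hits 0 or 1.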
def data(path, D, indexdic):
''' GENERATOR: Apply hash-trick to the original csv row
and for simplicity, we one-hot-encode everything
INPUT:
path: path to training or testing file
D: the max index that we can hash to
YIELDS:
ID: id of the instance, mainly useless
x: a list of hashed and one-hot-encoded 'indices'
we only need the index since all values are either 0 or 1
y: y = 1 if we have a click, else we have y = 0
'''
for t, row in enumerate(DictReader(open(path))):
# process id
ID = row['id']
del row['id']
# process clicks
y = 0.
if 'click' in row:
if row['click'] == '1':
y = 1.
del row['click']
# extract date
date = int(row['hour'][4:6])
dayofweek = (date-19)%7
# turn hour really into hour, it was originally YYMMDDHH
row['hour'] = str(int(row['hour'][6:]))
# build x
x = []
x.append(indexdic['day'][str(dayofweek)])
for key in row:
value = row[key]
# one-hot encode everything with hash trick
#index = abs(hash(key + '_' + value)) % D
if value in indexdic[key]:
index = indexdic[key][value]
x.append(index)
elif 'other' in indexdic[key]:
index = indexdic[key]['other']
x.append(index)
yield t, date, ID, x, y
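# Illustrative shape of one yielded tuple (the values are made up):
#   t    -> running row counter, e.g. 41
#   date -> day of month parsed from the 'hour' field, e.g. 21
#   ID   -> the raw id string from the csv row
#   x    -> one-hot feature indices, e.g. [3, 17, 523, 90412] (day-of-week index first)
#   y    -> 1. for a click, 0. otherwise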
##############################################################################
# start training #############################################################
##############################################################################
start = datetime.now()
# initialize ourselves a learner
learner = ftrl_proximal(alpha, beta, L1, L2, D, interaction)
f = open('./click/indexdicless3.pkl', 'rb')
indexdic = cPickle.load(f)
f.close()
# start training
for e in xrange(epoch):
loss = 0.
count = 0
for t, date, ID, x, y in data(train, D, indexdic): # data is a generator
        # t: just an instance counter
# date: you know what this is
# ID: id provided in original data
# x: features
# y: label (click)
# step 1, get prediction from learner
p = learner.predict(x)
if (holdafter and date > holdafter) or (holdout and t % holdout == 0):
# step 2-1, calculate validation loss
# we do not train with the validation data so that our
# validation loss is an accurate estimation
#
# holdafter: train instances from day 1 to day N
# validate with instances from day N + 1 and after
#
# holdout: validate with every N instance, train with others
loss += logloss(p, y)
count += 1
else:
# step 2-2, update learner with label (click) information
learner.update(x, p, y)
#print('Epoch %d finished, validation logloss: %f, elapsed time: %s' % (
# e, loss/count, str(datetime.now() - start)))
##############################################################################
# start testing, and build Kaggle's submission file ##########################
##############################################################################
with open(submission, 'w') as outfile:
outfile.write('id,click\n')
for t, date, ID, x, y in data(test, D, indexdic):
p = learner.predict(x)
outfile.write('%s,%s\n' % (ID, str(p)))
|
|
# Generated from /home/marisa/Work/tvm/python/tvm/relay/grammar/Relay.g4 by ANTLR 4.7.1
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2*")
buf.write("\u010d\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\3\2")
buf.write("\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\7\3\b\3")
buf.write("\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3")
buf.write("\f\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23")
buf.write("\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\6\25\u0097")
buf.write("\n\25\r\25\16\25\u0098\3\25\3\25\3\26\3\26\3\26\3\26\7")
buf.write("\26\u00a1\n\26\f\26\16\26\u00a4\13\26\3\26\3\26\3\26\3")
buf.write("\26\3\27\3\27\3\27\3\27\7\27\u00ae\n\27\f\27\16\27\u00b1")
buf.write("\13\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\31\3\31\3")
buf.write("\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\36")
buf.write("\3\37\3\37\3\37\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3#\3#\3")
buf.write("#\3$\3$\3$\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3&\3&\3&\5&\u00e6")
buf.write("\n&\3\'\3\'\3\'\5\'\u00eb\n\'\3\'\5\'\u00ee\n\'\3(\3(")
buf.write("\3(\3)\6)\u00f4\n)\r)\16)\u00f5\3*\3*\5*\u00fa\n*\3*\3")
buf.write("*\3+\3+\5+\u0100\n+\3+\3+\3+\7+\u0105\n+\f+\16+\u0108")
buf.write("\13+\3,\3,\3-\3-\4\u00a2\u00af\2.\3\3\5\4\7\5\t\6\13\7")
buf.write("\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21")
buf.write("!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67")
buf.write("\359\36;\37= ?!A\"C#E$G%I&K\'M\2O(Q)S\2U*W\2Y\2\3\2\7")
buf.write("\5\2\13\f\17\17\"\"\4\2GGgg\4\2--//\4\2C\\c|\3\2\62;\2")
buf.write("\u0114\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2")
buf.write("\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2")
buf.write("\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33")
buf.write("\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2")
buf.write("\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2")
buf.write("\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2")
buf.write("\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2")
buf.write("\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3")
buf.write("\2\2\2\2K\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2U\3\2\2\2\3[")
buf.write("\3\2\2\2\5]\3\2\2\2\7_\3\2\2\2\ta\3\2\2\2\13c\3\2\2\2")
buf.write("\re\3\2\2\2\17h\3\2\2\2\21m\3\2\2\2\23q\3\2\2\2\25s\3")
buf.write("\2\2\2\27u\3\2\2\2\31w\3\2\2\2\33y\3\2\2\2\35|\3\2\2\2")
buf.write("\37\177\3\2\2\2!\u0083\3\2\2\2#\u0085\3\2\2\2%\u008c\3")
buf.write("\2\2\2\'\u008e\3\2\2\2)\u0096\3\2\2\2+\u009c\3\2\2\2-")
buf.write("\u00a9\3\2\2\2/\u00b7\3\2\2\2\61\u00b9\3\2\2\2\63\u00bb")
buf.write("\3\2\2\2\65\u00bd\3\2\2\2\67\u00bf\3\2\2\29\u00c1\3\2")
buf.write("\2\2;\u00c3\3\2\2\2=\u00c6\3\2\2\2?\u00c9\3\2\2\2A\u00cc")
buf.write("\3\2\2\2C\u00cf\3\2\2\2E\u00d2\3\2\2\2G\u00d5\3\2\2\2")
buf.write("I\u00d8\3\2\2\2K\u00e5\3\2\2\2M\u00e7\3\2\2\2O\u00ef\3")
buf.write("\2\2\2Q\u00f3\3\2\2\2S\u00f7\3\2\2\2U\u00ff\3\2\2\2W\u0109")
buf.write("\3\2\2\2Y\u010b\3\2\2\2[\\\7*\2\2\\\4\3\2\2\2]^\7+\2\2")
buf.write("^\6\3\2\2\2_`\7.\2\2`\b\3\2\2\2ab\7]\2\2b\n\3\2\2\2cd")
buf.write("\7_\2\2d\f\3\2\2\2ef\7k\2\2fg\7h\2\2g\16\3\2\2\2hi\7g")
buf.write("\2\2ij\7n\2\2jk\7u\2\2kl\7g\2\2l\20\3\2\2\2mn\7n\2\2n")
buf.write("o\7g\2\2op\7v\2\2p\22\3\2\2\2qr\7?\2\2r\24\3\2\2\2st\7")
buf.write("=\2\2t\26\3\2\2\2uv\7}\2\2v\30\3\2\2\2wx\7\177\2\2x\32")
buf.write("\3\2\2\2yz\7h\2\2z{\7p\2\2{\34\3\2\2\2|}\7/\2\2}~\7@\2")
buf.write("\2~\36\3\2\2\2\177\u0080\7f\2\2\u0080\u0081\7g\2\2\u0081")
buf.write("\u0082\7h\2\2\u0082 \3\2\2\2\u0083\u0084\7<\2\2\u0084")
buf.write("\"\3\2\2\2\u0085\u0086\7V\2\2\u0086\u0087\7g\2\2\u0087")
buf.write("\u0088\7p\2\2\u0088\u0089\7u\2\2\u0089\u008a\7q\2\2\u008a")
buf.write("\u008b\7t\2\2\u008b$\3\2\2\2\u008c\u008d\7a\2\2\u008d")
buf.write("&\3\2\2\2\u008e\u008f\7x\2\2\u008f\u0090\7\62\2\2\u0090")
buf.write("\u0091\7\60\2\2\u0091\u0092\7\62\2\2\u0092\u0093\7\60")
buf.write("\2\2\u0093\u0094\7\65\2\2\u0094(\3\2\2\2\u0095\u0097\t")
buf.write("\2\2\2\u0096\u0095\3\2\2\2\u0097\u0098\3\2\2\2\u0098\u0096")
buf.write("\3\2\2\2\u0098\u0099\3\2\2\2\u0099\u009a\3\2\2\2\u009a")
buf.write("\u009b\b\25\2\2\u009b*\3\2\2\2\u009c\u009d\7\61\2\2\u009d")
buf.write("\u009e\7\61\2\2\u009e\u00a2\3\2\2\2\u009f\u00a1\13\2\2")
buf.write("\2\u00a0\u009f\3\2\2\2\u00a1\u00a4\3\2\2\2\u00a2\u00a3")
buf.write("\3\2\2\2\u00a2\u00a0\3\2\2\2\u00a3\u00a5\3\2\2\2\u00a4")
buf.write("\u00a2\3\2\2\2\u00a5\u00a6\7\f\2\2\u00a6\u00a7\3\2\2\2")
buf.write("\u00a7\u00a8\b\26\2\2\u00a8,\3\2\2\2\u00a9\u00aa\7\61")
buf.write("\2\2\u00aa\u00ab\7,\2\2\u00ab\u00af\3\2\2\2\u00ac\u00ae")
buf.write("\13\2\2\2\u00ad\u00ac\3\2\2\2\u00ae\u00b1\3\2\2\2\u00af")
buf.write("\u00b0\3\2\2\2\u00af\u00ad\3\2\2\2\u00b0\u00b2\3\2\2\2")
buf.write("\u00b1\u00af\3\2\2\2\u00b2\u00b3\7,\2\2\u00b3\u00b4\7")
buf.write("\61\2\2\u00b4\u00b5\3\2\2\2\u00b5\u00b6\b\27\2\2\u00b6")
buf.write(".\3\2\2\2\u00b7\u00b8\7,\2\2\u00b8\60\3\2\2\2\u00b9\u00ba")
buf.write("\7\61\2\2\u00ba\62\3\2\2\2\u00bb\u00bc\7-\2\2\u00bc\64")
buf.write("\3\2\2\2\u00bd\u00be\7/\2\2\u00be\66\3\2\2\2\u00bf\u00c0")
buf.write("\7>\2\2\u00c08\3\2\2\2\u00c1\u00c2\7@\2\2\u00c2:\3\2\2")
buf.write("\2\u00c3\u00c4\7>\2\2\u00c4\u00c5\7?\2\2\u00c5<\3\2\2")
buf.write("\2\u00c6\u00c7\7@\2\2\u00c7\u00c8\7?\2\2\u00c8>\3\2\2")
buf.write("\2\u00c9\u00ca\7?\2\2\u00ca\u00cb\7?\2\2\u00cb@\3\2\2")
buf.write("\2\u00cc\u00cd\7#\2\2\u00cd\u00ce\7?\2\2\u00ceB\3\2\2")
buf.write("\2\u00cf\u00d0\7B\2\2\u00d0\u00d1\5U+\2\u00d1D\3\2\2\2")
buf.write("\u00d2\u00d3\7\'\2\2\u00d3\u00d4\5U+\2\u00d4F\3\2\2\2")
buf.write("\u00d5\u00d6\7\'\2\2\u00d6\u00d7\5Q)\2\u00d7H\3\2\2\2")
buf.write("\u00d8\u00d9\7o\2\2\u00d9\u00da\7w\2\2\u00da\u00db\7v")
buf.write("\2\2\u00dbJ\3\2\2\2\u00dc\u00dd\7V\2\2\u00dd\u00de\7t")
buf.write("\2\2\u00de\u00df\7w\2\2\u00df\u00e6\7g\2\2\u00e0\u00e1")
buf.write("\7H\2\2\u00e1\u00e2\7c\2\2\u00e2\u00e3\7n\2\2\u00e3\u00e4")
buf.write("\7u\2\2\u00e4\u00e6\7g\2\2\u00e5\u00dc\3\2\2\2\u00e5\u00e0")
buf.write("\3\2\2\2\u00e6L\3\2\2\2\u00e7\u00ea\5Q)\2\u00e8\u00e9")
buf.write("\7\60\2\2\u00e9\u00eb\5Q)\2\u00ea\u00e8\3\2\2\2\u00ea")
buf.write("\u00eb\3\2\2\2\u00eb\u00ed\3\2\2\2\u00ec\u00ee\5S*\2\u00ed")
buf.write("\u00ec\3\2\2\2\u00ed\u00ee\3\2\2\2\u00eeN\3\2\2\2\u00ef")
buf.write("\u00f0\5M\'\2\u00f0\u00f1\7h\2\2\u00f1P\3\2\2\2\u00f2")
buf.write("\u00f4\5Y-\2\u00f3\u00f2\3\2\2\2\u00f4\u00f5\3\2\2\2\u00f5")
buf.write("\u00f3\3\2\2\2\u00f5\u00f6\3\2\2\2\u00f6R\3\2\2\2\u00f7")
buf.write("\u00f9\t\3\2\2\u00f8\u00fa\t\4\2\2\u00f9\u00f8\3\2\2\2")
buf.write("\u00f9\u00fa\3\2\2\2\u00fa\u00fb\3\2\2\2\u00fb\u00fc\5")
buf.write("Q)\2\u00fcT\3\2\2\2\u00fd\u0100\7a\2\2\u00fe\u0100\5W")
buf.write(",\2\u00ff\u00fd\3\2\2\2\u00ff\u00fe\3\2\2\2\u0100\u0106")
buf.write("\3\2\2\2\u0101\u0105\7a\2\2\u0102\u0105\5W,\2\u0103\u0105")
buf.write("\5Y-\2\u0104\u0101\3\2\2\2\u0104\u0102\3\2\2\2\u0104\u0103")
buf.write("\3\2\2\2\u0105\u0108\3\2\2\2\u0106\u0104\3\2\2\2\u0106")
buf.write("\u0107\3\2\2\2\u0107V\3\2\2\2\u0108\u0106\3\2\2\2\u0109")
buf.write("\u010a\t\5\2\2\u010aX\3\2\2\2\u010b\u010c\t\6\2\2\u010c")
buf.write("Z\3\2\2\2\16\2\u0098\u00a2\u00af\u00e5\u00ea\u00ed\u00f5")
buf.write("\u00f9\u00ff\u0104\u0106\3\b\2\2")
return buf.getvalue()
class RelayLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
SEMVER = 19
WS = 20
LINE_COMMENT = 21
COMMENT = 22
MUL = 23
DIV = 24
ADD = 25
SUB = 26
LT = 27
GT = 28
LE = 29
GE = 30
EQ = 31
NE = 32
GLOBAL_VAR = 33
LOCAL_VAR = 34
GRAPH_VAR = 35
MUT = 36
BOOL_LIT = 37
FLOAT = 38
NAT = 39
CNAME = 40
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'('", "')'", "','", "'['", "']'", "'if'", "'else'", "'let'",
"'='", "';'", "'{'", "'}'", "'fn'", "'->'", "'def'", "':'",
"'Tensor'", "'_'", "'v0.0.3'", "'*'", "'/'", "'+'", "'-'", "'<'",
"'>'", "'<='", "'>='", "'=='", "'!='", "'mut'" ]
symbolicNames = [ "<INVALID>",
"SEMVER", "WS", "LINE_COMMENT", "COMMENT", "MUL", "DIV", "ADD",
"SUB", "LT", "GT", "LE", "GE", "EQ", "NE", "GLOBAL_VAR", "LOCAL_VAR",
"GRAPH_VAR", "MUT", "BOOL_LIT", "FLOAT", "NAT", "CNAME" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "SEMVER", "WS", "LINE_COMMENT",
"COMMENT", "MUL", "DIV", "ADD", "SUB", "LT", "GT", "LE",
"GE", "EQ", "NE", "GLOBAL_VAR", "LOCAL_VAR", "GRAPH_VAR",
"MUT", "BOOL_LIT", "PREFLOAT", "FLOAT", "NAT", "EXP",
"CNAME", "LETTER", "DIGIT" ]
grammarFileName = "Relay.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
|
|
"""
sentry.models.authenticator
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import hmac
import time
import base64
import hashlib
from u2flib_server import u2f
from u2flib_server import jsapi as u2f_jsapi
from cryptography.exceptions import InvalidSignature, InvalidKey
from django.db import models
from django.core.cache import cache
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.core.urlresolvers import reverse
from sentry import options
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField, \
FlexibleForeignKey, BoundedPositiveIntegerField, UnicodePickledObjectField
from sentry.utils.decorators import classproperty
from sentry.utils.otp import generate_secret_key, TOTP
from sentry.utils.sms import send_sms, sms_available
from sentry.utils.dates import to_datetime
from sentry.utils.http import absolute_uri
class ActivationResult(object):
type = None
class ActivationMessageResult(ActivationResult):
def __init__(self, message, type='info'):
assert type in ('error', 'warning', 'info')
self.type = type
self.message = message
class ActivationChallengeResult(ActivationResult):
type = 'challenge'
def __init__(self, challenge):
self.challenge = challenge
class AuthenticatorManager(BaseManager):
def all_interfaces_for_user(self, user, return_missing=False):
"""Returns a correctly sorted list of all interfaces the user
has enabled. If `return_missing` is set to `True` then all
interfaces are returned even if not enabled.
"""
_sort = lambda x: sorted(x, key=lambda x: (x.type == 0, x.type))
# Collect interfaces user is enrolled in
ifaces = [x.interface for x in Authenticator.objects.filter(
user=user,
type__in=[a.type for a in available_authenticators()],
)]
if return_missing:
# Collect additional interfaces that the user
# is not enrolled in
rvm = dict(AUTHENTICATOR_INTERFACES)
for iface in ifaces:
rvm.pop(iface.interface_id, None)
for iface_cls in rvm.itervalues():
if iface_cls.is_available:
ifaces.append(iface_cls())
return _sort(ifaces)
def auto_add_recovery_codes(self, user, force=False):
"""This automatically adds the recovery code backup interface in
case no backup interface is currently set for the user. Returns
the interface that was added.
"""
has_authenticators = False
# If we're not forcing, check for a backup interface already setup
# or if it's missing, we'll need to set it.
if not force:
for authenticator in Authenticator.objects.filter(
user=user,
type__in=[a.type for a in available_authenticators()]
):
iface = authenticator.interface
if iface.is_backup_interface:
return
has_authenticators = True
if has_authenticators or force:
interface = RecoveryCodeInterface()
interface.enroll(user)
return interface
def get_interface(self, user, interface_id):
"""Looks up an interface by interface ID for a user. If the
interface is not available but configured a
`Authenticator.DoesNotExist` will be raised just as if the
authenticator was not configured at all.
"""
interface = AUTHENTICATOR_INTERFACES.get(interface_id)
if interface is None or not interface.is_available:
raise LookupError('No such interface %r' % interface_id)
try:
return Authenticator.objects.get(
user=user,
type=interface.type,
).interface
except Authenticator.DoesNotExist:
return interface()
def user_has_2fa(self, user):
"""Checks if the user has any 2FA configured.
"""
return Authenticator.objects.filter(
user=user,
type__in=[a.type for a in available_authenticators(ignore_backup=True)],
).exists()
def bulk_users_have_2fa(self, user_ids):
"""Checks if a list of user ids have 2FA configured.
Returns a dict of {<id>: <has_2fa>}
"""
authenticators = set(Authenticator.objects.filter(
user__in=user_ids,
type__in=[a.type for a in available_authenticators(ignore_backup=True)],
).distinct().values_list('user_id', flat=True))
return {id: id in authenticators for id in user_ids}
AUTHENTICATOR_INTERFACES = {}
AUTHENTICATOR_INTERFACES_BY_TYPE = {}
AUTHENTICATOR_CHOICES = []
def register_authenticator(cls):
AUTHENTICATOR_INTERFACES[cls.interface_id] = cls
AUTHENTICATOR_INTERFACES_BY_TYPE[cls.type] = cls
AUTHENTICATOR_CHOICES.append((cls.type, cls.name))
return cls
def available_authenticators(ignore_backup=False):
interfaces = AUTHENTICATOR_INTERFACES.itervalues()
if not ignore_backup:
return [v for v in interfaces if v.is_available]
return [v for v in interfaces if not v.is_backup_interface and v.is_available]
class AuthenticatorInterface(object):
type = -1
interface_id = None
name = None
description = None
is_backup_interface = False
enroll_button = _('Enroll')
configure_button = _('Info')
remove_button = _('Remove')
is_available = True
allow_multi_enrollment = False
def __init__(self, authenticator=None):
        self.authenticator = authenticator
@property
def is_enrolled(self):
"""Returns `True` if the interfaces is enrolled (eg: has an
authenticator for a user attached).
"""
return self.authenticator is not None
@property
def requires_activation(self):
"""If the interface has an activation method that needs to be
called this returns `True`.
"""
return self.activate.im_func is not \
AuthenticatorInterface.activate.im_func
@property
def can_validate_otp(self):
"""If the interface is able to validate OTP codes then this returns
`True`.
"""
return self.validate_otp.im_func is not \
AuthenticatorInterface.validate_otp.im_func
@property
def config(self):
"""Returns the configuration dictionary for this interface. If
the interface is registered with an authenticator (eg: it is
enrolled) then the authenticator's config is returned, otherwise
a new config is used on first access.
"""
if self.authenticator is not None:
return self.authenticator.config
rv = getattr(self, '_unbound_config', None)
if rv is None:
# Prevent bad recursion if stuff wants to access the default
# config
self._unbound_config = {}
rv = self._unbound_config = self.generate_new_config()
return rv
def generate_new_config(self):
"""This method is invoked if a new config is required."""
return {}
def activate(self, request):
"""If an authenticator overrides this then the method is called
when the dialog for authentication is brought up. The returned string
is then rendered in the UI.
"""
# This method needs to be empty for the default
# `requires_activation` property to make sense.
pass
def enroll(self, user):
"""Invoked to enroll a user for this interface. If already enrolled
an error is raised.
"""
if self.authenticator is None:
self.authenticator = Authenticator.objects.create(
user=user,
type=self.type,
config=self.config,
)
else:
if not self.allow_multi_enrollment:
raise Authenticator.AlreadyEnrolled()
self.authenticator.config = self.config
self.authenticator.save()
def validate_otp(self, otp):
"""This method is invoked for an OTP response and has to return
`True` or `False` based on the validity of the OTP response. Note
that this can be called with otp responses from other interfaces.
"""
return False
def validate_response(self, request, challenge, response):
"""If the activation generates a challenge that needs to be
responded to this validates the response for that challenge. This
is only ever called for challenges emitted by the activation of this
activation interface.
"""
return False
@register_authenticator
class RecoveryCodeInterface(AuthenticatorInterface):
"""A backup interface that is based on static recovery codes."""
type = 0
interface_id = 'recovery'
name = _('Recovery Codes')
description = _('Recovery codes can be used to access your account in the '
'event you lose access to your device and cannot '
'receive two-factor authentication codes.')
enroll_button = _('Activate')
configure_button = _('View Codes')
remove_button = None
is_backup_interface = True
def __init__(self, authenticator=None):
AuthenticatorInterface.__init__(self, authenticator)
def get_codes(self):
rv = []
if self.is_enrolled:
h = hmac.new(self.config['salt'], None, hashlib.sha1)
for x in xrange(10):
h.update('%s|' % x)
rv.append(base64.b32encode(h.digest())[:8])
return rv
def generate_new_config(self):
return {
'salt': os.urandom(16).encode('hex'),
'used': 0,
}
def regenerate_codes(self, save=True):
if not self.is_enrolled:
raise RuntimeError('Interface is not enrolled')
self.config.update(self.generate_new_config())
if save:
self.authenticator.save()
def validate_otp(self, otp):
mask = self.config['used']
code = otp.strip().replace('-', '').upper()
for idx, ref_code in enumerate(self.get_codes()):
if code == ref_code:
if mask & (1 << idx):
break
self.config['used'] = mask | (1 << idx)
return True
return False
def get_unused_codes(self):
mask = self.config['used']
rv = []
for idx, code in enumerate(self.get_codes()):
if not mask & (1 << idx):
rv.append(code[:4] + '-' + code[4:])
return rv
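# Illustrative note on the recovery-code bookkeeping above: config['used'] is a
# bitmask with one bit per code. For example, used == 0b101 (5) means the codes
# at indices 0 and 2 have been redeemed; get_unused_codes() then returns the
# remaining eight codes, and validate_otp() rejects any code whose bit is set.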
class OtpMixin(object):
def generate_new_config(self):
return {
'secret': generate_secret_key(),
}
def _get_secret(self):
return self.config['secret']
def _set_secret(self, secret):
self.config['secret'] = secret
secret = property(_get_secret, _set_secret)
del _get_secret, _set_secret
def make_otp(self):
return TOTP(self.secret)
def _get_otp_counter_cache_key(self, counter):
if self.authenticator is not None:
return 'used-otp-counters:%s:%s' % (
self.authenticator.user_id,
counter,
)
def check_otp_counter(self, counter):
# OTP uses an internal counter that increments every 30 seconds.
# A hash function generates a six digit code based on the counter
# and a secret key. If the generated PIN was used it is marked in
# redis as used by remembering which counter it was generated
# from. This is what we check for here.
cache_key = self._get_otp_counter_cache_key(counter)
return cache_key is None or cache.get(cache_key) != '1'
def mark_otp_counter_used(self, counter):
cache_key = self._get_otp_counter_cache_key(counter)
if cache_key is not None:
# Mark us used for three windows
cache.set(cache_key, '1', timeout=120)
def validate_otp(self, otp):
otp = otp.strip().replace('-', '').replace(' ', '')
used_counter = self.make_otp().verify(
otp, return_counter=True,
check_counter_func=self.check_otp_counter)
if used_counter is not None:
self.mark_otp_counter_used(used_counter)
return True
return False
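# Illustrative flow of the replay protection above (the cache key shown is the
# format produced by _get_otp_counter_cache_key):
#   1. validate_otp('123 456') normalizes the code and asks the TOTP helper to
#      verify it, passing check_otp_counter as the counter-reuse check.
#   2. If the code matches counter N and the cache key
#      'used-otp-counters:<user_id>:N' is not set to '1', the code is accepted.
#   3. mark_otp_counter_used(N) then sets that key for 120 seconds, so the same
#      code cannot be replayed within its validity window.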
@register_authenticator
class TotpInterface(OtpMixin, AuthenticatorInterface):
"""This interface uses TOTP with an authenticator."""
type = 1
interface_id = 'totp'
name = _('Authenticator App')
description = _('An authenticator application that supports TOTP (like '
'Google Authenticator or 1Password) can be used to '
'conveniently secure your account. A new token is '
'generated every 30 seconds.')
def get_provision_qrcode(self, user, issuer=None):
return self.make_otp().get_provision_qrcode(
user, issuer=issuer)
@register_authenticator
class SmsInterface(OtpMixin, AuthenticatorInterface):
"""This interface sends OTP codes via text messages to the user."""
type = 2
interface_id = 'sms'
name = _('Text Message')
description = _('This authenticator sends you text messages for '
'verification. It\'s useful as a backup method '
'or when you do not have a phone that supports '
'an authenticator application.')
code_ttl = 45
@classproperty
def is_available(cls):
return sms_available()
def generate_new_config(self):
config = super(SmsInterface, self).generate_new_config()
config['phone_number'] = None
return config
def make_otp(self):
return TOTP(self.config['secret'], digits=6, interval=self.code_ttl,
default_window=1)
def _get_phone_number(self):
return self.config['phone_number']
def _set_phone_number(self, value):
self.config['phone_number'] = value
phone_number = property(_get_phone_number, _set_phone_number)
del _get_phone_number, _set_phone_number
def activate(self, request):
if self.send_text(request=request):
return ActivationMessageResult(
_('A confirmation code was sent to your phone. '
'It is valid for %d seconds.') % self.code_ttl)
return ActivationMessageResult(
_('Error: we failed to send a text message to you. You '
'can try again later or sign in with a different method.'),
type='error')
def send_text(self, for_enrollment=False, request=None):
ctx = {'code': self.make_otp().generate_otp()}
if for_enrollment:
text = _('%(code)s is your Sentry two-factor enrollment code. '
'You are about to set up text message based two-factor '
'authentication.')
else:
text = _('%(code)s is your Sentry authentication code.')
if request is not None:
text = u'%s\n\n%s' % (text, _('Requested from %(ip)s'))
ctx['ip'] = request.META['REMOTE_ADDR']
return send_sms(text % ctx, to=self.phone_number)
@register_authenticator
class U2fInterface(AuthenticatorInterface):
type = 3
interface_id = 'u2f'
configure_button = _('Configure')
name = _('U2F (Universal 2nd Factor)')
description = _('Authenticate with a U2F hardware device. This is a '
'device like a Yubikey or something similar which '
'supports FIDO\'s U2F specification. This also requires '
'a browser which supports this system (like Google '
'Chrome).')
allow_multi_enrollment = True
@classproperty
def u2f_app_id(cls):
rv = options.get('u2f.app-id')
return rv or absolute_uri(reverse('sentry-u2f-app-id'))
@classproperty
def u2f_facets(cls):
facets = options.get('u2f.facets')
if not facets:
return [options.get('system.url-prefix')]
return [x.rstrip('/') for x in facets]
@classproperty
def is_available(cls):
url_prefix = options.get('system.url-prefix')
return url_prefix and url_prefix.startswith('https://')
def generate_new_config(self):
return {}
def start_enrollment(self):
return dict(u2f.start_register(self.u2f_app_id,
self.get_u2f_devices()))
def get_u2f_devices(self):
rv = []
for data in self.config.get('devices') or ():
rv.append(u2f_jsapi.DeviceRegistration(data['binding']))
return rv
def remove_u2f_device(self, key):
"""Removes a U2F device but never removes the last one. This returns
False if the last device would be removed.
"""
devices = [x for x in self.config.get('devices') or ()
if x['binding']['keyHandle'] != key]
if devices:
self.config['devices'] = devices
return True
return False
def get_registered_devices(self):
rv = []
for device in self.config.get('devices') or ():
rv.append({
'timestamp': to_datetime(device['ts']),
'name': device['name'],
'key_handle': device['binding']['keyHandle'],
'app_id': device['binding']['appId'],
})
rv.sort(key=lambda x: x['name'])
return rv
def try_enroll(self, enrollment_data, response_data, device_name=None):
binding, cert = u2f.complete_register(enrollment_data, response_data,
self.u2f_facets)
devices = self.config.setdefault('devices', [])
devices.append({
'name': device_name or 'Security Key',
'ts': int(time.time()),
'binding': dict(binding),
})
def activate(self, request):
return ActivationChallengeResult(
challenge=dict(u2f.start_authenticate(self.get_u2f_devices())),
)
def validate_response(self, request, challenge, response):
try:
counter, touch = u2f.verify_authenticate(self.get_u2f_devices(),
challenge, response,
self.u2f_facets)
except (InvalidSignature, InvalidKey, StopIteration):
return False
return True
class Authenticator(BaseModel):
__core__ = True
id = BoundedAutoField(primary_key=True)
user = FlexibleForeignKey('sentry.User', db_index=True)
created_at = models.DateTimeField(_('created at'), default=timezone.now)
last_used_at = models.DateTimeField(_('last used at'), null=True)
type = BoundedPositiveIntegerField(choices=AUTHENTICATOR_CHOICES)
config = UnicodePickledObjectField()
objects = AuthenticatorManager()
class AlreadyEnrolled(Exception):
pass
class Meta:
app_label = 'sentry'
db_table = 'auth_authenticator'
verbose_name = _('authenticator')
verbose_name_plural = _('authenticators')
@cached_property
def interface(self):
return AUTHENTICATOR_INTERFACES_BY_TYPE[self.type](self)
def mark_used(self, save=True):
self.last_used_at = timezone.now()
if save:
self.save()
def __repr__(self):
return '<Authenticator user=%r interface=%r>' % (
self.user.email,
self.interface.interface_id,
)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2016 Alessandro Amici
#
# python 2 support via python-future
from __future__ import absolute_import, unicode_literals
from pytest_nodev import plugin
TEST_PASS_PY = '''
def test_pass():
assert True
'''
TEST_FACTORIAL_PY = '''
def test_factorial(candidate):
factorial = candidate
assert factorial(0) == 1
assert factorial(1) == 1
assert factorial(21) == 51090942171709440000
'''
TEST_POW_PY = '''
import pytest
@pytest.mark.candidate('pow')
def test_pow():
assert pow(2, 9, 47) == 42
'''
def test_import_coverage():
"""Fix the coverage by pytest-cov, that may trigger after pytest_nodev is already imported."""
from imp import reload # Python 2 and 3 reload
reload(plugin)
#
# pytest hooks
#
def test_pytest_addoption(testdir):
"""The plugin is registered with pytest."""
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'nodev:',
'*--candidates-from-stdlib*',
'*--candidates-fail*',
])
def test_pytest_generate_tests(testdir):
testdir.makepyfile(TEST_FACTORIAL_PY + TEST_PASS_PY)
result = testdir.runpytest(
'--candidates-from-modules=math',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:factorial*XPASS',
'*test_pass*PASSED',
])
assert result.ret == 0
result = testdir.runpytest(
'--candidates-from-modules=math',
'--candidates-fail',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:factorial*PASSED',
'*test_pass*PASSED',
])
assert result.ret == 1
def test_pytest_terminal_summary(testdir):
testdir.makepyfile(TEST_PASS_PY)
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*test_pass*PASSED',
])
assert result.ret == 0
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'--candidates-from-modules=math',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:factorial*PASSED',
])
assert result.ret == 0
#
# command line options
#
def test_pytest_run_no_candidate(testdir):
"""We didn't break pytest."""
testdir.makepyfile(TEST_PASS_PY)
result = testdir.runpytest(
'-v',
)
result.stdout.fnmatch_lines([
'*test_pass*PASSED',
])
assert result.ret == 0
def test_pytest_run_no_candidate_option(testdir):
"""Skip tests with the *candidate* fixture if no ``--candidates-*`` option is given."""
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*candidate*SKIPPED',
])
assert result.ret == 0
def test_pytest_run_from_modules(testdir):
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'--candidates-from-modules=math',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:fabs*xfail',
'*test_factorial*math:factorial*XPASS',
])
assert result.ret == 0
def test_pytest_run_from_modules_twice(testdir):
testdir.makepyfile(TEST_FACTORIAL_PY + TEST_POW_PY)
result = testdir.runpytest(
'--candidates-from-modules=math',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:fabs*xfail',
'*test_factorial*math:factorial*XPASS',
])
assert result.ret == 0
def test_pytest_run_from_specs(testdir):
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'--candidates-from-specs=pip',
'--candidates-includes=pip.exceptions',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*pip.exceptions:*xfail',
])
assert result.ret == 0
def test_pytest_run_from_stdlib(testdir):
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'--candidates-from-stdlib',
'--candidates-includes=math',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:fabs*xfail',
'*test_factorial*math:factorial*XPASS',
])
assert result.ret == 0
def test_pytest_run_from_all(testdir, monkeypatch):
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'--candidates-from-all',
'--candidates-includes=math:factorial|pip.exceptions',
'-v',
)
assert result.ret == 1
monkeypatch.setenv('PYTEST_NODEV_MODE', 'FEARLESS')
result = testdir.runpytest(
'--candidates-from-all',
'--candidates-includes=math:factorial|pip.exceptions',
'-v',
)
result.stdout.fnmatch_lines([
'*test_factorial*math:factorial*XPASS',
'*test_factorial*pip.exceptions:*xfail',
])
assert result.ret == 0
def test_candidate_modules_object_blacklist(testdir):
testdir.makepyfile(TEST_FACTORIAL_PY)
result = testdir.runpytest(
'--candidates-from-modules=posix',
'--candidates-includes=.*fork',
'-v',
)
assert result.ret == 0
|
|
# Copyright (c) <2016> <GUANGHAN NING>. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Script File: ROLO_utils.py
Description:
ROLO is short for Recurrent YOLO, aimed at simultaneous object detection and tracking
Paper: http://arxiv.org/abs/1607.05781
Author: Guanghan Ning
Webpage: http://guanghan.info/
'''
import os, sys, time
import numpy as np
import ConfigParser
import cv2
import pickle
import tensorflow as tf
import math
global Config
import matplotlib.pyplot as plt
class ROLO_utils:
cfgPath = ''
Config = []
flag_train = False
flag_track = False
flag_detect = False
params= {
'img_size': [448, 448],
'num_classes': 20,
'alpha': 0.1,
'dropout_rate': 0.5,
'num_feat_lstm': 5001,
'num_steps': 28,
'batch_size': 1,
'tensor_size': 7,
'predict_num_per_tensor': 2,
'threshold': 0.2,
'iou_threshold': 0.5,
'class_labels': ["stop", "vehicle", "pedestrian"],
'conv_layer_num': 3,
'conv_filters': [16, 32, 64],
'conv_size': [3, 3, 3],
'conv_stride': [1,1,1],
'fc_layer_num': 3,
'fc_input': [1024, 1024, 1024],
'fc_output': [1024, 1024, 1024]
}
file_weights= None
file_in_path= None
file_out_path= None
flag_write= False
flag_show_img= False
batch_size = 128
x_path= "u03/yolo_output/"
y_path= "u03/rolo_gt"
def __init__(self,argvs = []):
print("Utils init")
# Network Parameters
def loadCfg(self):
Config = ConfigParser.ConfigParser()
Config.read(self.cfgPath)
Sections = Config.sections()
print('self.cfgPath=' + self.cfgPath)
if os.path.isfile(self.cfgPath):
dict_parameters = self.ConfigSectionMap(Config, "Parameters")
dict_networks = self.ConfigSectionMap(Config, "Networks")
self.params['img_size']= dict_parameters['img_size'] #[448, 448]
self.params['alpha'] = dict_parameters['alpha']
self.params['num_classes']= dict_parameters['num_classes'] #20
self.params['dropout_rate']= dict_parameters['dropout_rate'] # 0.5
self.params['num_feat_lstm']= dict_parameters['num_feat_lstm'] #4096+5 # number of features in hidden layer of LSTM
self.params['num_steps']= dict_parameters['num_steps'] # 28 # timesteps for LSTM
self.params['batch_size']= dict_parameters['batch_size'] # 1 # during testing it is 1; during training it is 64.
self.params['tensor_size'] = dict_parameters['tensor_size']
self.params['predict_num_per_tensor'] = dict_parameters['predict_num_per_tensor']
            self.params['threshold'] = dict_parameters['threshold']
self.params['iou_threshold'] = dict_parameters['iou_threshold']
self.params['conv_layer_num'] = dict_networks['conv_layer_num']
self.params['conv_filters']= dict_networks['conv_filters']
self.params['conv_size']= dict_networks['conv_size']
self.params['conv_stride']= dict_networks['conv_stride']
self.params['fc_layer_num']= dict_networks['fc_layer_num']
self.params['fc_input'] = dict_networks['fc_input']
self.params['fc_output'] = dict_networks['fc_output']
return self.params
def ConfigSectionMap(self, Config, section):
dict1= {}
options = Config.options(section)
for option in options:
dict1[option] = Config.get(section, option)
return dict1
def validate_file_format(self, file_in_path, allowed_format):
if not os.path.isfile(file_in_path) or os.path.splitext(file_in_path)[1][1:] not in allowed_format:
#print(os.path.splitext(file_in_path)[1][1:])
print "Input file with correct format not found.\n"
return False
else:
return True
def argv_parser(self, argvs):
#global file_weights, file_in_path, file_out_path, flag_write, flag_show_img
allowed_format = ['png', 'jpg', 'JPEG', 'avi', 'mp4', 'mkv','cfg']
for i in range(1, len(argvs), 2):
if argvs[i] == '-train': self.flag_train= True
if argvs[i] == '-cfg': self.cfgPath = argvs[i+1]
if argvs[i] == '-weights': self.file_weights = argvs[i+1]
            if argvs[i] == '-input': self.file_in_path = argvs[i+1]; self.validate_file_format(self.file_in_path, allowed_format)
if argvs[i] == '-output': self.file_out_path = argvs[i+1]; self.flag_write = True
if argvs[i] == '-detect': self.flag_detect = True; self.flag_track= False;
if argvs[i] == '-track': self.flag_detect= True; self.flag_track = True;
if argvs[i] == '-imshow':
if argvs[i+1] == '1': self.flag_show_img = True
else: self.flag_show_img = False
return (self.cfgPath, self.file_weights, self.file_in_path)
def is_image(self, file_in_path):
if os.path.isfile(file_in_path) and os.path.splitext(file_in_path)[1][1:] in ['jpg', 'JPEG', 'png', 'JPG']:
return True
else:
return False
def is_video(self, file_in_path):
if os.path.isfile(file_in_path) and os.path.splitext(file_in_path)[1][1:] in ['avi', 'mkv', 'mp4']:
return True
else:
return False
# Not Face user
def file_to_img(self, filepath):
print 'Processing '+ filepath
img = cv2.imread(filepath)
return img
def file_to_video(self, filepath):
print 'processing '+ filepath
try:
video = cv2.VideoCapture(filepath)
        except IOError:
            print 'cannot open video file: ' + filepath
            video = None
return video
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
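    # Worked example for iou() above (boxes are centre-based [x, y, w, h]):
    #   iou([0, 0, 2, 2], [0, 0, 2, 2]) == 1.0            (identical boxes)
    #   iou([0, 0, 2, 2], [1, 0, 2, 2]) == 2 / 6 ~= 0.33  (half-width horizontal shift)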
def find_iou_cost(self, pred_locs, gts):
# for each element in the batch, find its iou. output a list of ious.
cost = 0
#batch_size= len(pred_locs)
batch_size= self.batch_size
#assert (len(gts)== batch_size)
#print("gts: ", gts)
#print("batch_size: ", batch_size)
#print("pred_locs: ", pred_locs)
#ious = []
ious = np.zeros((batch_size, 4))
for i in range(batch_size):
pred_loc = pred_locs[i,:]
#print("pred_loc[i]: ", pred_loc)
gt = gts[i,:]
iou_ = self.iou(pred_loc, gt)
#ious.append(iou_)
#print("iou_", iou_)
ious[i,:]= iou_
#ious= tf.reshape(ious, batch_size)
#print("ious: ", ious)
'''
avg_iou= 0
for i in range(batch_size):
pred_loc = pred_locs[i,:]
gt= gts[i,:]
print("gt", gt)
#print("pred_loc", pred_loc)
avg_iou += self.iou(pred_loc, gt)
avg_iou /= batch_size
print("avg_iou shape: ", tf.shape(avg_iou)) # single tensor expected
return avg_iou'''
return ious
def load_folder(self, path):
paths = [os.path.join(path,fn) for fn in next(os.walk(path))[2]]
return paths
def load_dataset_gt(self, gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') #'\r\n'
return lines
def find_gt_location(self, lines, id):
#print("lines length: ", len(lines))
#print("id: ", id)
line = lines[id]
elems = line.split('\t') # for gt type 2
#print(elems)
if len(elems) < 4:
elems = line.split(',') #for gt type 1
#print(elems)
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_best_location(self, locations, gt_location):
# locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
# gt_location (x1, y1, w, h)
x1 = gt_location[0]
y1 = gt_location[1]
w = gt_location[2]
h = gt_location[3]
gt_location_revised= [x1 + w/2, y1 + h/2, w, h]
max_ious= 0
        for id, location in enumerate(locations):
location_revised = location[1:5]
ious = self.iou(location_revised, gt_location_revised)
if ious >= max_ious:
max_ious = ious
index = id
return locations[index]
def save_yolo_output(self, out_fold, yolo_output, filename):
name_no_ext= os.path.splitext(filename)[0]
output_name= name_no_ext + ".yolo"
path = os.path.join(out_fold, output_name)
        pickle.dump(yolo_output, open(path, "wb"))
def load_yolo_output(self, fold, batch_size, num_steps, step):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
st= step*batch_size*num_steps
ed= (step+1)*batch_size*num_steps
paths_batch = paths[st:ed]
yolo_output_batch= []
ct= 0
for path in paths_batch:
ct += 1
#yolo_output= pickle.load(open(path, "rb"))
yolo_output = np.load(path)
yolo_output= np.reshape(yolo_output, 4102)
yolo_output[4096]= 0
yolo_output[4101]= 0
yolo_output_batch.append(yolo_output)
print(yolo_output_batch)
yolo_output_batch= np.reshape(yolo_output_batch, [batch_size*num_steps, 4102])
return yolo_output_batch
def load_rolo_gt(self, path, batch_size, num_steps, step):
lines= self.load_dataset_gt(path)
offset= num_steps - 2 # offset is for prediction of the future
st= step*batch_size*num_steps
ed= (step+1)*batch_size*num_steps
#print("st: " + str(st))
#print("ed: " + str(ed))
batch_locations= []
for id in range(st+offset, ed+offset, num_steps):
location= self.find_gt_location(lines, id)
batch_locations.append(location)
return batch_locations
def load_yolo_output_test(self, fold, batch_size, num_steps, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
st= id
ed= id + batch_size*num_steps
paths_batch = paths[st:ed]
yolo_output_batch= []
ct= 0
for path in paths_batch:
ct += 1
yolo_output = np.load(path)
#print(yolo_output)
yolo_output= np.reshape(yolo_output, 4102)
yolo_output_batch.append(yolo_output)
yolo_output_batch= np.reshape(yolo_output_batch, [batch_size*num_steps, 4102])
return yolo_output_batch
def load_yolo_feat_test_MOLO(self, fold, batch_size, num_steps, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
st= id
ed= id + batch_size*num_steps
paths_batch = paths[st:ed]
yolo_output_batch= []
ct= 0
for path in paths_batch:
ct += 1
yolo_output = np.load(path)
#print(yolo_output[0][0][0])
#print(len(yolo_output[0][0][0]))
yolo_output_new= np.concatenate(
( np.reshape(yolo_output[0][0][0], [-1, 4096]),
np.reshape([0,0,0,0,0,0], [-1, 6]) ),
axis = 1)
yolo_output_new= np.reshape(yolo_output_new, 4102)
yolo_output_batch.append(yolo_output_new)
yolo_output_batch= np.reshape(yolo_output_batch, [batch_size*num_steps, 4102])
#print 'yolo_output_batch:' + str(yolo_output_batch)
return yolo_output_batch
def load_yolo_output_test_MOLO(self, fold, batch_size, num_steps, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
st= id
ed= id + batch_size*num_steps
paths_batch = paths[st:ed]
yolo_output_batch= []
ct= 0
for path in paths_batch:
ct += 1
yolo_output = np.load(path)
#print(yolo_output[0][0][0])
#print(len(yolo_output[0][0][0]))
#yolo_output_new= np.concatenate(
# ( np.reshape(yolo_output[0][0][0], [-1, 4096]),
# np.reshape([0,0,0,0,0,0], [-1, 6]) ),
# axis = 1)
#yolo_output_new= np.reshape(yolo_output_new, 4102)
yolo_output_batch.append(yolo_output)
#yolo_output_batch= np.reshape(yolo_output_batch, [batch_size*num_steps, 4102])
#print 'yolo_output_batch:' + str(yolo_output_batch)
return yolo_output_batch
def load_rolo_gt_test(self, path, batch_size, num_steps, id):
lines= self.load_dataset_gt(path)
offset= num_steps - 2 # offset is for prediction of the future
st= id
ed= id + batch_size*num_steps
batch_locations= []
for id in range(st+offset, ed+offset, num_steps):
location= self.find_gt_location(lines, id)
batch_locations.append(location)
return batch_locations
#-----------------------------------------------------------------------------------------------
def coordinates_to_heatmap_vec(self, coord):
heatmap_vec = np.zeros(1024)
[x1, y1, x2, y2] = coord
for y in range(y1, y2+1):
for x in range(x1, x2+1):
index = y*32 + x
heatmap_vec[index] = 1.0 #random.uniform(0.8, 1)#1.0
return heatmap_vec
def heatmap_vec_to_heatmap(self, heatmap_vec):
size = 32
heatmap= np.zeros((size, size))
for y in range(0, size):
for x in range(0, size):
index = y*size + x
heatmap[y][x] = heatmap_vec[index]
return heatmap
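    # Worked example for the two helpers above: the heatmap is a flattened
    # 32x32 grid with index = y * 32 + x. coordinates_to_heatmap_vec([1, 1, 2, 2])
    # therefore sets indices 33, 34, 65 and 66 to 1.0, and heatmap_vec_to_heatmap()
    # maps them back to the cells (x, y) with x in {1, 2} and y in {1, 2}.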
def draw_heatmap(self, heatmap):
fig = plt.figure(1, figsize=(10,10))
ax2 = fig.add_subplot(222)
#print(heatmap)
ax2.imshow(heatmap, origin='lower', aspect='auto')
ax2.set_title("heatmap")
plt.show()
#cv2.imshow('YOLO_small detection',heatmap)
#cv2.waitKey(1)
def load_yolo_output_heatmap(self, fold, batch_size, num_steps, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
st= id
ed= id + batch_size*num_steps
paths_batch = paths[st:ed]
yolo_output_batch= []
ct= 0
for path in paths_batch:
ct += 1
yolo_output = np.load(path)
#print(yolo_output)
yolo_output= np.reshape(yolo_output, 5120)
yolo_output_batch.append(yolo_output)
yolo_output_batch= np.reshape(yolo_output_batch, [batch_size*num_steps, 5120])
return yolo_output_batch
def createFolder( path):
if not os.path.exists(path):
os.makedirs(path)
def load_folder( path):
paths = [os.path.join(path,fn) for fn in next(os.walk(path))[2]]
return sorted(paths)
def load_dataset_gt(gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') #'\r\n'
return lines
def find_gt_location( lines, id):
line = lines[id]
elems = line.split('\t') # for gt type 2
if len(elems) < 4:
elems = line.split(',') # for gt type 1
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_yolo_location( fold, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
path= paths[id-1]
#print(path)
yolo_output = np.load(path)
#print(yolo_output[0][4096:4102])
yolo_location= yolo_output[0][4097:4101]
return yolo_location
import re
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
def find_yolo_kalman_location( fold, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
sort_nicely(paths)
path= paths[id-1]
f = open(path, 'r')
yolo_kalman_str = f.read().split(' ')
yolo_kalman = [float(yolo_kalman_str[0]),float(yolo_kalman_str[1]),float(yolo_kalman_str[2]),float(yolo_kalman_str[3]) ]
yolo_kalman_location= yolo_kalman[0:4]
return yolo_kalman_location
def find_rolo_location( fold, id):
filename= str(id) + '.npy'
path= os.path.join(fold, filename)
rolo_output = np.load(path)
return rolo_output
def file_to_img( filepath):
img = cv2.imread(filepath)
return img
def debug_location( img, location):
img_cp = img.copy()
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_gt_location( img, location):
img_cp = img.copy()
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('gt',img_cp)
cv2.waitKey(1)
def debug_3_locations( img, gt_location, yolo_location, rolo_location):
img_cp = img.copy()
for i in range(3): # b-g-r channels
if i== 0: location= gt_location; color= (0, 0, 255) # red for gt
elif i ==1: location= yolo_location; color= (255, 0, 0) # blue for yolo
elif i ==2: location= rolo_location; color= (0, 255, 0) # green for rolo
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
if i == 1 or i== 2: cv2.rectangle(img_cp,(x-w//2, y-h//2),(x+w//2,y+h//2), color, 2)
elif i== 0: cv2.rectangle(img_cp,(x,y),(x+w,y+h), color, 2)
cv2.imshow('3 locations',img_cp)
cv2.waitKey(100)
return img_cp
def debug_kalman_locations(img, gt_location, yolo_location):
img_cp = img.copy()
for i in range(2): # b-g-r channels
if i== 0: location= gt_location; color= (0, 0, 255) # red for gt
elif i ==1: location= yolo_location; color= (255, 0, 0) # blue for yolo_kalman
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp,(x,y),(x+w,y+h), color, 2)
cv2.imshow('2 locations',img_cp)
cv2.waitKey(100)
return img_cp
def debug_2_locations( img, gt_location, yolo_location):
img_cp = img.copy()
for i in range(2): # gt and yolo
if i== 0: location= gt_location; color= (0, 0, 255) # red for gt
elif i ==1: location= yolo_location; color= (255, 0, 0) # blue for yolo
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
if i == 1: cv2.rectangle(img_cp,(x-w//2, y-h//2),(x+w//2,y+h//2), color, 2)
elif i== 0: cv2.rectangle(img_cp,(x,y),(x+w,y+h), color, 2)
cv2.imshow('2 locations',img_cp)
cv2.waitKey(100)
return img_cp
def save_rolo_output(out_fold, rolo_output, filename):
name_no_ext= os.path.splitext(filename)[0]
output_name= name_no_ext
path = os.path.join(out_fold, output_name)
np.save(path, rolo_output)
def save_rolo_output(out_fold, rolo_output, step, num_steps, batch_size):
assert(len(rolo_output)== batch_size)
st= step * batch_size * num_steps - 2
for i in range(batch_size):
id = st + (i + 1)* num_steps + 1
pred = rolo_output[i]
path = os.path.join(out_fold, str(id))
np.save(path, pred)
def save_rolo_output_test( out_fold, rolo_output, step, num_steps, batch_size):
assert(len(rolo_output)== batch_size)
st= step - 2 #* batch_size * num_steps
for i in range(batch_size):
id = st + (i + 1)* num_steps + 1
pred = rolo_output[i]
path = os.path.join(out_fold, str(id))
np.save(path, pred)
def save_rolo_output_heatmap( out_fold, rolo_heat, step, num_steps, batch_size):
assert(len(rolo_heat)== batch_size)
st= step - 2 #* batch_size * num_steps
for i in range(batch_size):
id = st + (i + 1)* num_steps + 1
pred = rolo_heat[i]
path = os.path.join(out_fold, str(id))
np.save(path, pred)
def locations_normal(wid, ht, locations):
#print("location in func: ", locations)
wid *= 1.0
ht *= 1.0
locations[0] *= wid
locations[1] *= ht
locations[2] *= wid
locations[3] *= ht
return locations
def locations_from_0_to_1(wid, ht, locations):
#print("location in func: ", locations[0][0])
wid *= 1.0
ht *= 1.0
for i in range(len(locations)):
# convert top-left point (x,y) to mid point (x, y)
locations[i][0] += locations[i][2] / 2.0
locations[i][1] += locations[i][3] / 2.0
# convert to [0, 1]
locations[i][0] /= wid
locations[i][1] /= ht
locations[i][2] /= wid
locations[i][3] /= ht
return locations
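# Illustrative example (not in the original file): locations_from_0_to_1()
# converts [x_top_left, y_top_left, w, h] boxes in pixels to mid-point boxes
# normalized to [0, 1]. With a 640x480 frame:
#   locations_from_0_to_1(640, 480, [[100, 100, 40, 60]])
#   # -> [[0.1875, 0.2708..., 0.0625, 0.125]]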
def validate_box(box):
for i in range(len(box)):
if math.isnan(box[i]): box[i] = 0
def iou(box1, box2):
# Prevent NaN in benchmark results
validate_box(box1)
validate_box(box2)
# change float to int, in order to prevent overflow
box1 = map(int, box1)
box2 = map(int, box2)
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb <= 0 or lr <= 0 :
intersection = 0
#print "intersection= 0"
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
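# Illustrative example (not in the original file): iou() takes boxes in
# mid-point form [x_center, y_center, w, h] and returns intersection over union:
#   iou([50, 50, 20, 20], [50, 50, 20, 20])    # -> 1.0
#   iou([10, 10, 20, 20], [100, 100, 20, 20])  # -> 0.0 (no overlap)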
def iou_0_1(box1, box2, w, h):
box1 = locations_normal(w,h,box1)
box2 = locations_normal(w,h,box2)
#print box1
#print box2
return iou(box1,box2)
def cal_rolo_IOU(location, gt_location):
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss = iou(location, gt_location)
return loss
def cal_yolo_IOU(location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss = iou(location, gt_location)
return loss
def cal_benchmark_IOU(location, gt_location):
loss = iou(location, gt_location)
return loss
def cal_rolo_score(location, gt_location, thresh):
rolo_iou = cal_rolo_IOU(location, gt_location)
if rolo_iou >= thresh:
score = 1
else:
score = 0
return score
def cal_yolo_score(location, gt_location, thresh):
yolo_iou = cal_yolo_IOU(location, gt_location)
if yolo_iou >= thresh:
score = 1
else:
score = 0
return score
def cal_yolo_kalman_score(location, gt_location, thresh):
yolo_iou = iou(location, gt_location)
if yolo_iou >= thresh:
score = 1
else:
score = 0
return score
def cal_benchmark_score(location, gt_location, thresh):
benchmark_iou = cal_benchmark_IOU(location, gt_location)
if benchmark_iou >= thresh:
score = 1
else:
score = 0
return score
def load_yolo_output_test(fold, batch_size, num_steps, id):
paths = [os.path.join(fold,fn) for fn in next(os.walk(fold))[2]]
paths = sorted(paths)
st= id
ed= id + batch_size*num_steps
paths_batch = paths[st:ed]
yolo_output_batch= []
ct= 0
for path in paths_batch:
ct += 1
yolo_output = np.load(path)
#print(yolo_output)
yolo_output= np.reshape(yolo_output, 4102)
yolo_output_batch.append(yolo_output)
yolo_output_batch= np.reshape(yolo_output_batch, [batch_size*num_steps, 4102])
return yolo_output_batch
def choose_video_sequence(test):
# For VOT-30:
if test == 0:
w_img, h_img = [480, 640]
sequence_name = 'Human2'
training_iters = 250
testing_iters = 1128
elif test == 1:
w_img, h_img = [320, 240]
sequence_name = 'Human9'
training_iters = 70
testing_iters = 302
elif test == 2:
w_img, h_img = [320, 240]
sequence_name = 'Suv'
training_iters = 314
testing_iters = 943
elif test == 3:
w_img, h_img = [640, 480]
sequence_name = 'BlurBody'
training_iters = 111
testing_iters = 334
elif test == 4:
w_img, h_img = [640, 480]
sequence_name = 'BlurCar1'
training_iters = 247
testing_iters = 742#988
elif test == 5:
w_img, h_img = [352, 240]
sequence_name = 'Dog'
training_iters = 42
testing_iters = 127
elif test == 6:
w_img, h_img = [624, 352]
sequence_name = 'Singer2'
training_iters = 121
testing_iters = 366
elif test == 7:
w_img, h_img = [352, 288]
sequence_name = 'Woman'
training_iters = 198
testing_iters = 597
elif test == 8:
w_img, h_img = [640, 480]
sequence_name = 'David3'
training_iters = 83
testing_iters = 252
elif test == 9:
w_img, h_img = [320, 240]
sequence_name = 'Human7'
training_iters = 83
testing_iters = 250
elif test == 10:
w_img, h_img = [720, 400]
sequence_name = 'Bird1'
training_iters = 135
testing_iters = 408
elif test == 11:
w_img, h_img = [360, 240]
sequence_name = 'Car4'
training_iters = 219
testing_iters = 659
elif test == 12:
w_img, h_img = [320, 240]
sequence_name = 'CarDark'
training_iters = 130
testing_iters = 393
elif test == 13:
w_img, h_img = [320, 240]
sequence_name = 'Couple'
training_iters = 46
testing_iters = 140
elif test == 14:
w_img, h_img = [400, 224]
sequence_name = 'Diving'
training_iters = 71
testing_iters = 214
elif test == 15:
w_img, h_img = [480, 640]
sequence_name = 'Human3'
training_iters = 565
testing_iters = 1698
elif test == 16:
w_img, h_img = [480, 640]
sequence_name = 'Human6'
training_iters = 263
testing_iters = 792
elif test == 17:
w_img, h_img = [624, 352]
sequence_name = 'Singer1'
training_iters = 116
testing_iters = 351
elif test == 18:
w_img, h_img = [384, 288]
sequence_name = 'Walking2'
training_iters = 166
testing_iters = 500
elif test == 19:
w_img, h_img = [640, 480]
sequence_name = 'BlurCar3'
training_iters = 117
testing_iters = 356
elif test == 20:
w_img, h_img = [640, 480]
sequence_name = 'Girl2'
training_iters = 499
testing_iters = 1500
elif test == 21:
w_img, h_img = [640, 360]
sequence_name = 'Skating1'
training_iters = 133
testing_iters = 400
elif test == 22:
w_img, h_img = [320, 240]
sequence_name = 'Skater'
training_iters = 50
testing_iters = 160
elif test == 23:
w_img, h_img = [320, 262]
sequence_name = 'Skater2'
training_iters = 144
testing_iters = 435
elif test == 24:
w_img, h_img = [320, 246]
sequence_name = 'Dancer'
training_iters = 74
testing_iters = 225
elif test == 25:
w_img, h_img = [320, 262]
sequence_name = 'Dancer2'
training_iters = 49
testing_iters = 150
elif test == 26:
w_img, h_img = [640, 272]
sequence_name = 'CarScale'
training_iters = 81
testing_iters = 252
elif test == 27:
w_img, h_img = [426, 234]
sequence_name = 'Gym'
training_iters = 255
testing_iters = 767
elif test == 28:
w_img, h_img = [320, 240]
sequence_name = 'Human8'
training_iters = 42
testing_iters = 128
elif test == 29:
w_img, h_img = [416, 234]
sequence_name = 'Jump'
training_iters = 40
testing_iters = 122
# For MOT 2016:
# training
elif test == 30:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-02'
training_iters = 199
testing_iters = 600
elif test == 31:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-04'
training_iters = 349
testing_iters = 1050
elif test == 32:
w_img, h_img = [640, 480]
sequence_name = 'MOT16-05'
training_iters = 278
testing_iters = 837
elif test == 33:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-09'
training_iters = 174
testing_iters = 525
elif test == 34:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-10'
training_iters = 217
testing_iters = 654
elif test == 35:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-11'
training_iters = 299
testing_iters = 900
elif test == 36:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-13'
training_iters = 249
testing_iters = 750
# testing
elif test == 37:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-01'
training_iters = 149
testing_iters = 450
elif test == 38:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-03'
training_iters = 499
testing_iters = 1500
elif test == 39:
w_img, h_img = [640, 480]
sequence_name = 'MOT16-06'
training_iters = 397
testing_iters = 1194
elif test == 40:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-07'
training_iters = 166
testing_iters = 500
elif test == 41:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-08'
training_iters = 208
testing_iters = 625
elif test == 42:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-12'
training_iters = 299
testing_iters = 900
elif test == 43:
w_img, h_img = [1920, 1080]
sequence_name = 'MOT16-14'
training_iters = 249
testing_iters = 750
# For performance test only
elif test == 90:
w_img, h_img = [352, 288]
sequence_name = 'Jogging_1'
training_iters = 100
testing_iters = 300
elif test == 91:
w_img, h_img = [352, 288]
sequence_name = 'Jogging_2'
training_iters = 100
testing_iters = 300
elif test == 92:
w_img, h_img = [640, 480]
sequence_name = 'Boy'
training_iters = 199
testing_iters = 602
elif test == 93:
w_img, h_img = [352, 288]
sequence_name = 'Jumping'
training_iters = 103
testing_iters = 313
elif test == 94:
w_img, h_img = [480, 360]
sequence_name = 'Surfer'
training_iters = 125
testing_iters = 376
elif test == 95:
w_img, h_img = [640, 332]
sequence_name = 'Trans'
training_iters = 41
testing_iters = 124
elif test == 96:
w_img, h_img = [640, 360]
sequence_name = 'DragonBaby'
training_iters = 37
testing_iters = 113
elif test == 97:
w_img, h_img = [640, 480]
sequence_name = 'Liquor'
training_iters = 580
testing_iters = 1741
return [w_img, h_img, sequence_name, training_iters, testing_iters]
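# Illustrative example (not in the original file): each entry above maps a
# test index to [image_width, image_height, sequence_name, training_iters,
# testing_iters], e.g.
#   w_img, h_img, seq, n_train, n_test = choose_video_sequence(5)
#   # -> 352, 240, 'Dog', 42, 127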
|
|
#!/usr/bin/env python
# Copyright (c) 2015, Carnegie Mellon University
# All rights reserved.
# Authors: Siddhartha Srinivasa <siddh@cs.cmu.edu>
# Authors: Michael Koval <mkoval@cs.cmu.edu>
# Authors: David Butterworth <dbworth@cmu.edu>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import logging
import numpy
import openravepy
from .. import util
from base import BasePlanner, PlanningError, ClonedPlanningMethod, Tags
from ..collision import SimpleRobotCollisionChecker
from enum import Enum
from openravepy import CollisionOptions, CollisionOptionsStateSaver
logger = logging.getLogger(__name__)
class TerminationError(PlanningError):
def __init__(self):
super(TerminationError, self).__init__('Terminated by callback.')
class TimeLimitError(PlanningError):
def __init__(self):
super(TimeLimitError, self).__init__('Reached time limit.')
class Status(Enum):
'''
CONTINUE - keep going
TERMINATE - stop gracefully and output the CACHEd trajectory
CACHE_AND_CONTINUE - save the current trajectory and CONTINUE.
return the saved trajectory if TERMINATEd.
CACHE_AND_TERMINATE - save the current trajectory and TERMINATE
'''
TERMINATE = -1
CACHE_AND_CONTINUE = 0
CONTINUE = 1
CACHE_AND_TERMINATE = 2
@classmethod
def DoesTerminate(cls, status):
return status in [cls.TERMINATE, cls.CACHE_AND_TERMINATE]
@classmethod
def DoesCache(cls, status):
return status in [cls.CACHE_AND_CONTINUE, cls.CACHE_AND_TERMINATE]
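# Minimal sketch (an assumption, not part of this module) of how a termination
# callback uses the Status protocol above; goal_reached() and
# milestone_reached() are hypothetical predicates:
#   def fn_terminate_example():
#       if goal_reached():          # stop and keep everything so far
#           return Status.CACHE_AND_TERMINATE
#       if milestone_reached():     # keep the trajectory up to this point
#           return Status.CACHE_AND_CONTINUE
#       return Status.CONTINUE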
class VectorFieldPlanner(BasePlanner):
def __init__(self, robot_collision_checker=SimpleRobotCollisionChecker):
super(VectorFieldPlanner, self).__init__()
self.robot_collision_checker = robot_collision_checker
def __str__(self):
return 'VectorFieldPlanner'
@ClonedPlanningMethod
def PlanToEndEffectorPose(self, robot, goal_pose, timelimit=5.0,
pose_error_tol=0.01,
integration_interval=10.0,
**kw_args):
"""
Plan to an end effector pose by following a geodesic loss function
in SE(3) via an optimized Jacobian.
@param robot
@param goal_pose desired end-effector pose
@param timelimit time limit before giving up
@param pose_error_tol in meters
@param integration_interval The time interval to integrate over
@return traj
"""
manip = robot.GetActiveManipulator()
def vf_geodesic():
"""
Define a joint-space vector field that moves along the
geodesic (shortest path) from the start pose to the goal pose.
"""
twist = util.GeodesicTwist(manip.GetEndEffectorTransform(),
goal_pose)
dqout, tout = util.ComputeJointVelocityFromTwist(
robot, twist, joint_velocity_limits=numpy.PINF)
# Go as fast as possible
vlimits = robot.GetDOFVelocityLimits(robot.GetActiveDOFIndices())
return min(abs(vlimits[i] / dqout[i])
if dqout[i] != 0. else 1.
for i in xrange(vlimits.shape[0])) * dqout
def CloseEnough():
"""
The termination condition.
At each integration step, the geodesic error between the
current end-effector pose and the goal pose is computed. If it is
within the threshold, the integration will terminate.
"""
pose_error = util.GetGeodesicDistanceBetweenTransforms(
manip.GetEndEffectorTransform(), goal_pose)
if pose_error < pose_error_tol:
return Status.TERMINATE
return Status.CONTINUE
traj = self.FollowVectorField(robot, vf_geodesic, CloseEnough,
integration_interval,
timelimit,
**kw_args)
# Flag this trajectory as unconstrained. This overwrites the
# constrained flag set by FollowVectorField.
util.SetTrajectoryTags(traj, {Tags.CONSTRAINED: False}, append=True)
return traj
@ClonedPlanningMethod
def PlanToEndEffectorOffset(self, robot, direction, distance,
max_distance=None, timelimit=5.0,
position_tolerance=0.01,
angular_tolerance=0.15,
integration_interval=10.0,
**kw_args):
"""
Plan to a desired end-effector offset with move-hand-straight
constraint. Movement less than distance will return failure.
The motion will not move further than max_distance.
@param robot
@param direction unit vector in the direction of motion
@param distance minimum distance in meters
@param max_distance maximum distance in meters
@param timelimit timeout in seconds
@param position_tolerance constraint tolerance in meters
@param angular_tolerance constraint tolerance in radians
@param integration_interval The time interval to integrate over
@return traj
"""
if distance < 0:
raise ValueError('Distance must be non-negative.')
elif numpy.linalg.norm(direction) == 0:
raise ValueError('Direction must be non-zero')
elif max_distance is not None and max_distance < distance:
raise ValueError('Max distance is less than minimum distance.')
elif position_tolerance < 0:
raise ValueError('Position tolerance must be non-negative.')
elif angular_tolerance < 0:
raise ValueError('Angular tolerance must be non-negative.')
# Normalize the direction vector.
direction = numpy.array(direction, dtype='float')
direction = util.NormalizeVector(direction)
manip = robot.GetActiveManipulator()
Tstart = manip.GetEndEffectorTransform()
def vf_straightline():
"""
Function defining a joint-space vector field.
"""
twist = util.GeodesicTwist(manip.GetEndEffectorTransform(),
Tstart)
twist[0:3] = direction
dqout, _ = util.ComputeJointVelocityFromTwist(
robot, twist, joint_velocity_limits=numpy.PINF)
return dqout
def TerminateMove():
"""
Function defining the termination condition.
Fail if the deviation exceeds the position or angular tolerance.
Succeed if distance moved is larger than max_distance.
Cache and continue if distance moved is larger than distance.
"""
from .exceptions import ConstraintViolationPlanningError
Tnow = manip.GetEndEffectorTransform()
geodesic_error = util.GeodesicError(Tstart, Tnow)
orientation_error = geodesic_error[3]
position_error = geodesic_error[0:3]
if numpy.fabs(orientation_error) > angular_tolerance:
raise ConstraintViolationPlanningError(
'Deviated from orientation constraint.')
distance_moved = numpy.dot(position_error, direction)
position_deviation = numpy.linalg.norm(position_error -
distance_moved * direction)
if position_deviation > position_tolerance:
raise ConstraintViolationPlanningError(
'Deviated from straight line constraint.')
if max_distance is None:
if distance_moved > distance:
return Status.CACHE_AND_TERMINATE
elif distance_moved > max_distance:
return Status.TERMINATE
elif distance_moved >= distance:
return Status.CACHE_AND_CONTINUE
return Status.CONTINUE
return self.FollowVectorField(robot, vf_straightline, TerminateMove,
integration_interval, timelimit,
**kw_args)
@ClonedPlanningMethod
def PlanWorkspacePath(self, robot, traj,
timelimit=5.0,
position_tolerance=0.01,
angular_tolerance=0.15,
t_step=0.001,
Kp_ff=None,
Kp_e=None,
integration_interval=10.0,
**kw_args):
"""
Plan a configuration space path given a workspace path.
Trajectory timing information is ignored.
@param openravepy.Robot robot: The robot.
@param openravepy.Trajectory traj: Workspace trajectory,
represented as an
OpenRAVE AffineTrajectory.
@param float timelimit: Max planning time (seconds).
@param float position_tolerance: Constraint tolerance (meters).
@param float angular_tolerance: Constraint tolerance (radians).
@param float t_step: Time step to find vector tangent to current
position on the trajectory, using finite
differences.
@param numpy.array Kp_ff: Feed-forward gain.
A 1x6 vector, where first 3 elements
affect the translational velocity,
the last 3 elements affect the
rotational velocity.
@param numpy.array Kp_e: Error gain.
A 1x6 vector, where first 3 elements
affect the translational velocity,
the last 3 elements affect the
rotational velocity.
@param integration_interval The time interval to integrate over.
@return openravepy.Trajectory qtraj: Configuration space path.
"""
if not util.IsTrajectoryTypeIkParameterizationTransform6D(traj):
raise ValueError("Trajectory is not a workspace trajectory, it "
"must have configuration specification of "
"openravepy.IkParameterizationType.Transform6D")
if util.IsTimedTrajectory(traj):
raise ValueError("PlanWorkspacePath expected an un-timed "
"trajectory.")
if position_tolerance < 0.0:
raise ValueError('Position tolerance must be non-negative.')
elif angular_tolerance < 0.0:
raise ValueError('Angular tolerance must be non-negative.')
# Time the trajectory based on its distance
traj = util.ComputeGeodesicUnitTiming(traj, env=None, alpha=1.0)
# Set the default gains
if Kp_ff is None:
Kp_ff = 0.4 * numpy.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
if Kp_e is None:
Kp_e = 1.0 * numpy.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
manip = robot.GetActiveManipulator()
# Get the final end-effector pose
duration = traj.GetDuration()
T_ee_goal = openravepy.matrixFromPose(traj.Sample(duration)[0:7])
def vf_path():
"""
Function defining a joint-space vector field.
"""
T_ee_actual = manip.GetEndEffectorTransform()
# Find where we are on the goal trajectory by finding
# the closest point
(_, t, _) = util.GetMinDistanceBetweenTransformAndWorkspaceTraj(
T_ee_actual, traj, 0.0005)
# Get the desired end-effector transform from
# the goal trajectory
desired_T_ee = openravepy.matrixFromPose(traj.Sample(t)[0:7])
# Get the next end-effector transform, using finite-differences
pose_ee_next = traj.Sample(t + t_step)[0:7]
desired_T_ee_next = openravepy.matrixFromPose(pose_ee_next)
# Get the translation tangent to current position
tangent_vec = desired_T_ee_next[0:3, 3] - desired_T_ee[0:3, 3]
# Get the translational error
position_error_vec = desired_T_ee[0:3, 3] - T_ee_actual[0:3, 3]
# Get the translational error perpendicular to the path
tangent_trans_error = \
position_error_vec - numpy.dot(
position_error_vec, util.NormalizeVector(tangent_vec))
tangent_trans_error = numpy.nan_to_num(tangent_trans_error)
# The twist between the actual end-effector position and
# where it should be on the goal trajectory
# (the error term)
twist_perpendicular = util.GeodesicTwist(T_ee_actual,
desired_T_ee)
twist_perpendicular[0:3] = tangent_trans_error
# The twist tangent to where the end-effector should be
# on the goal trajectory
# (the feed-forward term)
twist_parallel = util.GeodesicTwist(desired_T_ee,
desired_T_ee_next)
# Normalize the translational and angular velocity of
# the feed-forward twist
twist_parallel[0:3] = util.NormalizeVector(twist_parallel[0:3])
twist_parallel[3:6] = util.NormalizeVector(twist_parallel[3:6])
# Apply gains
twist = Kp_e * twist_perpendicular + Kp_ff * twist_parallel
# Calculate joint velocities using an optimized jacobian
dqout, _ = util.ComputeJointVelocityFromTwist(
robot, twist, joint_velocity_limits=numpy.PINF)
return dqout
def TerminateMove():
"""
Function defining the termination condition.
Fail if the deviation from the workspace path exceeds the position
or angular tolerance.
Cache and terminate when the end of the workspace trajectory is
reached within tolerance.
"""
from .exceptions import ConstraintViolationPlanningError
T_ee_curr = manip.GetEndEffectorTransform()
# Find where we are on the goal trajectory by finding
# the closest point
(_, t, _) = util.GetMinDistanceBetweenTransformAndWorkspaceTraj(
T_ee_curr, traj, 0.0005)
# Get the desired end-effector transform from
# the goal trajectory
desired_T_ee = openravepy.matrixFromPose(traj.Sample(t)[0:7])
# Get the position vector tangent to the trajectory,
# using finite-differences
pose_ee_next = traj.Sample(t + t_step)[0:7]
desired_T_ee_next = openravepy.matrixFromPose(pose_ee_next)
tangent_vec = desired_T_ee_next[0:3, 3] - desired_T_ee[0:3, 3]
# Calculate error between current end-effector pose
# and where we should be on the goal trajectory
geodesic_error = util.GeodesicError(desired_T_ee, T_ee_curr)
orientation_error = geodesic_error[3]
position_error_vec = geodesic_error[0:3]
# Use only the translation error that is perpendicular
# to our current position
tangent_trans_error = \
position_error_vec - numpy.dot(
position_error_vec, util.NormalizeVector(tangent_vec))
tangent_trans_error = numpy.nan_to_num(tangent_trans_error)
position_error = tangent_trans_error
if numpy.fabs(orientation_error) > angular_tolerance:
raise ConstraintViolationPlanningError(
'Deviated from orientation constraint.')
position_deviation = numpy.linalg.norm(position_error)
if position_deviation > position_tolerance:
raise ConstraintViolationPlanningError(
'Deviated from straight line constraint.')
# Check if we have reached the end of the goal trajectory
error_to_goal = util.GeodesicError(T_ee_curr, T_ee_goal)
orientation_error = error_to_goal[3] # radians
position_error = error_to_goal[0:3] # x,y,z
if ((numpy.fabs(orientation_error) < angular_tolerance) and
(numpy.linalg.norm(position_error) < position_tolerance)):
return Status.CACHE_AND_TERMINATE
return Status.CONTINUE
return self.FollowVectorField(robot, vf_path, TerminateMove,
integration_interval,
timelimit, **kw_args)
@ClonedPlanningMethod
def FollowVectorField(self, robot, fn_vectorfield, fn_terminate,
integration_time_interval=10.0, timelimit=5.0,
sampling_func=util.SampleTimeGenerator,
norm_order=2, **kw_args):
"""
Follow a joint space vectorfield to termination.
@param robot
@param fn_vectorfield a vectorfield of joint velocities
@param fn_terminate custom termination condition
@param integration_time_interval The time interval to integrate
over.
@param timelimit time limit before giving up
@param sampling_func sample generator to compute validity checks
Note: Function will terminate as soon as invalid configuration is
encountered. No more samples will be requested from the
sampling_func after this occurs.
@param norm_order order of norm to use for collision checking
@param kw_args keyword arguments to be passed to fn_vectorfield
@return traj
"""
from .exceptions import (
CollisionPlanningError,
SelfCollisionPlanningError,
)
from openravepy import CollisionReport, RaveCreateTrajectory
from ..util import GetLinearCollisionCheckPts
import time
import scipy.integrate
CheckLimitsAction = openravepy.KinBody.CheckLimitsAction
# This is a workaround to emulate 'nonlocal' in Python 2.
nonlocals = {
'exception': None,
't_cache': None,
't_check': 0.,
}
env = robot.GetEnv()
active_indices = robot.GetActiveDOFIndices()
# Create a new trajectory matching the current
# robot's joint configuration specification
cspec = robot.GetActiveConfigurationSpecification('linear')
cspec.AddDeltaTimeGroup()
cspec.ResetGroupOffsets()
path = RaveCreateTrajectory(env, '')
path.Init(cspec)
time_start = time.time()
def fn_wrapper(t, q):
"""
The integrator will try to solve this equation
at each time step.
Note: t is the integration time and is non-monotonic.
"""
# Set the joint values, without checking the joint limits
robot.SetActiveDOFValues(q, CheckLimitsAction.Nothing)
return fn_vectorfield()
def fn_status_callback(t, q):
"""
Check joint-limits and collisions for a specific joint
configuration. This is called multiple times at DOF
resolution in order to check along the entire length of the
trajectory.
Note: This is called by fn_callback, which is currently
called after each integration time step, which means we are
doing more checks than required.
"""
if time.time() - time_start >= timelimit:
raise TimeLimitError()
# Check joint position limits.
# We do this before setting the joint angles.
util.CheckJointLimits(robot, q)
robot.SetActiveDOFValues(q)
# Check collision (throws an exception on collision)
robot_checker.VerifyCollisionFree()
# Check the termination condition.
status = fn_terminate()
if Status.DoesCache(status):
nonlocals['t_cache'] = t
if Status.DoesTerminate(status):
raise TerminationError()
def fn_callback(t, q):
"""
This is called at every successful integration step.
"""
try:
# Add the waypoint to the trajectory.
waypoint = numpy.zeros(cspec.GetDOF())
cspec.InsertDeltaTime(waypoint, t - path.GetDuration())
cspec.InsertJointValues(waypoint, q, robot, active_indices, 0)
path.Insert(path.GetNumWaypoints(), waypoint)
# Run constraint checks at DOF resolution.
if path.GetNumWaypoints() == 1:
checks = [(t, q)]
else:
# TODO: This will recheck the entire trajectory
# Ideally should just check the new portion of the trajectory
checks = GetLinearCollisionCheckPts(robot, path,
norm_order=norm_order,
sampling_func=sampling_func)
# start_time=nonlocals['t_check'])
for t_check, q_check in checks:
fn_status_callback(t_check, q_check)
# Record the time of this check so we continue checking at
# DOF resolution the next time the integrator takes a step.
nonlocals['t_check'] = t_check
return 0 # Keep going.
except PlanningError as e:
nonlocals['exception'] = e
return -1 # Stop.
with CollisionOptionsStateSaver(self.env.GetCollisionChecker(),
CollisionOptions.ActiveDOFs):
# Instantiate a robot checker
robot_checker = self.robot_collision_checker(robot)
# Integrate the vector field to get a configuration space path.
#
# TODO: Tune the integrator parameters.
#
# Integrator: 'dopri5'
# DOPRI (Dormand & Prince 1980) is an explicit method for solving ODEs.
# It is a member of the Runge-Kutta family of solvers.
integrator = scipy.integrate.ode(f=fn_wrapper)
integrator.set_integrator(name='dopri5',
first_step=0.1,
atol=1e-3,
rtol=1e-3)
# Set function to be called at every successful integration step.
integrator.set_solout(fn_callback)
integrator.set_initial_value(y=robot.GetActiveDOFValues(), t=0.)
integrator.integrate(t=integration_time_interval)
t_cache = nonlocals['t_cache']
exception = nonlocals['exception']
if t_cache is None:
raise exception or PlanningError(
'An unknown error has occurred.', deterministic=True)
elif exception:
logger.warning('Terminated early: %s', str(exception))
# Remove any parts of the trajectory that are not cached. This also
# strips the (potentially infeasible) timing information.
output_cspec = robot.GetActiveConfigurationSpecification('linear')
output_path = RaveCreateTrajectory(env, '')
output_path.Init(output_cspec)
# Add all waypoints before the last integration step. GetWaypoints does
# not include the upper bound, so this is safe.
cached_index = path.GetFirstWaypointIndexAfterTime(t_cache)
output_path.Insert(0, path.GetWaypoints(0, cached_index), cspec)
# Add a segment for the feasible part of the last integration step.
output_path.Insert(output_path.GetNumWaypoints(),
path.Sample(t_cache),
cspec)
util.SetTrajectoryTags(output_path, {
Tags.SMOOTH: True,
Tags.CONSTRAINED: True,
Tags.DETERMINISTIC_TRAJECTORY: True,
Tags.DETERMINISTIC_ENDPOINT: True,
}, append=True)
return output_path
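# Illustrative usage sketch (not part of the original module); `robot` and the
# motion parameters below are assumptions, not values from this file.
#   planner = VectorFieldPlanner()
#   # Move the end-effector 10 cm along world +z, allowing up to 15 cm.
#   traj = planner.PlanToEndEffectorOffset(robot, direction=[0., 0., 1.],
#                                          distance=0.10, max_distance=0.15)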
|
|
class cell:
def __init__(self, y, x, icon, cellnum, board):
self.chricon = icon
self.coords = [y, x]
self.evopoints = 0
self.idnum = cellnum
self.playable = False
self.mutmove = False #so clearStatus() is safe before learnMove() is called
self.learnedmutations = {
'move' : False,
'sight' : False,
'strike' : True, #Basic damaging strike
'wall' : False, #Passes turn but doubles def
'leap' : True #Moves you two spaces in any direction
}
self.buffs = {
'alive' : True,
'phased' : False,
'wall' : False,
'hurt' : False,
'paralyzed' : False,
'move' : False
}
self.directionIDs = {
'north' : 0,
'south' : 1,
'east' : 2,
'west' : 3,
'NE' : 4,
'NW' : 5,
'SE' : 6,
'SW' : 7
}
#Stat variables
self.level = 1
self.hp = 10
self.maxhp = 10
self.damage = 1
self.attack = 1 #% damage increase
self.defense = 1 #% damage reduction
self.agility = 1 #Affects dodge chance and crit damage
self.critchance = 25 #% chance to crit
self.critdamage = 200 #% increase to base damage when critting
self.healmult = 100 #% effectiveness of healing
#TODO: Learn mutation functions
#Will take the form "self.learnfoo"
#Where "foo" is the same word used in the flag
#Set spawned tile as occupied
self.updateLocation(self.coords,board)
def learnMove(self):
self.mutmove = True
if not self.buffs['paralyzed']:
self.setBuff('move', True)
#TODO: Dodge chance
def hurt(self,amt,board):
dmg = amt / (1 + (self.defense / 100.0))
if dmg >= self.hp:
self.kill(board)
self.hp = 0
elif dmg > 0:
self.hp -= dmg
self.setBuff('hurt', True)
def heal(self,amt,healmult):
healamt = amt * (healmult / 100.0)
if (self.hp + healamt) > self.maxhp:
self.hp = self.maxhp
elif healamt > 0:
self.hp += healamt
#TODO: Status effects
def setParalyzed(self):
self.setBuff('move', False)
self.setBuff('paralyzed', True)
#TODO: Active Ability Effects
#Just processes effects, doesn't check for range or anything else
def doStrike(self, targetcells, board):
base_amt = self.damage * (1 + (self.attack / 100.0))
for i in targetcells:
amt = base_amt
if self.checkCrit():
amt *= (self.critdamage / 100.0)
i.hurt(amt,board)
def doWall(self):
self.defense *= 2
self.setBuff('wall', True)
def doLeap(self,board,direction):
if direction == 0:
self.moveNorth(board,2)
elif direction == 1:
self.moveSouth(board,2)
elif direction == 2:
self.moveEast(board,2)
elif direction == 3:
self.moveWest(board,2)
elif direction == 4:
self.moveNE(board,2)
elif direction == 5:
self.moveNW(board,2)
elif direction == 6:
self.moveSE(board,2)
elif direction == 7:
self.moveSW(board,2)
def checkCrit(self):
#TODO: Critical strikes
return False
def kill(self, board):
self.setBuff('alive', False)
tile = board.getTile(self.coords)
tile.setOccupied(False)
def clearStatus(self):
if self.mutmove and not self.buffs['move']:
self.setBuff('move', True)
self.setBuff('paralyzed', False)
def isPlayable(self):
return self.playable
def isAlive(self):
return self.buffs['alive']
def getIcon(self):
return self.chricon
#Returns a list of form [y, x, icon], also referred to as a cell type
def getFormattedList(self):
return [self.coords[0], self.coords[1], self.chricon]
#Returns list of coordinates in form [y, x]
def getCoords(self):
return self.coords
def updateLocation(self, dest, board):
tileprev = board.getTile(self.coords)
tilenew = board.getTile(dest)
tileprev.occupied = False
#if not self.buffs['phased']:
self.coords = dest
tilenew.occupied = True
def checkCollision(self, dest, board):
oldtile = board.getTile(self.coords)
tile = board.getTile(dest)
passable = tile.isPassable()
if passable:
self.updateLocation(dest, board)
def moveNorth(self, board, amt=1):
dest = [self.coords[0] - amt, self.coords[1]]
self.checkCollision(dest, board)
def moveSouth(self, board, amt=1):
dest = [self.coords[0] + amt, self.coords[1]]
self.checkCollision(dest, board)
def moveEast(self, board, amt=1):
dest = [self.coords[0], self.coords[1] + amt]
self.checkCollision(dest, board)
def moveWest(self, board, amt=1):
dest = [self.coords[0], self.coords[1] - amt]
self.checkCollision(dest, board)
def moveNE(self, board, amt=1):
dest = [self.coords[0] - amt, self.coords[1] + amt]
self.checkCollision(dest, board)
def moveNW(self, board, amt=1):
dest = [self.coords[0] - amt, self.coords[1] - amt]
self.checkCollision(dest, board)
def moveSE(self, board, amt=1):
dest = [self.coords[0] + amt, self.coords[1] + amt]
self.checkCollision(dest, board)
def moveSW(self, board, amt=1):
dest = [self.coords[0] + amt, self.coords[1] - amt]
self.checkCollision(dest, board)
#Helper functions
def calcDist(self, y1, x1, y2, x2):
y3 = pow(y2 - y1, 2)
x3 = pow(x2 - x1, 2)
ret = pow(x3 + y3, 0.5)
return ret
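#Illustrative example (not in the original file): calcDist() is plain
#Euclidean distance between grid coordinates, e.g.
#   self.calcDist(0, 0, 3, 4)   # -> 5.0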
def isWithinRange(self, y1, x1, y2, x2, _range):
dist = self.calcDist(y1, x1, y2, x2)
if dist <= _range:
return True
else:
return False
def setBuff(self, buff, status):
self.buffs[buff] = status
def startOfTurn(self):
if self.buffs['wall']:
self.setBuff('wall', False)
if self.buffs['hurt']:
self.setBuff('hurt', False)
def getCoordinateOffset(self,direction,amt):
if direction == 0: #North
return [self.coords[0] - amt, self.coords[1]]
elif direction == 1: #South
return [self.coords[0] + amt, self.coords[1]]
elif direction == 2: #East
return [self.coords[0], self.coords[1] + amt]
elif direction == 3: #West
return [self.coords[0], self.coords[1] - amt]
elif direction == 4: #North East
return [self.coords[0] - amt, self.coords[1] + amt]
elif direction == 5: #North West
return [self.coords[0] - amt, self.coords[1] - amt]
elif direction == 6: #South East
return [self.coords[0] + amt, self.coords[1] + amt]
elif direction == 7: #South West
return [self.coords[0] + amt, self.coords[1] - amt]
class player(cell):
def __init__(self, y, x, icon, cellnum, board):
cell.__init__(self, y, x, icon, cellnum, board)
self.playable = True
#Key definitions
self.movekeys = {
'north': 'w',
'south': 'x',
'east': 's',
'west': 'a',
'NE': 'f',
'NW': 'q',
'SE': 'c',
'SW': 'z'
}
self.activekeys = {
'strike': 'p',
'leap': 't',
'wall': 'v'
}
self.waitkey = 'r'
def getInput(self, board, window, inpt):
if inpt == self.waitkey:
pass
elif self.buffs['move']:
self.move(self.pickDirection(inpt),board)
if self.learnedmutations['strike'] and inpt == self.activekeys['strike']:
dirinpt = window.getkey()
direction = self.pickDirection(dirinpt)
target = self.getCoordinateOffset(direction,1)
cellList = board.getCells(target)
self.doStrike(cellList, board)
elif self.learnedmutations['leap'] and inpt == self.activekeys['leap']:
leapinpt = window.getkey()
direction = self.pickDirection(leapinpt)
self.doLeap(board,direction)
elif self.learnedmutations['wall'] and inpt == self.activekeys['wall']:
self.doWall()
def pickDirection(self, inpt):
if inpt == self.movekeys['north']:
return self.directionIDs['north']
elif inpt == self.movekeys['south']:
return self.directionIDs['south']
elif inpt == self.movekeys['east']:
return self.directionIDs['east']
elif inpt == self.movekeys['west']:
return self.directionIDs['west']
elif inpt == self.movekeys['NE']:
return self.directionIDs['NE']
elif inpt == self.movekeys['NW']:
return self.directionIDs['NW']
elif inpt == self.movekeys['SE']:
return self.directionIDs['SE']
elif inpt == self.movekeys['SW']:
return self.directionIDs['SW']
def move(self, direction, board):
if direction == 0:
self.moveNorth(board, 1)
elif direction == 1:
self.moveSouth(board, 1)
elif direction == 2:
self.moveEast(board, 1)
elif direction == 3:
self.moveWest(board, 1)
elif direction == 4:
self.moveNE(board, 1)
elif direction == 5:
self.moveNW(board, 1)
elif direction == 6:
self.moveSE(board, 1)
elif direction == 7:
self.moveSW(board, 1)
|
|
import argparse, sys, os, yaml, logging, errno, locale
from datetime import datetime as DateTime
import git
class MultiPatchCli(object):
"""Command-line entry point."""
def __init__(self, argv):
self.argv = argv
self.settings = None
def run(self):
parser = self.make_parser()
self.settings = self.parse_args(parser)
self.logger = logging.getLogger("multipatch")
message_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
time_format = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO, format=message_format,
datefmt=time_format)
command_method = 'run_{0}_command'.format(self.settings.command)
command = getattr(self, command_method, None)
try:
return command()
except self.CliError as ex:
sys.stderr.write(unicode(ex))
sys.stderr.write("\n")
return 1
def run_create_command(self):
"""Configure git's remotes and branches to have the configured remote
repositories and some non-checked out branches."""
repo, tracking = self.get_config()
# Possibly not necessary?
for head in repo.heads:
if head.name == "master":
head.checkout()
break
if repo.active_branch.name != "master":
self.raise_error("repo could not switch to master")
fetched = set()
def fetch_remote(remote):
if remote.name not in fetched:
self.log("fetching remote {0}", remote.name)
remote.fetch()
fetched.add(remote.name)
for remote in tracking['remotes']:
# There does not appear to be an API for checking whether a remote
# exists, so look it up and catch the error.
try:
existing = repo.remotes[remote['name']]
except IndexError:
existing = None
if existing:
self.log("{0} exists; set url to {1}", remote['name'], remote['uri'])
config = existing.config_writer
config.set('url', remote['uri'])
del config
else:
self.log("create remote {0}; set url to {1}", remote['name'], remote['uri'])
repo.create_remote(remote['name'], remote['uri'])
if self.settings.fetch or not tracking['branches']:
fetch_remote(repo.remotes[remote['name']])
for branch in tracking['branches']:
if 'remote' not in branch:
self.log("skip local-only branch {0!r}", branch)
continue
self.log("create branch {0!r}", branch)
remote = repo.remotes[branch['remote']]
# The branch can't be created until its remote has been fetched.
if remote.name not in fetched:
fetch_remote(remote)
remote_branch = remote.refs[branch['branch']]
# Create a local tracking branch without checking it out. This is
# not actually all that useful for logging but can be useful if you
# want to view the actual source.
path = ".".join([remote.name, branch['branch']])
branch = repo.create_head(path, commit=remote_branch.commit)
branch.set_tracking_branch(remote_branch)
return 0
def run_log_command(self):
"""Print the logs of the tracked branches in chronological order."""
try:
branches = self.get_branches_to_log()
self.print_logs_chronologically(branches)
return 0
except KeyboardInterrupt:
# Avoid spammy exception when we quit in the middle.
return 0
def get_branches_to_log(self):
"""Initialise iterators for commits on each branch we want to track."""
repo, tracking = self.get_config_for_logging()
wip = []
for branch in tracking['branches']:
if 'remote' in branch:
remote = repo.remotes[branch['remote']]
ref = remote.refs[branch['branch']]
else:
ref = repo.branches[branch['branch']]
commits = git.objects.Commit.iter_items(repo, ref.name)
try:
top = commits.next()
except StopIteration:
continue
wip.append({'ref': ref, 'top': top, 'iter': commits})
return wip
def print_logs_chronologically(self, wip):
"""
Sort in ascending order of commit date and print the highest (i.e. the
most recent). When a branch runs out of commits to print, remove it
from the next iteration.
"""
last_date = None
while wip:
wip.sort(key=lambda entry: entry['top'].committed_date, reverse=False)
current = wip[-1]
this_date = DateTime.fromtimestamp(current['top'].committed_date).date()
should_print = self.settings.split_days and this_date != last_date
if should_print:
print("On {0}:".format(this_date.isoformat()))
last_date = this_date
try:
self.print_pretty_log_message(ref=current['ref'], commit=current['top'])
except IOError as ex:
if ex.errno == errno.EPIPE:
return
else:
raise
try:
current['top'] = current['iter'].next()
except StopIteration:
wip.pop()
return
def print_pretty_log_message(self, ref, commit):
words = commit.author.name.split(' ')
initials = u"".join([word[0].upper() for word in words])
message = u" ".join([
unicode(DateTime.fromtimestamp(commit.committed_date)),
commit.hexsha[0:6],
ref.name,
initials,
commit.summary.strip()[0:90]
])
# Very awkward. Default encoding is chosen on stdout by not well
# documented means, probably by the C locale. If it's not a tty then
# it's usually ascii in which case we need to pre-encode or things will
# crash whenever you have a multi-byte commit message, even though the
# git library is dealing with this case properly.
#
# (There is probably a nicer way to do this)
if sys.stdout.isatty():
print(message)
else:
print(message.encode(locale.getpreferredencoding()))
if self.settings.stat and commit.stats.files:
for path, change in commit.stats.files.iteritems():
print " -{0} +{1}".format(change['deletions'], change['insertions']).ljust(10), path
if self.settings.patch:
diffs = commit.diff()
if diffs:
print
for diff in diffs:
print diff
print
def get_config(self):
config, looked_in = self.get_config_file()
if not config:
self.raise_error("no such file: {0}", looked_in)
with open(config) as io:
tracking = yaml.load(io.read())
return git.Repo(self.settings.root), tracking
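# Illustrative sketch (an assumption, inferred from how run_create_command and
# get_branches_to_log read the keys): .git/multipatch.yml is expected to look
# roughly like
#
#   remotes:
#     - name: upstream
#       uri: git@example.org:project/repo.git
#   branches:
#     - remote: upstream
#       branch: master
#     - branch: local-topic      # local-only branch, no 'remote' key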
def get_config_for_logging(self):
"""Use config or git branches and whatnot to find some things to log."""
ignore_config = [self.settings.everything, self.settings.all_masters,
self.settings.all_remotes]
if True not in ignore_config:
repo, tracking = self.get_config()
return repo, self.filter_branches(tracking)
repo = git.Repo(self.settings.root)
remotes = self.find_logables_from_remotes(repo)
locals = self.find_logables_from_locals(repo)
tracking = {}
tracking['branches'] = remotes + locals
return repo, self.filter_branches(tracking)
def find_logables_from_locals(self, repo):
logables = []
for branch in repo.branches:
if self.settings.everything:
logables.append({'branch': branch.name, 'ref': branch})
return logables
def find_logables_from_remotes(self, repo):
logables = []
for remote in repo.remotes:
for ref in remote.refs:
logable = {
'remote': remote.name,
'branch': ref.name.replace(remote.name + "/", ''),
'ref': ref,
}
if self.settings.all_remotes or self.settings.everything:
logables.append(logable)
elif logable['branch'] == "master" and self.settings.all_masters:
logables.append(logable)
return logables
def filter_branches(self, tracking):
"""Remove stuff excluded with -x."""
filtered = []
for entry in tracking['branches']:
skip = False
for exclude in self.settings.exclude:
name = entry.get('remote', '') + "/" + entry['branch']
if exclude in name:
skip = True
if not skip:
filtered.append(entry)
return {'branches': filtered}
def get_config_file(self):
looked_in = [
os.path.join(self.settings.root, ".git", "multipatch.yml"),
]
for path in looked_in:
if os.path.exists(path):
return path, looked_in
return None, looked_in
def make_parser(self):
parser = argparse.ArgumentParser()
commands = parser.add_subparsers(dest="command")
create = commands.add_parser("create")
create.add_argument('root')
create.add_argument('-f', '--fetch', action='store_true',
help="Fetch remotes we aren't using for branches.")
log = commands.add_parser("log")
log.add_argument('root', nargs="?", default=os.getcwd())
log.add_argument("-m", "--all-masters", action="store_true",
help="Show logs from all remotes branches named 'master'.")
log.add_argument("-A", "--all-remotes", action="store_true",
help="Show logs from all remotes.")
log.add_argument("-e", "--everything", action="store_true",
help="Show logs from all remotes.")
log.add_argument("-x", "--exclude", action='append', default=[],
help="Exclude ref names matching.")
log.add_argument("-s", "--stat", action='store_true',
help="Show stat.")
log.add_argument("-p", "--patch", action='store_true',
help="Show patch.")
log.add_argument("-d", "--split-days", action='store_true',
help="Print a header when the day changes.")
return parser
def parse_args(self, parser):
settings = parser.parse_args(self.argv[1:])
return settings
def log(self, message, *parts, **kparts):
message = message.format(*parts, **kparts)
self.logger.info(message)
class CliError(Exception):
pass
def raise_error(self, message, *parts, **kparts):
error = message.format(*parts, **kparts)
raise self.CliError(error)
def main():
sys.exit(MultiPatchCli(sys.argv).run())
if __name__ == "__main__":
main()
|
|
import httplib as http
import logging
import os
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from dropbox.client import DropboxClient, DropboxOAuth2Flow
from dropbox.rest import ErrorResponse
from flask import request
from framework.auth import Auth
from framework.exceptions import HTTPError
from framework.sessions import session
from osf.models.external import ExternalProvider
from osf.models.files import File, FileNode, Folder
from urllib3.exceptions import MaxRetryError
from addons.base import exceptions
from addons.dropbox import settings
from addons.dropbox.serializer import DropboxSerializer
from website.util import api_v2_url, web_url_for
logger = logging.getLogger(__name__)
class DropboxFileNode(FileNode):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.files.models.dropbox.DropboxFileNode'
modm_query = None
# /TODO DELETE ME POST MIGRATION
provider = 'dropbox'
class DropboxFolder(DropboxFileNode, Folder):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.files.models.dropbox.DropboxFolder'
modm_query = None
# /TODO DELETE ME POST MIGRATION
pass
class DropboxFile(DropboxFileNode, File):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.files.models.dropbox.DropboxFile'
modm_query = None
# /TODO DELETE ME POST MIGRATION
pass
class Provider(ExternalProvider):
name = 'Dropbox'
short_name = 'dropbox'
client_id = settings.DROPBOX_KEY
client_secret = settings.DROPBOX_SECRET
# Explicitly override auth_url_base as None -- DropboxOAuth2Flow handles this for us
auth_url_base = None
callback_url = None
handle_callback = None
@property
def oauth_flow(self):
if 'oauth_states' not in session.data:
session.data['oauth_states'] = {}
if self.short_name not in session.data['oauth_states']:
session.data['oauth_states'][self.short_name] = {
'state': None
}
return DropboxOAuth2Flow(
self.client_id,
self.client_secret,
redirect_uri=web_url_for(
'oauth_callback',
service_name=self.short_name,
_absolute=True
),
session=session.data['oauth_states'][self.short_name], csrf_token_session_key='state'
)
@property
def auth_url(self):
return self.oauth_flow.start('force_reapprove=true')
# Overrides ExternalProvider
def auth_callback(self, user):
# TODO: consider not using client library during auth flow
try:
access_token, dropbox_user_id, url_state = self.oauth_flow.finish(request.values)
except (DropboxOAuth2Flow.NotApprovedException, DropboxOAuth2Flow.BadStateException):
# 1) user cancelled and client library raised exc., or
# 2) the state was manipulated, possibly due to time.
# Either way, return and display info about how to properly connect.
return
except (DropboxOAuth2Flow.ProviderException, DropboxOAuth2Flow.CsrfException):
raise HTTPError(http.FORBIDDEN)
except DropboxOAuth2Flow.BadRequestException:
raise HTTPError(http.BAD_REQUEST)
self.client = DropboxClient(access_token)
info = self.client.account_info()
return self._set_external_account(
user,
{
'key': access_token,
'provider_id': info['uid'],
'display_name': info['display_name'],
}
)
class UserSettings(BaseOAuthUserSettings):
"""Stores user-specific dropbox information.
token.
"""
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.addons.dropbox.model.DropboxUserSettings'
modm_query = None
# /TODO DELETE ME POST MIGRATION
oauth_provider = Provider
serializer = DropboxSerializer
def revoke_remote_oauth_access(self, external_account):
"""Overrides default behavior during external_account deactivation.
Tells Dropbox to remove the grant for the OSF associated with this account.
"""
client = DropboxClient(external_account.oauth_key)
try:
client.disable_access_token()
except ErrorResponse:
pass
class NodeSettings(BaseStorageAddon, BaseOAuthNodeSettings):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.addons.dropbox.model.DropboxNodeSettings'
modm_query = None
# /TODO DELETE ME POST MIGRATION
oauth_provider = Provider
serializer = DropboxSerializer
folder = models.TextField(null=True, blank=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True)
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = Provider(self.external_account)
return self._api
@property
def folder_id(self):
return self.folder
@property
def folder_name(self):
return (os.path.split(self.folder or '')[1] or '/ (Full Dropbox)') if self.folder else None
@property
def folder_path(self):
return self.folder
@property
def display_name(self):
return '{0}: {1}'.format(self.config.full_name, self.folder)
def clear_settings(self):
self.folder = None
def fetch_folder_name(self):
return self.folder_name
def get_folders(self, **kwargs):
folder_id = kwargs.get('folder_id')
if folder_id is None:
return [{
'addon': 'dropbox',
'id': '/',
'path': '/',
'kind': 'folder',
'name': '/ (Full Dropbox)',
'urls': {
'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id),
params={'id': '/'}
)
}
}]
client = DropboxClient(self.external_account.oauth_key)
file_not_found = HTTPError(http.NOT_FOUND, data={
'message_short': 'File not found',
'message_long': 'The Dropbox file you requested could not be found.'
})
max_retry_error = HTTPError(http.REQUEST_TIMEOUT, data={
'message_short': 'Request Timeout',
'message_long': 'Dropbox could not be reached at this time.'
})
try:
metadata = client.metadata(folder_id)
except ErrorResponse:
raise file_not_found
except MaxRetryError:
raise max_retry_error
# Raise error if folder was deleted
if metadata.get('is_deleted'):
raise file_not_found
return [
{
'addon': 'dropbox',
'kind': 'folder',
'id': item['path'],
'name': item['path'].split('/')[-1],
'path': item['path'],
'urls': {
'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id),
params={'id': item['path']}
)
}
}
for item in metadata['contents']
if item['is_dir']
]
def set_folder(self, folder, auth):
self.folder = folder
# Add log to node
self.nodelogger.log(action='folder_selected', save=True)
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
folder = self.folder
self.clear_settings()
if add_log:
extra = {'folder': folder}
self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)
self.clear_auth()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.folder:
raise exceptions.AddonError('Folder is not configured')
return {'folder': self.folder}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file',
path=metadata['path'].strip('/'),
provider='dropbox'
)
self.owner.add_log(
'dropbox_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['path'],
'folder': self.folder,
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def __repr__(self):
return u'<NodeSettings(node_id={self.owner._primary_key!r})>'.format(self=self)
##### Callback overrides #####
def after_delete(self, node, user):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
|
|
# -*- coding: utf-8 -*-
"""
@file
@brief Helpers, inspired by `utils.py <https://github.com/winpython/winpython/blob/master/winpython/utils.py>`_
"""
from __future__ import print_function
import os
import os.path as osp
import subprocess
import re
import sys
import locale
# =============================================================================
# Patch shebang line (courtesy of Christoph Gohlke)
# =============================================================================
def patch_shebang_line(fname, pad=b' ', fLOG=print):
"""
Remove absolute path to python.exe in shebang lines.
@param fname name of the file to patch
@param pad pad
@param fLOG logging function
@return boolean, True if patched, False otherwise
"""
if sys.version_info[0] == 2:
shebang_line = re.compile(r"(#!.+pythonw?\.exe)") # Python2.7
else:
shebang_line = re.compile(b"(#!.+pythonw?\\.exe)") # Python3+
with open(fname, 'rb') as fh:
content = fh.read()
content = shebang_line.split(content, maxsplit=1)
if len(content) != 3:
return False
exe = os.path.basename(content[1][2:])
content[1] = b'#!' + exe + (pad * (len(content[1]) - len(exe) - 2))
content = b''.join(content)
try:
with open(fname, 'wb') as fh:
fh.write(content)
fLOG("[pymy] patched", fname)
return True
except Exception:
fLOG("[pymy] failed to patch", fname)
return False
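# Illustrative sketch (not part of the original module): applying
# patch_shebang_line to every file of a hypothetical ``Scripts`` directory.
def _example_patch_all_shebangs(scripts_dir, fLOG=print):
    """Patch the shebang line of every file found in *scripts_dir*."""
    patched = []
    for name in os.listdir(scripts_dir):
        full = osp.join(scripts_dir, name)
        if osp.isfile(full) and patch_shebang_line(full, fLOG=fLOG):
            patched.append(full)
    return patched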
def get_env(name, current=True):
"""
Return HKCU/HKLM environment variable name and value
@param name name to look for
@param current switch between *HKEY_CURRENT_USER* (True) and *HKEY_LOCAL_MACHINE* (False)
@return tuple (see below)
    For example, ``get_env('PATH')`` may return::
('Path', u'C:\\Program Files\\Intel\\WiFi\\bin\\')
"""
import winreg
root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
key = winreg.OpenKey(root, "Environment")
for index in range(0, winreg.QueryInfoKey(key)[1]):
try:
value = winreg.EnumValue(key, index)
if value[0].lower() == name.lower():
# Return both value[0] and value[1] because value[0] could be
# different from name (lowercase/uppercase)
return value[0], value[1]
except Exception:
break
def set_env(name, value, current=True):
"""
Set HKCU/HKLM environment variables
    @param name name to look for
    @param value value to assign
    @param current switch between *HKEY_CURRENT_USER* (True) and *HKEY_LOCAL_MACHINE* (False)
"""
import winreg
root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
key = winreg.OpenKey(root, "Environment")
try:
_x, key_type = winreg.QueryValueEx(key, name)
except WindowsError:
key_type = winreg.REG_EXPAND_SZ
key = winreg.OpenKey(root, "Environment", 0, winreg.KEY_SET_VALUE)
winreg.SetValueEx(key, name, 0, key_type, value)
from win32gui import SendMessageTimeout
from win32con import (HWND_BROADCAST, WM_SETTINGCHANGE,
SMTO_ABORTIFHUNG)
SendMessageTimeout(HWND_BROADCAST, WM_SETTINGCHANGE, 0,
"Environment", SMTO_ABORTIFHUNG, 5000)
def create_shortcut(path, description, filename,
arguments="", workdir="", iconpath="", iconindex=0):
"""
Create Windows shortcut (.lnk file)
    @param path shortcut target; the .lnk file is also saved inside this path
    @param description description
    @param filename link name (".lnk" is appended if missing)
    @param arguments arguments to store
    @param workdir working directory
    @param iconpath icon path
    @param iconindex icon index
    @return full path of the created shortcut
"""
import pythoncom
from win32com.shell import shell
ilink = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None,
pythoncom.CLSCTX_INPROC_SERVER,
shell.IID_IShellLink)
ilink.SetPath(path)
ilink.SetDescription(description)
if arguments:
ilink.SetArguments(arguments)
if workdir:
ilink.SetWorkingDirectory(workdir)
if iconpath or iconindex:
ilink.SetIconLocation(iconpath, iconindex)
# now save it.
ipf = ilink.QueryInterface(pythoncom.IID_IPersistFile)
if not filename.endswith('.lnk'):
filename += '.lnk'
filename = os.path.join(path, filename)
ipf.Save(filename, 0)
return filename
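# Illustrative sketch (not part of the original module, Windows only,
# requires pywin32): creating a shortcut to a hypothetical tools folder.
# Note that create_shortcut saves the .lnk file inside *path* itself.
def _example_create_shortcut(folder="C:\\tools"):
    return create_shortcut(folder, "Shortcut to the tools folder", "tools")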
def decode_fs_string(string):
"""Convert string from file system charset to unicode"""
charset = sys.getfilesystemencoding()
if charset is None:
charset = locale.getpreferredencoding()
return string.decode(charset)
def exec_shell_cmd(args, path):
"""Execute shell command (*args* is a list of arguments) in *path*"""
# print " ".join(args)
process = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=path, shell=True)
return decode_fs_string(process.stdout.read())
def get_gcc_version(path):
"""Return version of the GCC compiler installed in *path*"""
return exec_shell_cmd('gcc --version', path).splitlines()[0].split()[-1]
def get_r_version(path):
"""Return version of the R installed in *path*"""
return exec_shell_cmd('dir ..\\README.R*', path).splitlines()[-3].split("-")[-1]
def get_julia_version(path):
"""Return version of the Julia installed in *path*"""
return exec_shell_cmd('julia.exe -v', path).splitlines()[0].split(" ")[-1]
def python_query(cmd, path):
"""Execute Python command using the Python interpreter located in *path*"""
res = exec_shell_cmd('python -c "%s"' % cmd, path).splitlines()
if not res:
raise Exception(
"CMD:\n{0}\nRES:\n{1}\nPATH:\n{2}".format(cmd, res, path))
return res[0]
def get_python_infos(path):
"""Return (version, architecture) for the Python distribution located in
*path*. The version number is limited to MAJOR.MINOR, the architecture is
an integer: 32 or 64"""
is_64 = python_query('import sys; print(sys.maxsize > 2**32)', path)
arch = {'True': 64, 'False': 32}.get(is_64, None)
ver = python_query("import sys; print('%d.%d' % (sys.version_info.major, "
"sys.version_info.minor))", path)
if re.match(r'([0-9]*)\.([0-9]*)', ver) is None:
ver = None
return ver, arch
def get_python_long_version(path):
"""Return long version (X.Y.Z) for the Python distribution located in
*path*"""
ver = python_query("import sys; print('%d.%d.%d' % "
"(sys.version_info.major, sys.version_info.minor,"
"sys.version_info.micro))", path)
if re.match(r'([0-9]*)\.([0-9]*)\.([0-9]*)', ver) is None:
ver = None
return ver
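# Illustrative sketch (not part of the original module): querying a
# hypothetical Python distribution. On a 64 bit Python 3.6.1 install this
# would return something like ('3.6', 64, '3.6.1').
def _example_python_infos(path="C:\\Python36"):
    short_ver, arch = get_python_infos(path)
    long_ver = get_python_long_version(path)
    return short_ver, arch, long_ver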
# =============================================================================
# Patch sourcefile (instead of forking packages)
# =============================================================================
def patch_sourcefile(fname, in_text, out_text, silent_mode=False):
"""Replace a string in a source file"""
import io
if osp.isfile(fname) and not in_text == out_text:
with io.open(fname, 'r') as fh:
content = fh.read()
new_content = content.replace(in_text, out_text)
if not new_content == content:
if not silent_mode:
print("patching ", fname, "from", in_text, "to", out_text)
with io.open(fname, 'wt') as fh:
fh.write(new_content)
# =============================================================================
# Patch sourcelines (instead of forking packages)
# =============================================================================
def patch_sourcelines(fname, in_line_start, out_line, endline='\n', silent_mode=False):
"""Replace the middle of lines between in_line_start and endline """
import io
import os.path as osp
if osp.isfile(fname):
with io.open(fname, 'r') as fh:
contents = fh.readlines()
content = "".join(contents)
for l in range(len(contents)):
if contents[l].startswith(in_line_start):
begining, middle = in_line_start, contents[
l][len(in_line_start):]
ending = ""
if middle.find(endline) > 0:
ending = endline + \
endline.join(middle.split(endline)[1:])
middle = middle.split(endline)[0]
middle = out_line
new_line = begining + middle + ending
if not new_line == contents[l]:
if not silent_mode:
print(
"patching ", fname, " from\n", contents[l], "\nto\n", new_line)
contents[l] = new_line
new_content = "".join(contents)
if not new_content == content:
# if not silent_mode:
# print("patching ", fname, "from", content, "to", new_content)
with io.open(fname, 'wt') as fh:
try:
fh.write(new_content)
except Exception as e:
print("impossible to patch", fname, "from", content,
"to", new_content, " --- ", str(e).replace("\n", "--"))
WININST_PATTERN = (r'([a-zA-Z0-9\-\_]*|[a-zA-Z\-\_\.]*)-([0-9\.\-]*[a-z]*[0-9]?)(-Qt-([0-9\.]+))?.(win32|win\-amd64)' +
r'(-py([0-9\.]+))?(-setup)?\.exe')
# SOURCE_PATTERN defines what an acceptable source package name is
# As of 2014-09-08 :
# - the wheel package format is accepted in the source directory
# - the tricky regexp is also tuned to support the odd joblib naming :
# . joblib-0.8.3_r1-py2.py3-none-any.whl,
# . joblib-0.8.3-r1.tar.gz
SOURCE_PATTERN = r'([a-zA-Z0-9\-\_\.]*)-([0-9\.\_]*[a-z]*[0-9]?)(\.zip|\.tar\.gz|\-(py[2-7]*|py[2-7]*\.py[2-7]*)\-none\-any\.whl)'
# WHEELBIN_PATTERN defines what an acceptable binary wheel package is
# "cp([0-9]*)" to replace per cp(34) for python3.4
# "win32|win\_amd64" to replace per "win\_amd64" for 64bit
WHEELBIN_PATTERN = r'([a-zA-Z0-9\-\_\.]*)-([0-9\.\_]*[a-z0-9\+]*[0-9]?)-cp([0-9]*)\-none\-(win32|win\_amd64)\.whl'
def get_source_package_infos(fname):
"""Return a tuple (name, version) of the Python source package"""
match = re.match(SOURCE_PATTERN, osp.basename(fname))
if match is not None:
return match.groups()[:2]
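# Illustrative examples (not part of the original module): SOURCE_PATTERN
# accepts both universal wheels and plain archives. The wheel name is taken
# from the comment above, the sdist name is hypothetical; the calls return
# ('joblib', '0.8.3_r1') and ('joblib', '0.8.3') respectively.
def _example_source_package_infos():
    return (get_source_package_infos('joblib-0.8.3_r1-py2.py3-none-any.whl'),
            get_source_package_infos('joblib-0.8.3.tar.gz'))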
def do_script(this_script, python_exe=None,
verbose=False, install_options=None):
"""Execute a script (get-pip typically)"""
if python_exe is None:
python_exe = sys.executable
assert osp.isfile(python_exe)
myroot = os.path.dirname(python_exe)
# cmd = [python_exe, myroot + r'\Scripts\pip-script.py', 'install']
cmd = [python_exe]
if install_options:
cmd += install_options # typically ['--no-deps']
print('script install_options', install_options)
cmd += [this_script]
# print('build_wheel', myroot, cmd)
print("Executing ", cmd)
if verbose:
subprocess.call(cmd, cwd=myroot)
else:
p = subprocess.Popen(cmd, cwd=myroot, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.communicate()
p.stdout.close()
p.stderr.close()
if verbose:
print("Executed " % cmd)
return 'ok'
KEY_C = r"Software\Classes\%s"
KEY_C0 = KEY_C % r"Python.%sFile\shell"
KEY_C1 = KEY_C % r"Python.%sFile\shell\%s"
KEY_C2 = KEY_C1 + r"\command"
KEY_DROP0 = KEY_C % r"Python.%sFile\shellex"
KEY_DROP1 = KEY_C % r"Python.%sFile\shellex\DropHandler"
KEY_I = KEY_C % r"Python.%sFile\DefaultIcon"
KEY_D = KEY_C % r"Python.%sFile"
EWI = "Edit with IDLE"
EWS = "Edit with Spyder"
KEY_S = r"Software\Python"
KEY_S0 = KEY_S + r"\PythonCore"
KEY_S1 = KEY_S0 + r"\%s"
def register(target, current=True):
"""Register a Python distribution in Windows registry"""
import winreg
root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
# Extensions
winreg.SetValueEx(winreg.CreateKey(root, KEY_C % ".py"),
"", 0, winreg.REG_SZ, "Python.File")
winreg.SetValueEx(winreg.CreateKey(root, KEY_C % ".pyw"),
"", 0, winreg.REG_SZ, "Python.NoConFile")
winreg.SetValueEx(winreg.CreateKey(root, KEY_C % ".pyc"),
"", 0, winreg.REG_SZ, "Python.CompiledFile")
winreg.SetValueEx(winreg.CreateKey(root, KEY_C % ".pyo"),
"", 0, winreg.REG_SZ, "Python.CompiledFile")
# MIME types
winreg.SetValueEx(winreg.CreateKey(root, KEY_C % ".py"),
"Content Type", 0, winreg.REG_SZ, "text/plain")
winreg.SetValueEx(winreg.CreateKey(root, KEY_C % ".pyw"),
"Content Type", 0, winreg.REG_SZ, "text/plain")
# Verbs
python = osp.abspath(osp.join(target, 'python.exe'))
pythonw = osp.abspath(osp.join(target, 'pythonw.exe'))
spyder = osp.abspath(osp.join(target, os.pardir, 'Spyder.exe'))
if not osp.isfile(spyder):
spyder = '%s" "%s\\Scripts\\spyder' % (pythonw, target)
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("", "open")),
"", 0, winreg.REG_SZ, '"%s" "%%1" %%*' % python)
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("NoCon", "open")),
"", 0, winreg.REG_SZ, '"%s" "%%1" %%*' % pythonw)
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("Compiled", "open")),
"", 0, winreg.REG_SZ, '"%s" "%%1" %%*' % python)
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("", EWI)),
"", 0, winreg.REG_SZ,
'"%s" "%s\\Lib\\idlelib\\idle.pyw" -n -e "%%1"'
% (pythonw, target))
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("NoCon", EWI)),
"", 0, winreg.REG_SZ,
'"%s" "%s\\Lib\\idlelib\\idle.pyw" -n -e "%%1"'
% (pythonw, target))
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("", EWS)),
"", 0, winreg.REG_SZ, '"%s" "%%1"' % spyder)
winreg.SetValueEx(winreg.CreateKey(root, KEY_C2 % ("NoCon", EWS)),
"", 0, winreg.REG_SZ, '"%s" "%%1"' % spyder)
# Drop support
handler = "{60254CA5-953B-11CF-8C96-00AA00B8708C}"
for ftype in ("", "NoCon", "Compiled"):
winreg.SetValueEx(winreg.CreateKey(root, KEY_DROP1 % ftype),
"", 0, winreg.REG_SZ, handler)
# Icons
dlls = osp.join(target, 'DLLs')
winreg.SetValueEx(winreg.CreateKey(root, KEY_I % ""),
"", 0, winreg.REG_SZ, r'%s\py.ico' % dlls)
winreg.SetValueEx(winreg.CreateKey(root, KEY_I % "NoCon"),
"", 0, winreg.REG_SZ, r'%s\py.ico' % dlls)
winreg.SetValueEx(winreg.CreateKey(root, KEY_I % "Compiled"),
"", 0, winreg.REG_SZ, r'%s\pyc.ico' % dlls)
# Descriptions
winreg.SetValueEx(winreg.CreateKey(root, KEY_D % ""),
"", 0, winreg.REG_SZ, "Python File")
winreg.SetValueEx(winreg.CreateKey(root, KEY_D % "NoCon"),
"", 0, winreg.REG_SZ, "Python File (no console)")
winreg.SetValueEx(winreg.CreateKey(root, KEY_D % "Compiled"),
"", 0, winreg.REG_SZ, "Compiled Python File")
# PythonCore entries
'''
short_version = utils.get_python_infos(target)[0]
long_version = utils.get_python_long_version(target)
key_core = (KEY_S1 % short_version) + r'\\%s'
winreg.SetValueEx(winreg.CreateKey(root, key_core % 'InstallPath'),
"", 0, winreg.REG_SZ, target)
winreg.SetValueEx(winreg.CreateKey(root,
key_core % r'InstallPath\\InstallGroup'),
"", 0, winreg.REG_SZ, "Python %s" % short_version)
winreg.SetValueEx(winreg.CreateKey(root, key_core % 'Modules'),
"", 0, winreg.REG_SZ, "")
winreg.SetValueEx(winreg.CreateKey(root, key_core % 'PythonPath'),
"", 0, winreg.REG_SZ,
r"%s\\Lib;%s\\DLLs" % (target, target))
winreg.SetValueEx(winreg.CreateKey(root,
key_core % r'Help\\Main Python Documentation'),
"", 0, winreg.REG_SZ,
r"%s\\Doc\\python%s.chm" % (target, long_version))
'''
# Create start menu entries for all WinPython launchers
'''
for path, desc, fname in _get_shortcut_data(target, current=current):
utils.create_shortcut(path, desc, fname)
'''
# Register the Python ActiveX Scripting client (requires pywin32)
axscript = osp.join(target, 'Lib', 'site-packages', 'win32comext',
'axscript', 'client', 'pyscript.py')
if osp.isfile(axscript):
subprocess.call('"%s" "%s"' % (python, axscript), cwd=target)
else:
print('Unable to register ActiveX: please install pywin32',
file=sys.stderr)
'''
def unregister(target, current=True):
"""Unregister a Python distribution in Windows registry"""
# Registry entries
root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
short_version = utils.get_python_infos(target)[0]
key_core = (KEY_S1 % short_version) + r'\\%s'
for key in (
# Drop support
KEY_DROP1 % "", KEY_DROP1 % "NoCon", KEY_DROP1 % "Compiled",
KEY_DROP0 % "", KEY_DROP0 % "NoCon", KEY_DROP0 % "Compiled",
# Icons
KEY_I % "NoCon", KEY_I % "Compiled", KEY_I % "",
# Edit with IDLE
KEY_C2 % ("", EWI), KEY_C2 % ("NoCon", EWI),
KEY_C1 % ("", EWI), KEY_C1 % ("NoCon", EWI),
# Edit with Spyder
KEY_C2 % ("", EWS), KEY_C2 % ("NoCon", EWS),
KEY_C1 % ("", EWS), KEY_C1 % ("NoCon", EWS),
# Verbs
KEY_C2 % ("", "open"),
KEY_C2 % ("NoCon", "open"),
KEY_C2 % ("Compiled", "open"),
KEY_C1 % ("", "open"),
KEY_C1 % ("NoCon", "open"),
KEY_C1 % ("Compiled", "open"),
KEY_C0 % "", KEY_C0 % "NoCon", KEY_C0 % "Compiled",
# Descriptions
KEY_D % "NoCon", KEY_D % "Compiled", KEY_D % "",
# PythonCore
key_core % r'InstallPath\\InstallGroup',
key_core % 'InstallPath',
key_core % 'Modules',
key_core % 'PythonPath',
key_core % r'Help\\Main Python Documentation',
key_core % 'Help',
KEY_S1 % short_version, KEY_S0, KEY_S,
):
try:
print(key)
winreg.DeleteKey(root, key)
except WindowsError:
rootkey = 'HKEY_CURRENT_USER' if current else 'HKEY_LOCAL_MACHINE'
print(r'Unable to remove %s\\%s' % (rootkey, key), file=sys.stderr)
# Start menu shortcuts
for path, desc, fname in _get_shortcut_data(target, current=current):
if osp.exists(fname):
os.remove(fname)
'''
|
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Reads a log generated by TemaTool, and prints some info about it.
Usage: logreader [logfile(s)] [options]
If no logfiles given, reads from standard input.
Examples:
Help:
logreader -h
Default info about logfile:
logreader file.log
Print info in gnuplot datafile format, with "sample rate" of 10 seconds:
logreader file.log --gnuplot -r 10s > datafile.plotdat
You can gnuplot the resulting datafile(s) with tema plotter script.
The plotter also reads stdin, so this is possible:
logreader file.log --gnuplot | plotter | gnuplot -persist
"""
# TODO? refactor?
# It'd be better to first do some kind of lexical reading to get the
# transitions etc in a more handlable form, and then another pass on those to
# extract the needed info. Now it's all done in one big mess...
# That'd make it easier to do some kind of analysis of simultaneously
# running (shared coverage) kinda test runs, for example.
# (Don't think anybody'd want to refactor this though :)
import sys
import re
import datetime
import time
import math
import os
import optparse
from copy import copy
import csv
def ratio(val, perVal, NA=None):
if perVal != 0:
return float(val)/perVal
else:
return NA
def percentage(val, perVal, NA=None):
r = ratio(val,perVal,NA)
return r*100.0 if r is not NA else NA
def sumOfValues(someDict):
return sum(someDict.values())
def numberOfNonZeros(someDict):
num = 0
for et in someDict.itervalues():
if et > 0:
num += 1
return num
def nonZeroValuesRatio(awDict):
if not awDict:
return 0 # ?
return float(numberOfNonZeros(awDict)) / len(awDict)
def timediffInMs(td):
return (td.days * 24 * 3600 + td.seconds) * 1000 \
+ td.microseconds / 1000
def groupByTimesExecuted(d):
counter = {}
for x,n in d.iteritems():
if n in counter:
counter[n].append(x)
else:
counter[n] = [x]
clist = [x for x in counter.iteritems()]
clist.sort()
return clist
def readSysArgs(argv):
usage = "%prog [logfile(s)] [options]"
description = "If no filenames are given or filename is -, reads from standard input"
def zekkaa(option, opt, value):
try:
num,unit = value[:-1],value[-1]
if unit not in "tsark%":
return float(value),"s"
elif unit == "%": return float(num),unit
elif unit == "s": return float(num),unit
elif unit == "a": return int(num),unit
elif unit == "r": return int(num),unit
elif unit == "k": return int(num),unit
elif unit == "t": return int(num),unit
except ValueError:
raise optparse.OptionValueError("Invalid datarate value: %s"%value)
class MyOption(optparse.Option):
TYPES = optparse.Option.TYPES + ("tsark%",)
TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER["tsark%"] = zekkaa
op = optparse.OptionParser(usage=usage, option_class=MyOption,description=description)
op.add_option("--verbose", action="store_true",
help="verbose")
op.add_option("--apps", action="store", type="str",
help="Application names. They're used to guess which aw belongs to which app.")
op.add_option("--neg", action="store_true",
help="negative action words separately")
op.add_option("--delay", action="store", type="float",
help="simulate delay for keyword execution (in seconds)")
op.add_option("--gen", action="store_true",
help="read the TaskSwitcherGEN and TargetSwitcher aw's too. they're ignored by default")
op.add_option("--comptrans", action="store", type="str",
help="Indices of components whose transitions to count.")
op.add_option("--awexec", action="store_true",
help="Print how many times each aw is executed.")
op.add_option("--gnuplot", action="store_true",
help="output in gnuplot datafile format.\
columns are described in comments of the datafile.")
op.add_option("--csv", action="store_true",
help="Output in csv format.\
First row has column names.")
op.add_option("-r", "--datarate", action="store", type="tsark%",
help="(with --gnuplot or --csv) determines the \"sample rate\" of datafile. Example values: 1.0s (seconds), 10a (action words), 10k (keywords), 1.5% (aw-coverage), 1000r (logfile rows), 1000t (transitions)")
op.add_option("-l", "--datalimit", action="store", type="tsark%",
help="stop after given amount of time/AW's/KW's/cov.\
Same format as in --datarate")
op.add_option("--combine", action="store_true",
help="Treat all the given logs as one big log.")
op.add_option("--title", action="store", type="str")
options, args = op.parse_args(argv[1:])
mutuallyExclusive = (('gnuplot','verbose'),
('gnuplot','awexec'),
('gnuplot','csv'),
('csv','verbose'),
('csv','awexec'))
for o1,o2 in mutuallyExclusive:
if getattr(options,o1) and getattr(options,o2):
op.error('options --%s and --%s are mutually exclusive' % (o1,o2))
# if no filename given, read from stdin
if len(args) == 0:
logfiles = [sys.stdin]
#elif len(args) == 1 and args[0] == "-":
# logfiles = [sys.stdin]
else:
try:
logfiles = [file(fn,'r') if fn!='-' else sys.stdin for fn in args]
except IOError,ioe:
op.error(ioe)
if options.delay:
delay = datetime.timedelta(seconds=options.delay)
options.delay = delay
return logfiles, options
# regex of end_aw
# 4 groups: start of aw, sign("~" or ""), end of aw, param
_RE_END_AW = re.compile("(\S*:)(~?)(end_aw[^\s<]*)((?:<[^>]*>)?)")
_RE_START_AW = re.compile("(\S*:)(~?)(start_aw[^\s<]*)((?:<[^>]*>)?)")
_RE_ACTIVATES = re.compile("Executing: (.*) ACTIVATES (.*)")
_RE_WAKE = re.compile("Executing: WAKEtsWAKE<(.*)>")
# 2 groups: (appname, list of aw's separated by ",")
#_RE_BENCHMARKED = re.compile("Benchmarked app: (.*); Action words: (.*)$")
_RE_BENCHMARKED = re.compile("Action words: (.*)$")
_TIME_FORMAT = "%m%d%H%M%S"
class LogFileError(Exception):
pass
def createTimeToStopFunc(unit,limit):
if unit == 'a': return lambda rdr: rdr._numAWsOfCurrFileOnly >= limit
elif unit == 'k': return lambda rdr: rdr._numKWsOfCurrFileOnly >= limit
elif unit == '%': return lambda rdr: rdr.awCoverage()*100.0 >= limit
elif unit == 's': return lambda rdr: rdr.elapsedMs()/1000 >= limit
elif unit == 'r': return lambda rdr: rdr._rowsReadOfCurrFileOnly >= limit
elif unit == 't': return lambda rdr: rdr._numTransOfCurrFileOnly >= limit
assert False
def createTimeToPrintFunc(unit):
if unit == 'a':
        return lambda rdr: rdr._numAWsOfCurrFileOnly >= rdr.nextPrint
elif unit == 'k':
return lambda rdr: rdr._numKWsOfCurrFileOnly >= rdr.nextPrint
elif unit == '%':
return lambda rdr: rdr.awCoverage()*100.0 >= rdr.nextPrint
elif unit == 's':
return lambda rdr: rdr.elapsedMs()/1000 >= rdr.nextPrint
elif unit == 'r':
return lambda rdr: rdr._rowsReadOfCurrFileOnly >= rdr.nextPrint
elif unit == 't':
return lambda rdr: rdr._numTransOfCurrFileOnly >= rdr.nextPrint
assert False
class LogParser:
def __init__(self, options):
self.OPTIONS = options
if self.OPTIONS.datalimit:
limit, unit = self.OPTIONS.datalimit
self.timeToStopFunc = createTimeToStopFunc(unit,limit)
else:
self.timeToStopFunc = lambda self: False
if self.OPTIONS.datarate:
rate, unit = self.OPTIONS.datarate
self.timeToPrintFunc = createTimeToPrintFunc(unit)
else:
self.timeToPrintFunc = lambda self: True
if self.OPTIONS.apps:
self.appNames = self.OPTIONS.apps.strip().split()
else:
self.appNames = []
def _reset(self):
self._rowsRead = 0
self.dataRows = []
self._numKWs = 0
self._numAWs = 0 # number of end_aw executions
self.genAWs = set()
# Action word "AW" of app "AppName" has been executed
# *AWS_BY_APP["AppName"]["AW"] times.
# positive AWs here:
self._posAWsByApp = {}
for an in self.appNames:
self._posAWsByApp[an] = set()
# negative AWs here, only used if OPTIONS.neg is True:
if self.OPTIONS.neg:
self._negAWsByApp = {}
for an in self.appNames:
self._negAWsByApp[an] = set()
self._posAWs = None
self._negAWs = None
# key: state(tuple), value: how many times visited
self._states = {}
# transition = (state,kw/aw+params,state)
# key: transition(tuple), value: how many times visited
self._transitions = {}
self._numTrans = 0
# the transition that's been started but not finished
self._current_transition = [None]
# when the execution started
self._startTime = None
# the latest time
self._latestTime = None
# total delay of keyword executions so far
self._delayByNow = datetime.timedelta()
self._asps = set()
self._latestStartAW = None
self.switchTransitions = set()
self._currApp = None
self._switchFrom = None
self._switchFromState = None
self.numSwitches = 0
self.switchApps = []
self._device = None
# state length, for checking it doesn't increase because of a nondeterminism bug
self._stateLen = None
self._componenttransitions = set()
self._numComponenttransitions = 0
self._transitionsByComponent = {}
self._numTransitionsByComponent = {}
if self.OPTIONS.comptrans:
for i in [int(si) for si in self.OPTIONS.comptrans.strip().split()]:
self._transitionsByComponent[i] = set()
self._numTransitionsByComponent[i] = 0
self._resetCurrFile()
def _resetCurrFile(self):
self._startTime = None
self._latestTime = None
self._delayByNow = datetime.timedelta()
self._current_transition = [None]
self._currApp = None
self._rowsReadOfCurrFileOnly = 0
self._numKWsOfCurrFileOnly = 0
self._numAWsOfCurrFileOnly = 0
self._numTransOfCurrFileOnly = 0
def readLog(self,logfile):
self._reset()
try:
self._parseLogFile(logfile)
except LogFileError, lfe:
print >> sys.stderr, "Error when reading '%s'! (%i lines read)"%(logfile.name,self._rowsReadOfCurrFileOnly)
if str(lfe): print >> sys.stderr, "%s"%lfe
return False
finally:
logfile.close()
return True
def readLogsAsOne(self,logfiles):
self._reset()
for i,logfile in enumerate(logfiles):
try:
firstOne = (i == 0)
lastOne = (i == len(logfiles)-1)
self._resetCurrFile()
self._parseLogFile(logfile, firstOne, lastOne)
except LogFileError, lfe:
print >> sys.stderr, "Error when reading '%s'! (%i lines read)"%(logfile.name,self._rowsReadOfCurrFileOnly)
if str(lfe): print >> sys.stderr, "%s"%lfe
return False
finally:
logfile.close()
return True
def _parseLogFile(self, logfile,firstOfCombined=True,lastOfCombined=True):
"""Parses a log file.
Prints the results according to options given to constructor.
"""
self.filename = logfile.name
LINE_ACTIONWORDS = "Action words: "
LINE_EXECUTING = "TestEngine: Executing"
LINE_STATE = "TestEngine: New state: "
LINE_ASP = "(Non-SleepState) StateProps: "
headerHasToBePrinted = firstOfCombined
self.rowWriter = None
printDataRows = False
if self.OPTIONS.datarate:
num,unit = self.OPTIONS.datarate
self.nextPrint = num
nextPrintIncr = num
else:
self.nextPrint = None
if self.OPTIONS.gnuplot:
printDataHeader = self.printPlotDataComment
printDataRow = self.printPlotDataRow
printDataRows = True
elif self.OPTIONS.csv:
printDataHeader = self.printCSVDataHeader
printDataRow = self.printCSVDataRow
self.rowWriter = csv.writer(sys.stdout)
printDataRows = True
for line in logfile:
# Just storing, not parsing the timeStr here.
# It's parsed only when needed, to save time...
# (Parsing time string was one of the most time consuming parts,
# and it made reading very long logfiles slow...)
self.timeStr = line[:14]
if self._startTime is None:
self._startTime = self._getLatestTime()
start = time.time()
if line.find(LINE_ACTIONWORDS) != -1:
# Parse the list of all the aw's if not already.
# (To only do it once for multiple combined logs, etc.)
if self._posAWs is None:
self.parseActionWords(line)
elif line.find(LINE_STATE) != -1:
self.parseState(line)
elif line.find(LINE_ASP) != -1:
self.parseASP(line)
elif line.find(LINE_EXECUTING) != -1:
self.parseExecuting(line)
self._rowsRead += 1
self._rowsReadOfCurrFileOnly += 1
if self.timeToStopFunc(self):
break
if printDataRows:
try:
if headerHasToBePrinted:
printDataHeader()
headerHasToBePrinted = False
printDataRow()
elif self.timeToPrintFunc(self):
printDataRow()
if self.nextPrint is not None:
self.nextPrint += nextPrintIncr
except IOError:
pass # ?
if printDataRows and not headerHasToBePrinted:
printDataRow()
if lastOfCombined:
# empty line marks the end of data, for --gnuplot at least...
print
self.checkThatLogFileWasValid()
if not printDataRows and lastOfCombined:
self.printSummary()
def _getLatestTime(self):
try:
dateStr,msStr = self.timeStr.split('.')
except ValueError:
# The latest line doesn't contain time stamp.
# All the tema log lines do, but there could be some other lines
# in the logfile.
# Using the latest time.
return self._latestTime
# Python 2.4 doesn't have datetime.strptime function, but this does
# same thing
t = datetime.datetime(*(time.strptime(dateStr,_TIME_FORMAT)[0:6]))
micros = int(msStr) * 1000
self._latestTime = t.replace(microsecond=micros)
return self._latestTime
def elapsedMs(self):
execTime = self._getLatestTime()
elapsed = execTime - self._startTime + self._delayByNow
return timediffInMs(elapsed)
def parseActionWords(self,line):
"""Parses a "Action words:" line."""
words = _RE_BENCHMARKED.search(line).groups()[0]
if words:
AWs = words.split()
if not self.OPTIONS.gen:
AWs = [w for w in AWs if not self.isGenAW(w)]
self._posAWs = dict(zip(AWs,[0]*len(AWs)))
for aw in AWs:
app = self.appOfAW(aw)
if app is not None:
self._posAWsByApp[app].add(aw)
for appname,appAWs in self._posAWsByApp.iteritems():
if not appAWs:
raise LogFileError("There are no action words belonging to app '%s'!"%appname)
if self.OPTIONS.neg:
negAWs = [self.negAWfromAW(aw) for aw in AWs]
self._negAWs = dict(zip(negAWs,[0]*len(negAWs)))
for naw in negAWs:
app = self.appOfAW(naw)
if app is not None:
self._negAWsByApp[app].add(naw)
def parseState(self, line):
"""Parses a "New state" line."""
stateStr = line.split("TestEngine: New state: ")[-1].strip()
state = tuple([int(i) for i in stateStr[1:-1].split(', ')])
# the state looks like a python tuple, so we'll just exec it (slow?)
#exec "state = %s" % state
if self._stateLen is None:
self._stateLen = len(state)
elif self._stateLen != len(state):
raise LogFileError("State id lengths vary! Nondeterminism bug in the model?")
if self._switchFrom != self._currApp:
self.switchTransitions.add(
(self._switchFromState,self._switchFrom,
state, self._currApp))
self.switchApps.append((self._switchFrom, self._currApp))
self._switchFrom = self._currApp
self.numSwitches += 1
if self.OPTIONS.verbose:
print "*** SWITCHED FROM %s to %s" % self.switchApps[-1]
self.latest_state = state
if state in self._states:
self._states[state] += 1
else:
self._states[state] = 1
if len(self._current_transition) != 2:
raise LogFileError("Two states without execution in between. <<%s>>" % str(self._current_transition))
else:
self._current_transition.append(state)
self._current_transition = tuple(self._current_transition)
if self._current_transition in self._transitions:
self._transitions[self._current_transition] += 1
else:
self._transitions[self._current_transition] = 1
self._numTrans += 1
self._numTransOfCurrFileOnly += 1
action = self._current_transition[1]
# given componenttransitions by component
for i in self._transitionsByComponent:
if self._current_transition[0] is None:
break
s1 = self._current_transition[0][i]
s2 = self._current_transition[2][i]
if s1 != s2:
self._transitionsByComponent[i].add(
(s1,self._current_transition[1],s2))
self._numTransitionsByComponent[i] += 1
# all the componenttransitions
for i in range(self._stateLen):
if self._current_transition[0] is None:
break
s1 = self._current_transition[0][i]
s2 = self._current_transition[2][i]
if s1 != s2:
self._componenttransitions.add( (i,s1,action,s2) )
self._numComponenttransitions += 1
self._current_transition = [state]
def parseASP(self, line):
spStr = line.split('(Non-SleepState) StateProps: ',1)[1].strip()
if spStr:
sps = spStr[1:-1].split('" "')
for sp in sps:
self._asps.add( (self._latestStartAW,sp) )
def parseExecuting(self, line):
"""Parses an "Executing" line.
"""
executed_word = line.split('Executing: ',1)[-1].strip()
devicePrefixAndWord = executed_word.split("/",1)
if len(devicePrefixAndWord) > 1:
self._device = devicePrefixAndWord[0]
if len(self._current_transition) == 1:
self._current_transition.append(executed_word)
else:
raise LogFileError("Two executions without state in between. <<%s>>" % str(self._current_transition))
if 'ACTIVATES' in executed_word:
self.parseExecutingActivates(line)
elif ':start_aw' in executed_word:
self.parseExecutingStartAW(line)
elif ':end_aw' in executed_word or ':~end_aw' in executed_word:
self.parseExecutingEndAw(line)
else:
self.parseExecutingMisc(line)
def parseExecutingStartAW(self,line):
aw1,sign,aw2,param = _RE_START_AW.search(line).groups()
aw = aw1+aw2
self._latestStartAW = aw
def parseExecutingEndAw(self,line):
awBegin, tildeOrNot, awEnd, param = _RE_END_AW.search(line).groups()
if not self.OPTIONS.gen and self.isGenAW(awBegin+awEnd+param):
return
sign = (tildeOrNot == "")
theAWDict = self._posAWs # usually here...
if self.OPTIONS.neg:
aw = awBegin + tildeOrNot + awEnd + param
if sign == False:
#...except if it's a negative aw and they're treated separately
theAWDict = self._negAWs
else:
aw = awBegin + awEnd + param
self._numAWs += 1
self._numAWsOfCurrFileOnly += 1
isNewAW = not theAWDict[aw]
if isNewAW:
theAWDict[aw] = 1
else:
theAWDict[aw] += 1
if self.OPTIONS.verbose:
self.printExecAWRow(aw,sign,isNewAW)
def parseExecutingMisc(self,line):
# unique switches (S1,S2): S1 -> SLEEPts -> WAKEtsWAKE -> S2
if "Executing: SLEEPts" in line:
self._switchFromState = self.latest_state
elif "Executing: WAKEtsWAKE" in line:
if self._switchFromState is not None:
self._switchFrom = self._currApp
module = _RE_WAKE.search(line).groups()[0]
self._currApp = self.appOfModule(module)
elif self._currApp is not None:
# wake without sleep should only be possible at the start
raise LogFileError("WAKEtsWAKE in a wrong place")
# check if it's a kw execution, for which we may add delay
elif "Executing: kw_" in line or "Executing: ~kw_" in line:
self._numKWs += 1
self._numKWsOfCurrFileOnly += 1
if self.OPTIONS.delay:
self._delayByNow += self.OPTIONS.delay
def parseExecutingActivates(self,line):
"""Parses an "X ACTIVATES Y" line.
"""
a1, a2 = _RE_ACTIVATES.search(line).groups()
if "/" in a1:
# if there's a device name in the first, prepend it to the 2nd also
a2 = a1.split("/",1)[0] + "/" + a2
app1, app2 = None, None
for an in self.appNames:
if a1.startswith(an): app1 = an
if a2.startswith(an): app2 = an
if (app1 != app2):
self._switchFrom = app1
self._currApp = app2
self._switchFromState = self.latest_state
def appOfModule(self,module):
if self._device:
module = self._device+"/"+module
for an in self.appNames:
if module.startswith(an):
return an
return None
def appOfAW(self,aw):
for an in self.appNames:
if aw.startswith(an):
return an
return None
def negAWfromAW(self,aw):
"""Returns "negative" (~) version of aw."""
awBegin, sign, awEnd, param = _RE_END_AW.search(aw).groups()
return awBegin + "~" + awEnd + param
def awCoverage(self):
return nonZeroValuesRatio( self.allAWs() )
def awCoverageOfApp(self,appname):
return nonZeroValuesRatio( self.allAWsByApp(appname) )
#return nonZeroValuesRatio( self.allAWsByApp()[appname] )
def posAWsByApp(self,app):
d = {}
for pa in self._posAWsByApp[app]:
d[pa] = self._posAWs[pa]
return d
def negAWsByApp(self,app):
if not self.OPTIONS.neg:
return {}
d = {}
for pa in self._negAWsByApp[app]:
d[pa] = self._negAWs[pa]
return d
def allAWsByApp(self,app):
d = {}
for pa in self._posAWsByApp[app]:
d[pa] = self._posAWs[pa]
if self.OPTIONS.neg:
for pa in self._negAWsByApp[app]:
d[pa] = self._negAWs[pa]
return d
def awsByAppDict(self,appFunc):
d = {}
for an in self.appNames:
d[an] = appFunc(an)
return d
def allAWs(self):
if not self.OPTIONS.neg:
return self._posAWs
else:
aa = {}
aa.update(self._posAWs)
aa.update(self._negAWs)
return aa
def varianceOfAppCov(self):
if not self.appNames:
return 0
appCoverages = []
for app in self.appNames:
appCoverages.append( nonZeroValuesRatio(self.allAWsByApp(app)) )
n = len(appCoverages)
avg = sum(appCoverages) / float(n)
variance = (1/float(n))*sum([(y-avg)*(y-avg) for y in appCoverages])
return variance
def stdDevOfAppCov(self):
return math.sqrt( self.varianceOfAppCov() )
def isGenAW(self,line):
return ("TaskSwitcherGEN:end_awActivate" in line
or "TargetSwitcher:end_awActivate" in line
or "Synchronizer:end_aw" in line)
### print funcs:
def __getColumns(self):
return ("s", "kw", "aw", "awcov",
"switches", "uniqswitches",
"states", "uniqstates",
"transitions", "uniqtransitions",
"comptransitions", "uniqcomptransitions",
"asp",
"appcovstddev",
"leastcoveredapp")\
+ tuple(["awcov(%s)"%a for a in self.appNames])\
+ tuple(["comptrans(%i)"%i for i in self._transitionsByComponent])
def printPlotDataComment(self):
print "# Log file:", self.filename
print "# Title:", self.OPTIONS.title or self.filename
print "# Test ran:", self._startTime.strftime('????-%m-%d %H:%M:%S')
print "# Log read:", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print "# Apps:", " ".join(self.appNames)
cols = self.__getColumns()
self._colInd = dict( [(name,i) for i,name in enumerate(cols)] )
self._numCols = len(cols)
print "# Columns:",
for i,x in enumerate(cols):
print "%i:%s"% (i+1,x),
print
print
def printCSVDataHeader(self):
cols = self.__getColumns()
self._colInd = dict( [(name,i) for i,name in enumerate(cols)] )
self._numCols = len(cols)
self.rowWriter.writerow(cols)
def __getDataRow(self):
switches = self.numSwitches
states = sumOfValues(self._states)
uniqStates = numberOfNonZeros(self._states)
trans = sumOfValues(self._transitions)
uniqTrans = numberOfNonZeros(self._transitions)
appCoverages = []
for appname in self.appNames:
appCoverages.append( self.awCoverageOfApp(appname) )
compTransitions = []
for i,t in self._transitionsByComponent.iteritems():
compTransitions.append( len(t) )
uniqSwitches = len(self.switchTransitions)
if appCoverages:
covOfLeastCoveredApp = min(appCoverages)
else:
covOfLeastCoveredApp = 0
return((self.elapsedMs()/1000.0, self._numKWs, self._numAWs,
self.awCoverage(),
switches, uniqSwitches,
states, uniqStates,
trans, uniqTrans,
self._numComponenttransitions, len(self._componenttransitions),
len(self._asps),
self.stdDevOfAppCov(),
covOfLeastCoveredApp)
+
tuple(appCoverages)
+
(tuple(compTransitions))
)
def printPlotDataRow(self):
row = self.__getDataRow()
print " ".join([str(i) for i in row])
sys.stdout.flush()
self.dataRows.append(row)
def printCSVDataRow(self):
row = self.__getDataRow()
self.rowWriter.writerow(row)
sys.stdout.flush()
self.dataRows.append(row)
def checkThatLogFileWasValid(self):
sumStates = sumOfValues(self._states)
if sumStates == 0:
raise LogFileError("Not a single state found in the file.")
def printTitle(self):
print
title = "%s (%i rows read)" % (self.filename,self._rowsRead)
space = 60-len(title)
print " "*(space/2) + title + " "*(space/2)
print "-"*60
def printSummary(self):
self.printTitle()
print "Running time: %.0f s" % (self.elapsedMs() / 1000),
if self.OPTIONS.delay:
delayTotal = timediffInMs(self._delayByNow) / 1000.0
delayKw = timediffInMs(self.OPTIONS.delay) / 1000.0
print "(including delay %.2f s/KW * %i KW's = %.0fs)" %(delayKw,self._numKWs,delayTotal)
else:
print
print "-"*60
print "Total Action Word executions: %i" % self._numAWs
print "Total Keyword executions: %i" % self._numKWs
print "-"*60
if self.OPTIONS.apps:
if not self.OPTIONS.neg:
awsByApp = self.awsByAppDict(self.posAWsByApp)
awsByApp['Total'] = self.allAWs()
printAppTable(awsByApp)
else:
print "Positive AW's"
awsByApp = self.awsByAppDict(self.posAWsByApp)
awsByApp['Total'] = self._posAWs
printAppTable(awsByApp)
print "Negative(~) AW's"
awsByApp = self.awsByAppDict(self.negAWsByApp)
awsByApp['Total'] = self._negAWs
printAppTable(awsByApp)
print "All AW's"
awsByApp = self.awsByAppDict(self.allAWsByApp)
awsByApp['Total'] = self.allAWs()
printAppTable(awsByApp)
else:
if not self.OPTIONS.neg:
printTable([self.allAWs()])
else:
print "Positive AW's:\n"
printTable([self._posAWs])
print "Negative(~) AW's:\n"
printTable([self._negAWs])
print "Total AW's:\n"
printTable([self.allAWs()])
sumStates = sumOfValues(self._states)
sumTrans = sumOfValues(self._transitions)
numTrans = numberOfNonZeros(self._transitions)
uniqStates = numberOfNonZeros(self._states)
print "Transitions executed: %i" % sumTrans
print "Unique transitions: %i (%.2f%%)" % (numTrans, percentage(numTrans,sumTrans))
print "Unique states visited: %i (%.2f%%)" % (uniqStates, percentage(uniqStates,sumStates))
print "-"*60
sumCompTrans = self._numComponenttransitions
numCompTrans = len(self._componenttransitions)
print "Component transitions executed: %i" % sumCompTrans
print "Unique component transitions: %i (%.2f%%)" % (
numCompTrans, percentage(numCompTrans,sumCompTrans) )
print "-"*60
switches = self.numSwitches
if self.OPTIONS.apps:
print "App switches: %i" % switches
if switches:
uniqSwitches = len(self.switchTransitions)
print "Unique switches: %i (%.2f%%)"%(
uniqSwitches,100.0*uniqSwitches/switches)
print "-"*60
if self.OPTIONS.awexec:
for v,e in reversed(groupByTimesExecuted(self.allAWs())):
print
print "----- AW's executed %i times (%i): -----" % (v,len(e))
for x in e:
print x
def printExecAWRow(self, aw, sign, isNew):
s = "Executed"
if isNew:
s = "Executed NEW (%ith, %.2f%%)" % (
numberOfNonZeros(self.allAWs()),self.awCoverage()*100)
else: s += " old"
if sign: s += " aw:"
else: s += " ~aw: %s"
print s.ljust(35),aw
def printExecAWHeader():
print "-"*60
print "Action words:"
print "# Elapsed(ms) AW-cov(%) Action word"
print "-"*60
def printTable(awDicts, colNames=None, spacing=10):
if colNames is not None:
print " "*23,
print "".join([("%s"%cn[:spacing-1]).ljust(spacing) for cn in colNames])
print "AW's in the model: ",
print "".join([("%i"%len((t))).ljust(spacing) for t in awDicts])
print "AW executions: ",
print "".join([("%i"%sumOfValues(t)).ljust(spacing) for t in awDicts])
print "Unique AW's executed: ",
print "".join([("%i"%numberOfNonZeros(t)).ljust(spacing) for t in awDicts])
print "AW coverage: ",
print "".join([("%.2f%%"%(nonZeroValuesRatio(t)*100)).ljust(spacing) for t in awDicts])
print "-"*60
def printAppTable(awsByApp, title=None):
if title:
print title
awDicts = []
coltitles = []
for k,x in awsByApp.iteritems():
coltitles.append(k.split('/')[-1])
awDicts.append(x)
printTable(awDicts, coltitles)
# main
def main(argv):
logfiles, options = readSysArgs(argv)
parser = LogParser(options)
try:
if options.combine:
parser.readLogsAsOne(logfiles)
else:
for logfile in logfiles:
parser.readLog(logfile)
except KeyboardInterrupt:
return False
return True
if __name__ == "__main__":
ok = main(sys.argv)
sys.exit(0 if ok else -1)
|
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import re
import sys
import shlex
import subprocess
import multiprocessing
import string
try:
import threading
except ImportError:
import dummy_threading as threading
class NativeLib:
def __init__ (self, apiVersion, abiVersion, prebuiltDir):
self.apiVersion = apiVersion
self.abiVersion = abiVersion
self.prebuiltDir = prebuiltDir
def __str__ (self):
return "(API: %s, ABI: %s)" % (self.apiVersion, self.abiVersion)
def __repr__ (self):
return "(API: %s, ABI: %s)" % (self.apiVersion, self.abiVersion)
def getPlatform ():
if sys.platform.startswith('linux'):
return 'linux'
else:
return sys.platform
def selectByOS (variants):
platform = getPlatform()
if platform in variants:
return variants[platform]
elif 'other' in variants:
return variants['other']
else:
raise Exception("No configuration for '%s'" % platform)
def isExecutable (path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def which (binName):
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
fullPath = os.path.join(path, binName)
if isExecutable(fullPath):
return fullPath
return None
def isBinaryInPath (binName):
return which(binName) != None
def selectFirstExistingBinary (filenames):
for filename in filenames:
if filename != None and isExecutable(filename):
return filename
return None
def selectFirstExistingDir (paths):
for path in paths:
if path != None and os.path.isdir(path):
return path
return None
def die (msg):
print msg
exit(-1)
def shellquote(s):
return '"%s"' % s.replace('\\', '\\\\').replace('"', '\"').replace('$', '\$').replace('`', '\`')
def execute (commandLine):
args = shlex.split(commandLine)
retcode = subprocess.call(args)
if retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (commandLine, retcode))
def execArgs (args):
# Make sure previous stdout prints have been written out.
sys.stdout.flush()
retcode = subprocess.call(args)
if retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (str(args), retcode))
def execArgsInDirectory (args, cwd, linePrefix="", failOnNonZeroExit=True):
def readApplyPrefixAndPrint (source, prefix, sink):
while True:
line = source.readline()
if len(line) == 0: # EOF
break;
sink.write(prefix + line)
process = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutJob = threading.Thread(target=readApplyPrefixAndPrint, args=(process.stdout, linePrefix, sys.stdout))
stderrJob = threading.Thread(target=readApplyPrefixAndPrint, args=(process.stderr, linePrefix, sys.stderr))
stdoutJob.start()
stderrJob.start()
retcode = process.wait()
if failOnNonZeroExit and retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (str(args), retcode))
def serialApply(f, argsList):
for args in argsList:
f(*args)
def parallelApply(f, argsList):
class ErrorCode:
def __init__ (self):
self.error = None;
def applyAndCaptureError (func, args, errorCode):
try:
func(*args)
except:
errorCode.error = sys.exc_info()
errorCode = ErrorCode()
jobs = []
for args in argsList:
job = threading.Thread(target=applyAndCaptureError, args=(f, args, errorCode))
job.start()
jobs.append(job)
for job in jobs:
job.join()
if errorCode.error:
raise errorCode.error[0], errorCode.error[1], errorCode.error[2]
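# Illustrative sketch (not part of the original module): running a build step
# for several ABIs either serially or in parallel. The build function and the
# ABI list below are hypothetical.
def exampleApplyUsage ():
	def buildForAbi (abi):
		print "building for %s" % abi
	argsList = [("armeabi-v7a",), ("x86",)]
	serialApply(buildForAbi, argsList)
	parallelApply(buildForAbi, argsList)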
class Device:
def __init__(self, serial, product, model, device):
self.serial = serial
self.product = product
self.model = model
self.device = device
def __str__ (self):
return "%s: {product: %s, model: %s, device: %s}" % (self.serial, self.product, self.model, self.device)
def getDevices (adb):
proc = subprocess.Popen([adb, 'devices', '-l'], stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise Exception("adb devices -l failed, got %d" % proc.returncode)
ptrn = re.compile(r'^([a-zA-Z0-9\.:]+)\s+.*product:([^\s]+)\s+model:([^\s]+)\s+device:([^\s]+)')
devices = []
for line in stdout.splitlines()[1:]:
if len(line.strip()) == 0:
continue
m = ptrn.match(line)
if m == None:
print "WARNING: Failed to parse device info '%s'" % line
continue
devices.append(Device(m.group(1), m.group(2), m.group(3), m.group(4)))
return devices
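# Illustrative sketch (not part of the original module): listing the devices
# attached to whatever adb binary is found on the PATH.
def printConnectedDevices ():
	adb = which('adb')
	if adb == None:
		die("adb not found in PATH")
	for device in getDevices(adb):
		print device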
def getWin32Generator ():
if which("jom.exe") != None:
return "NMake Makefiles JOM"
else:
return "NMake Makefiles"
def isNinjaSupported ():
return which("ninja") != None
def getUnixGenerator ():
if isNinjaSupported():
return "Ninja"
else:
return "Unix Makefiles"
def getExtraBuildArgs (generator):
if generator == "Unix Makefiles":
return ["--", "-j%d" % multiprocessing.cpu_count()]
else:
return []
NDK_HOST_OS_NAMES = [
"windows",
"windows-x86_64",
"darwin-x86",
"darwin-x86_64",
"linux-x86",
"linux-x86_64"
]
def getNDKHostOsName (ndkPath):
for name in NDK_HOST_OS_NAMES:
if os.path.exists(os.path.join(ndkPath, "prebuilt", name)):
return name
raise Exception("Couldn't determine NDK host OS")
# deqp/android path
ANDROID_DIR = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
# Build configuration
NATIVE_LIBS = [
# API ABI prebuiltsDir
NativeLib(13, "armeabi-v7a", 'android-arm'), # ARM v7a ABI
NativeLib(13, "x86", 'android-x86'), # x86
NativeLib(21, "arm64-v8a", 'android-arm64'), # ARM64 v8a ABI
NativeLib(21, "x86_64", 'android-x86_64'), # x86_64
]
ANDROID_JAVA_API = "android-22"
NATIVE_LIB_NAME = "libdeqp.so"
def makeNdkVersionString (version):
minorVersionString = (chr(ord('a') + version[1]) if version[1] > 0 else "")
return "r%d%s" % (version[0], minorVersionString)
ANDROID_NDK_VERSION = (11,0)
ANDROID_NDK_VERSION_STRING = makeNdkVersionString(ANDROID_NDK_VERSION)
def selectNDKPath ():
candidates = [
os.path.expanduser("~/android-ndk-" + ANDROID_NDK_VERSION_STRING),
"C:/android/android-ndk-" + ANDROID_NDK_VERSION_STRING,
os.environ.get("ANDROID_NDK_PATH", None), # If not defined, return None
]
ndkPath = selectFirstExistingDir(candidates)
if ndkPath == None:
raise Exception("None of NDK directory candidates exist: %s. Check ANDROID_NDK_PATH in common.py" % candidates)
return ndkPath
def noneSafePathJoin (*components):
if None in components:
return None
return os.path.join(*components)
# NDK paths
ANDROID_NDK_PATH = selectNDKPath()
ANDROID_NDK_HOST_OS = getNDKHostOsName(ANDROID_NDK_PATH)
ANDROID_NDK_TOOLCHAIN_VERSION = "r11" # Toolchain file is selected based on this
# Native code build settings
CMAKE_GENERATOR = selectByOS({
'win32': getWin32Generator(),
'other': getUnixGenerator()
})
EXTRA_BUILD_ARGS = getExtraBuildArgs(CMAKE_GENERATOR)
# SDK paths
ANDROID_SDK_PATH = selectFirstExistingDir([
os.environ.get("ANDROID_SDK_PATH", None),
os.path.expanduser("~/android-sdk-linux"),
os.path.expanduser("~/android-sdk-mac_x86"),
"C:/android/android-sdk-windows",
])
ANDROID_BIN = selectFirstExistingBinary([
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "android"),
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "android.bat"),
which('android'),
])
ADB_BIN = selectFirstExistingBinary([
which('adb'), # \note Prefer adb in path to avoid version issues on dev machines
noneSafePathJoin(ANDROID_SDK_PATH, "platform-tools", "adb"),
noneSafePathJoin(ANDROID_SDK_PATH, "platform-tools", "adb.exe"),
])
ZIPALIGN_BIN = selectFirstExistingBinary([
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "zipalign"),
noneSafePathJoin(ANDROID_SDK_PATH, "tools", "zipalign.exe"),
which('zipalign'),
])
JARSIGNER_BIN = which('jarsigner')
# Apache ant
ANT_BIN = selectFirstExistingBinary([
which('ant'),
"C:/android/apache-ant-1.8.4/bin/ant.bat",
"C:/android/apache-ant-1.9.2/bin/ant.bat",
"C:/android/apache-ant-1.9.3/bin/ant.bat",
"C:/android/apache-ant-1.9.4/bin/ant.bat",
])
def makeNameValueTuple (name):
return (name, str(eval(name)))
CONFIG_VAR_NAMES = [
"ANDROID_DIR",
"NATIVE_LIBS",
"ANDROID_JAVA_API",
"NATIVE_LIB_NAME",
"ANDROID_NDK_PATH",
"ANDROID_NDK_HOST_OS",
"ANDROID_NDK_TOOLCHAIN_VERSION",
"CMAKE_GENERATOR",
"EXTRA_BUILD_ARGS",
"ANDROID_SDK_PATH",
"ANDROID_BIN",
"ADB_BIN",
"ZIPALIGN_BIN",
"JARSIGNER_BIN",
"ANT_BIN",
]
CONFIG_STRINGS = [makeNameValueTuple(x) for x in CONFIG_VAR_NAMES]
|
|
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import logging
import random
import threading
import time
import typing
from typing import Dict, Iterable, Optional, Union
try:
from collections.abc import KeysView
KeysView[None] # KeysView is only subscriptable in Python 3.9+
except TypeError:
# Deprecated since Python 3.9, thus only use as a fallback in older Python versions
from typing import KeysView
from google.cloud.pubsub_v1.subscriber._protocol import requests
if typing.TYPE_CHECKING: # pragma: NO COVER
from google.cloud.pubsub_v1.subscriber._protocol.streaming_pull_manager import (
StreamingPullManager,
)
_LOGGER = logging.getLogger(__name__)
_LEASE_WORKER_NAME = "Thread-LeaseMaintainer"
class _LeasedMessage(typing.NamedTuple):
sent_time: float
"""The local time when ACK ID was initially leased in seconds since the epoch."""
size: int
ordering_key: Optional[str]
class Leaser(object):
def __init__(self, manager: "StreamingPullManager"):
self._thread: Optional[threading.Thread] = None
self._manager = manager
# a lock used for start/stop operations, protecting the _thread attribute
self._operational_lock = threading.Lock()
# A lock ensuring that add/remove operations are atomic and cannot be
# intertwined. Protects the _leased_messages and _bytes attributes.
self._add_remove_lock = threading.Lock()
# Dict of ack_id -> _LeasedMessage
self._leased_messages: Dict[str, _LeasedMessage] = {}
self._bytes = 0
"""The total number of bytes consumed by leased messages."""
self._stop_event = threading.Event()
@property
def message_count(self) -> int:
"""The number of leased messages."""
return len(self._leased_messages)
@property
def ack_ids(self) -> KeysView[str]:
"""The ack IDs of all leased messages."""
return self._leased_messages.keys()
@property
def bytes(self) -> int:
"""The total size, in bytes, of all leased messages."""
return self._bytes
def add(self, items: Iterable[requests.LeaseRequest]) -> None:
"""Add messages to be managed by the leaser."""
with self._add_remove_lock:
for item in items:
# Add the ack ID to the set of managed ack IDs, and increment
# the size counter.
if item.ack_id not in self._leased_messages:
self._leased_messages[item.ack_id] = _LeasedMessage(
sent_time=float("inf"),
size=item.byte_size,
ordering_key=item.ordering_key,
)
self._bytes += item.byte_size
else:
_LOGGER.debug("Message %s is already lease managed", item.ack_id)
def start_lease_expiry_timer(self, ack_ids: Iterable[str]) -> None:
"""Start the lease expiry timer for `items`.
Args:
items: Sequence of ack-ids for which to start lease expiry timers.
"""
with self._add_remove_lock:
for ack_id in ack_ids:
lease_info = self._leased_messages.get(ack_id)
# Lease info might not exist for this ack_id because it has already
# been removed by remove().
if lease_info:
self._leased_messages[ack_id] = lease_info._replace(
sent_time=time.time()
)
def remove(
self,
items: Iterable[
Union[requests.AckRequest, requests.DropRequest, requests.NackRequest]
],
) -> None:
"""Remove messages from lease management."""
with self._add_remove_lock:
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug("Item %s was not managed.", item.ack_id)
if self._bytes < 0:
_LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes)
self._bytes = 0
def maintain_leases(self) -> None:
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
# Also update the deadline currently used if enough new ACK data has been
# gathered since the last deadline update.
deadline = self._manager._obtain_ack_deadline(maybe_update=True)
_LOGGER.debug("The current deadline value is %d seconds.", deadline)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are beyond the max lease time. This ensures
# that in the event of a badly behaving actor, we can drop messages
# and allow the Pub/Sub server to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size, item.ordering_key)
for ack_id, item in leased_messages.items()
if item.sent_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
assert self._manager.dispatcher is not None
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a modack request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
assert self._manager.dispatcher is not None
ack_id_gen = (ack_id for ack_id in ack_ids)
self._manager._send_lease_modacks(ack_id_gen, deadline)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
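            # Illustrative arithmetic (the numbers are an assumption, the 0.9
            # factor comes from the line below): with a 60-second deadline the
            # snooze is drawn uniformly from [0, 54] seconds.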
snooze = random.uniform(0.0, deadline * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
def start(self) -> None:
with self._operational_lock:
if self._thread is not None:
raise ValueError("Leaser is already running.")
# Create and start the helper thread.
self._stop_event.clear()
thread = threading.Thread(
name=_LEASE_WORKER_NAME, target=self.maintain_leases
)
thread.daemon = True
thread.start()
_LOGGER.debug("Started helper thread %s", thread.name)
self._thread = thread
def stop(self) -> None:
with self._operational_lock:
self._stop_event.set()
if self._thread is not None:
# The thread should automatically exit when the consumer is
# inactive.
self._thread.join()
self._thread = None
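# Hedged usage sketch, not part of the original module: the leaser is normally
# driven by a streaming pull manager, roughly along these lines. The names
# `manager`, `received_message` and `ack_request` are illustrative assumptions.
#
#   leaser = Leaser(manager)
#   leaser.start()                                   # spawns maintain_leases()
#   leaser.add([received_message])                   # start tracking an ack ID
#   leaser.start_lease_expiry_timer([received_message.ack_id])
#   leaser.remove([ack_request])                     # stop tracking it
#   leaser.stop()                                    # join the helper thread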
|
|
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for HDS HNAS NFS storage.
"""
import os
import time
from xml.etree import ElementTree as ETree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume.drivers.hds import hnas_backend
from cinder.volume.drivers import nfs
from cinder.volume import utils
from cinder.volume import volume_types
HDS_HNAS_NFS_VERSION = '3.0.0'
LOG = logging.getLogger(__name__)
NFS_OPTS = [
cfg.StrOpt('hds_hnas_nfs_config_file',
default='/opt/hds/hnas/cinder_nfs_conf.xml',
help='Configuration file for HDS NFS cinder plugin'), ]
CONF = cfg.CONF
CONF.register_opts(NFS_OPTS)
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'ssh_port': '22'}
def _xml_read(root, element, check=None):
"""Read an xml element.
:param root: XML object
    :param element: string, desired tag name
    :param check: if set, raise an exception when the element is missing
"""
try:
val = root.findtext(element)
LOG.info(_LI("%(element)s: %(val)s"), {'element': element, 'val': val})
if val:
return val.strip()
if check:
raise exception.ParameterNotFound(param=element)
return None
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
LOG.error(_LE("XML exception reading parameter: %s"), element)
else:
LOG.info(_LI("XML exception reading parameter: %s"), element)
return None
def _read_config(xml_config_file):
"""Read hds driver specific xml config file.
:param xml_config_file: string filename containing XML configuration
"""
if not os.access(xml_config_file, os.R_OK):
msg = (_("Can't open config file: %s") % xml_config_file)
raise exception.NotFound(message=msg)
try:
root = ETree.parse(xml_config_file).getroot()
except Exception:
msg = (_("Error parsing config file: %s") % xml_config_file)
raise exception.ConfigNotFound(message=msg)
# mandatory parameters
config = {}
arg_prereqs = ['mgmt_ip0', 'username']
for req in arg_prereqs:
config[req] = _xml_read(root, req, 'check')
# optional parameters
opt_parameters = ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0']
for req in opt_parameters:
config[req] = _xml_read(root, req)
if config['ssh_enabled'] == 'True':
config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', 'check')
config['password'] = _xml_read(root, 'password')
config['ssh_port'] = _xml_read(root, 'ssh_port')
if config['ssh_port'] is None:
config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
else:
# password is mandatory when not using SSH
config['password'] = _xml_read(root, 'password', 'check')
if config['hnas_cmd'] is None:
config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd']
config['hdp'] = {}
config['services'] = {}
# min one needed
for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
if _xml_read(root, svc) is None:
continue
service = {'label': svc}
        # neither entry is optional
for arg in ['volume_type', 'hdp']:
service[arg] = _xml_read(root, svc + '/' + arg, 'check')
config['services'][service['volume_type']] = service
config['hdp'][service['hdp']] = service['hdp']
# at least one service required!
    if not config['services']:
raise exception.ParameterNotFound(param="No service found")
return config
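# Hedged example of the XML layout that _read_config() expects; the element
# names are taken from the parser above, while all values are illustrative:
#
#   <config>
#     <mgmt_ip0>172.17.1.1</mgmt_ip0>
#     <username>supervisor</username>
#     <password>supervisor</password>  <!-- or ssh_enabled + ssh_private_key -->
#     <hnas_cmd>ssc</hnas_cmd>
#     <svc_0>
#       <volume_type>default</volume_type>
#       <hdp>172.17.39.132:/cinder</hdp>
#     </svc_0>
#   </config>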
def factory_bend(drv_config):
"""Factory over-ride in self-tests."""
return hnas_backend.HnasBackend(drv_config)
class HDSNFSDriver(nfs.NfsDriver):
"""Base class for Hitachi NFS driver.
Executes commands relating to Volumes.
Version 1.0.0: Initial driver version
    Version 2.2.0: Added support for SSH authentication
"""
def __init__(self, *args, **kwargs):
# NOTE(vish): db is set by Manager
self._execute = None
self.context = None
self.configuration = kwargs.get('configuration', None)
if self.configuration:
self.configuration.append_config_values(NFS_OPTS)
self.config = _read_config(
self.configuration.hds_hnas_nfs_config_file)
super(HDSNFSDriver, self).__init__(*args, **kwargs)
self.bend = factory_bend(self.config)
def _array_info_get(self):
"""Get array parameters."""
out = self.bend.get_version(self.config['hnas_cmd'],
HDS_HNAS_NFS_VERSION,
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'])
inf = out.split()
return inf[1], 'nfs_' + inf[1], inf[6]
def _id_to_vol(self, volume_id):
"""Given the volume id, retrieve the volume object from database.
:param volume_id: string volume id
"""
vol = self.db.volume_get(self.context, volume_id)
return vol
def _get_service(self, volume):
"""Get the available service parameters for a given volume using
its type.
:param volume: dictionary volume reference
"""
LOG.debug("_get_service: volume: %s", volume)
label = utils.extract_host(volume['host'], level='pool')
if label in self.config['services'].keys():
svc = self.config['services'][label]
LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
{'lbl': label, 'svc': svc['fslabel']})
service = (svc['hdp'], svc['path'], svc['fslabel'])
else:
LOG.info(_LI("Available services: %s"),
self.config['services'].keys())
LOG.error(_LE("No configuration found for service: %s"),
label)
raise exception.ParameterNotFound(param=label)
return service
def set_execute(self, execute):
self._execute = execute
def extend_volume(self, volume, new_size):
"""Extend an existing volume.
:param volume: dictionary volume reference
:param new_size: int size in GB to extend
"""
nfs_mount = self._get_provider_location(volume['id'])
path = self._get_volume_path(nfs_mount, volume['name'])
# Resize the image file on share to new size.
LOG.debug("Checking file for resize")
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info(_LI("Resizing file to %sG"), new_size)
image_utils.resize_image(path, new_size)
if self._is_file_size_equal(path, new_size):
LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
{'id': volume['id'], 'size': new_size})
return
else:
raise exception.InvalidResults(
_("Resizing image file failed."))
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
else:
return False
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug("create_volume_from %s", volume)
vol_size = volume['size']
snap_size = snapshot['volume_size']
if vol_size != snap_size:
msg = _("Cannot create volume of size %(vol_size)s from "
"snapshot of size %(snap_size)s")
msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size}
raise exception.CinderException(msg % msg_fmt)
self._clone_volume(snapshot['name'],
volume['name'],
snapshot['volume_id'])
share = self._get_volume_location(snapshot['volume_id'])
return {'provider_location': share}
def create_snapshot(self, snapshot):
"""Create a snapshot.
:param snapshot: dictionary snapshot reference
"""
self._clone_volume(snapshot['volume_name'],
snapshot['name'],
snapshot['volume_id'])
share = self._get_volume_location(snapshot['volume_id'])
LOG.debug('Share: %s', share)
# returns the mount point (not path)
return {'provider_location': share}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: dictionary snapshot reference
"""
nfs_mount = self._get_provider_location(snapshot['volume_id'])
if self._volume_not_present(nfs_mount, snapshot['name']):
return True
self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']),
run_as_root=True)
def _get_volume_location(self, volume_id):
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>.
:param volume_id: string volume id
"""
nfs_server_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
return nfs_server_ip + ':' + export_path
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume.
:param volume_id: string volume id
"""
volume = self.db.volume_get(self.context, volume_id)
# same format as _get_volume_location
return volume.provider_location
def _get_host_ip(self, volume_id):
"""Returns IP address for the given volume.
:param volume_id: string volume id
"""
return self._get_provider_location(volume_id).split(':')[0]
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume.
:param volume_id: string volume id
"""
return self._get_provider_location(volume_id).split(':')[1]
def _volume_not_present(self, nfs_mount, volume_name):
"""Check if volume exists.
:param volume_name: string volume name
"""
try:
self._try_execute('ls', self._get_volume_path(nfs_mount,
volume_name))
except processutils.ProcessExecutionError:
# If the volume isn't present
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError:
tries += 1
if tries >= self.configuration.num_shell_tries:
raise
LOG.exception(_LE("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path (local fs path) for given volume name on given nfs
share.
        :param nfs_share: string, example 172.18.194.100:/var/nfs
        :param volume_name: string,
            example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
:param volume: dictionary volume reference
:param src_vref: dictionary src_vref reference
"""
vol_size = volume['size']
src_vol_size = src_vref['size']
if vol_size != src_vol_size:
msg = _("Cannot create clone of size %(vol_size)s from "
"volume of size %(src_vol_size)s")
msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size}
raise exception.CinderException(msg % msg_fmt)
self._clone_volume(src_vref['name'], volume['name'], src_vref['id'])
share = self._get_volume_location(src_vref['id'])
return {'provider_location': share}
def get_volume_stats(self, refresh=False):
"""Get volume stats.
if 'refresh' is True, update the stats first.
"""
_stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
_stats["vendor_name"] = 'HDS'
_stats["driver_version"] = HDS_HNAS_NFS_VERSION
_stats["storage_protocol"] = 'NFS'
for pool in self.pools:
capacity, free, used = self._get_capacity_info(pool['hdp'])
pool['total_capacity_gb'] = capacity / float(units.Gi)
pool['free_capacity_gb'] = free / float(units.Gi)
pool['allocated_capacity_gb'] = used / float(units.Gi)
pool['QoS_support'] = 'False'
pool['reserved_percentage'] = 0
_stats['pools'] = self.pools
LOG.info(_LI('Driver stats: %s'), _stats)
return _stats
def _get_nfs_info(self):
out = self.bend.get_nfs_info(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'])
lines = out.split('\n')
# dict based on NFS exports addresses
conf = {}
for line in lines:
if 'Export' in line:
inf = line.split()
(export, path, fslabel, hdp, ip1) = \
inf[1], inf[3], inf[5], inf[7], inf[11]
# 9, 10, etc are IP addrs
key = ip1 + ':' + export
conf[key] = {}
conf[key]['path'] = path
conf[key]['hdp'] = hdp
conf[key]['fslabel'] = fslabel
msg = _("nfs_info: %(key)s: %(path)s, HDP: \
%(fslabel)s FSID: %(hdp)s")
LOG.info(msg, {'key': key, 'path': path, 'fslabel': fslabel,
'hdp': hdp})
return conf
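    # Hedged sketch of the dict returned by _get_nfs_info(); the keys follow
    # the parsing code above and the values are purely illustrative:
    #   {'172.18.194.100:/export01': {'path': '/export01',
    #                                 'hdp': '1024',        # file system id
    #                                 'fslabel': 'fs-cinder'}}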
def do_setup(self, context):
"""Perform internal driver setup."""
self.context = context
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
LOG.info(_LI("Review shares: %s"), self.shares)
nfs_info = self._get_nfs_info()
LOG.debug("nfs_info: %s", nfs_info)
for share in self.shares:
if share in nfs_info.keys():
LOG.info(_LI("share: %(share)s -> %(info)s"),
{'share': share, 'info': nfs_info[share]['path']})
for svc in self.config['services'].keys():
if share == self.config['services'][svc]['hdp']:
self.config['services'][svc]['path'] = \
nfs_info[share]['path']
# don't overwrite HDP value
self.config['services'][svc]['fsid'] = \
nfs_info[share]['hdp']
self.config['services'][svc]['fslabel'] = \
nfs_info[share]['fslabel']
LOG.info(_LI("Save service info for"
" %(svc)s -> %(hdp)s, %(path)s"),
{'svc': svc, 'hdp': nfs_info[share]['hdp'],
'path': nfs_info[share]['path']})
break
if share != self.config['services'][svc]['hdp']:
LOG.error(_LE("NFS share %(share)s has no service entry:"
" %(svc)s -> %(hdp)s"),
{'share': share, 'svc': svc,
'hdp': self.config['services'][svc]['hdp']})
raise exception.ParameterNotFound(param=svc)
else:
LOG.info(_LI("share: %s incorrect entry"), share)
LOG.debug("self.config['services'] = %s", self.config['services'])
service_list = self.config['services'].keys()
for svc in service_list:
svc = self.config['services'][svc]
pool = {}
pool['pool_name'] = svc['volume_type']
pool['service_label'] = svc['volume_type']
pool['hdp'] = svc['hdp']
self.pools.append(pool)
LOG.info(_LI("Configured pools: %s"), self.pools)
def _clone_volume(self, volume_name, clone_name, volume_id):
"""Clones mounted volume using the HNAS file_clone.
:param volume_name: string volume name
:param clone_name: string clone name (or snapshot)
:param volume_id: string volume id
"""
export_path = self._get_export_path(volume_id)
# volume-ID snapshot-ID, /cinder
LOG.info(_LI("Cloning with volume_name %(vname)s clone_name %(cname)s"
" export_path %(epath)s"), {'vname': volume_name,
'cname': clone_name,
'epath': export_path})
source_vol = self._id_to_vol(volume_id)
# sps; added target
(_hdp, _path, _fslabel) = self._get_service(source_vol)
target_path = '%s/%s' % (_path, clone_name)
source_path = '%s/%s' % (_path, volume_name)
out = self.bend.file_clone(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
_fslabel, source_path, target_path)
return out
def get_pool(self, volume):
if not volume['volume_type']:
return 'default'
else:
metadata = {}
type_id = volume['volume_type_id']
if type_id is not None:
metadata = volume_types.get_volume_type_extra_specs(type_id)
if not metadata.get('service_label'):
return 'default'
else:
if metadata['service_label'] not in \
self.config['services'].keys():
return 'default'
else:
return metadata['service_label']
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
self._ensure_shares_mounted()
(_hdp, _path, _fslabel) = self._get_service(volume)
volume['provider_location'] = _hdp
LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"),
{'label': _fslabel, 'loc': volume['provider_location']})
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
|
|
from inspect import getargspec, getmembers, isfunction, ismethod
from types import MethodType
from ipykernel.ipkernel import IPythonKernel
from promise import Promise
from . import jsonrpc
from .config import Config
from .jsonrpc import (is_request,
is_response,
json_rpc_request,
json_rpc_result)
from .layers import (AnnotationLayer,
GeonotebookLayerCollection,
NoDataLayer,
SimpleLayer,
TimeSeriesLayer,
VectorLayer)
from .utils import get_kernel_id
from .wrappers import RasterData, RasterDataCollection, VectorData
class Remote(object):
"""Provides an object that proxies procedures on a remote object.
This takes a list of protocol definitions and dynamically generates methods
on the object that reflect that protocol. These methods wrap Promises
    which manage the reply and error callbacks of a remote procedure call.
    Remote defines a '_promises' variable which is a dict of message ids to
Promises.
"""
def validate(self, protocol, *args, **kwargs):
"""Validate a protocol definition.
:param protocol: Dict containing a single function's protocol
:returns: Nothing
:rtype: None
"""
assert len(args) >= len(protocol["required"]), \
"Protocol {} has an arity of {}. Called with {}".format(
protocol['procedure'], len(protocol["required"]), len(args))
assert len(args) <= len(protocol["required"]) + \
len(protocol["optional"]), \
"Protocol {} has an arity of {}. Called with {}".format(
protocol['procedure'], len(protocol["required"]), len(args))
def _make_protocol_method(self, protocol):
"""Make a method closure based on a protocol definition.
This takes a protocol and generates a closure that has the same arity
as the protocol. The closure is dynamically set as a method on the
Remote object with the same name as protocol. This makes it possible
to do:
Geonotebook._remote.set_center(-74.25, 40.0, 4)
which will validate the arguments, create a JSONRPC request object,
generate a Promise and store it in the _promises dict.
e.g:
def handle_error(error):
print "JSONError (%s): %s" % (error['code'], error['message'])
def handle_reply(result):
print(result)
def handle_callback_error(error):
print "Callback Error: %s" % error[0]
Geonotebook._remote.set_center(-74.25, 40.0, 4).then(
handle_reply, handle_error).catch(handle_callback_error)
:param protocol: a protocol dict
:returns: a closure that validates RPC arguments and returns a Promise
:rtype: MethodType
"""
assert 'required' in protocol, \
"protocol {} must define required arguments".format(
protocol['procedure'])
assert 'optional' in protocol, \
"protocol {} must define optional arguments".format(
protocol['procedure'])
for arg in protocol["required"]:
            assert 'key' in arg, \
                ("protocol {} is malformed, argument {} "
                 "does not have a key").format(
                     protocol['procedure'], arg)
for arg in protocol["optional"]:
            assert 'key' in arg, \
                ("protocol {} is malformed, argument {} "
                 "does not have a key").format(
                     protocol['procedure'], arg)
def _protocol_closure(self, *args, **kwargs):
try:
self.validate(protocol, *args, **kwargs)
except Exception as e:
# TODO: log something here
raise e
def make_param(key, value, required=True):
return {'key': key, 'value': value, 'required': required}
# Get the parameters
params = [
make_param(k['key'], v) for k, v in zip(
protocol['required'], args)
]
# Not technically available until ES6
params.extend([
make_param(k['key'], kwargs[k['key']], required=False)
for k in protocol['optional'] if k['key'] in kwargs
])
# Create the message
msg = json_rpc_request(protocol['procedure'], params)
# Set up the callback
self._promises[msg['id']] = Promise()
self._send_msg(msg)
# return the callback
return self._promises[msg['id']]
return MethodType(_protocol_closure, self)
def resolve(self, msg):
"""Resolve an open JSONRPC request.
Takes a JSONRPC result message and passes it to either the
on_fulfilled handler or the on_rejected handler of the Promise.
:param msg: JSONRPC result message
:returns: Nothing
:rtype: None
"""
if msg['id'] in self._promises:
try:
if msg['error'] is not None:
self._promises[msg['id']].reject(Exception(msg['error']))
else:
self._promises[msg['id']].fulfill(msg['result'])
except Exception as e:
raise e
else:
self.log.warn("Could not find promise with id %s" % msg['id'])
def __init__(self, transport, protocol):
"""Initialize the Remote object.
:param transport: function that takes a JSONRPC request message
:param protocol: A list of protocol definitions for remote functions
:returns: Nothing
:rtype: None
"""
self._promises = {}
self._send_msg = transport
self.protocol = protocol
for p in self.protocol:
            assert 'procedure' in p, \
                "protocol definition is missing a 'procedure' name"
setattr(self, p['procedure'], self._make_protocol_method(p))
class Geonotebook(object):
msg_types = ['get_protocol', 'set_center', 'add_annotation_from_client',
'get_map_state']
_protocol = None
_remote = None
@classmethod
def class_protocol(cls):
"""Initialize the RPC protocol description.
Provides a static, lazy loaded description of the functions that
are available to be called by the RPC mechanism.
:param cls: The class (e.g. Geonotebook)
:returns: the protocol description
:rtype: dict
"""
if cls._protocol is None:
def _method_protocol(fn, method):
spec = getargspec(method)
# spec.args[1:] so we don't include 'self'
params = spec.args[1:]
# The number of optional arguments
d = len(spec.defaults) if spec.defaults is not None else 0
# The number of required arguments
r = len(params) - d
def make_param(p, default=False):
return {'key': p, 'default': default}
                # Would be nice to include whether or not to expect a reply,
                # or if this is just a notification function
return {'procedure': fn,
'required': [make_param(p) for p in params[:r]],
'optional': [make_param(p, default=dd) for p, dd
in zip(params[r:], spec.defaults)]
if spec.defaults is not None else []}
# Note: for the predicate we do ismethod or isfunction for
# PY2/PY3 support
# See: https://docs.python.org/3.0/whatsnew/3.0.html
# "The concept of "unbound methods" has been removed from the
# language.
# When referencing a method as a class attribute, you now get a
# plain function object."
cls._protocol = {
fn: _method_protocol(fn, method) for fn, method in
getmembers(
cls,
predicate=lambda x: ismethod(x) or isfunction(x)
) if fn in cls.msg_types}
return cls._protocol.values()
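    # Hedged illustration of what _method_protocol produces for the
    # set_center(self, x, y, z) method defined further below:
    #   {'procedure': 'set_center',
    #    'required': [{'key': 'x', 'default': False},
    #                 {'key': 'y', 'default': False},
    #                 {'key': 'z', 'default': False}],
    #    'optional': []}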
def _send_msg(self, msg):
"""Send a message to the client.
'msg' should be a well formed RPC message.
:param msg: The RPC message
:returns: Nothing
:rtype: None
"""
self._kernel.comm.send(msg)
def _reconcile_parameters(self, method, params):
param_hash = {p['key']: p for p in params}
# Loop through protocol reconciling parameters
# from out of the param_hash. Note - does not do
# any error checking - exceptions will be caught
# and transformed into RPC errors
try:
args = [param_hash[p['key']]['value']
for p in self._protocol[method]['required']]
except KeyError:
raise jsonrpc.InvalidParams(
u"missing required params for method: %s" % method
)
kwargs = {p['key']: param_hash[p['key']]['value']
for p in self._protocol[method]['optional']
if p['key'] in param_hash}
return args, kwargs
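    # Hedged example of the reconciliation above, assuming the set_center
    # protocol: params such as
    #   [{'key': 'x', 'value': -74.25, 'required': True},
    #    {'key': 'y', 'value': 40.0, 'required': True},
    #    {'key': 'z', 'value': 4, 'required': True}]
    # yield args == [-74.25, 40.0, 4] and kwargs == {}.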
def _recv_msg(self, msg):
"""Recieve an RPC message from the client.
:param msg: An RPC message
:returns: Nothing
:rtype: None
"""
# If this is a response, pass it along to the Remote object to be
        # processed by the correct reply/error handler
if is_response(msg):
self._remote.resolve(msg)
# Otherwise process the request from the remote RPC client.
elif is_request(msg):
method, params = msg['method'], msg['params']
if method in self._protocol.keys():
try:
args, kwargs = self._reconcile_parameters(method, params)
result = getattr(self, method)(*args, **kwargs)
self._send_msg(json_rpc_result(result, None, msg['id']))
except Exception as e:
if isinstance(e, jsonrpc.JSONRPCError):
raise e
else:
raise jsonrpc.ServerError(str(e))
else:
raise jsonrpc.MethodNotFound("Method not allowed")
else:
raise jsonrpc.ParseError("Could not parse msg: %s" % msg)
@property
def log(self):
return self._kernel.log
def __init__(self, kernel, *args, **kwargs):
self.view_port = None
self.x = None
self.y = None
self.z = None
self.layers = GeonotebookLayerCollection([])
self._kernel = kernel
@property
def kernel_id(self):
return get_kernel_id(self._kernel)
def serialize(self):
ret = {}
if self.x and self.y and self.z:
ret['center'] = [self.x, self.y, self.z]
ret['layers'] = self.layers.serialize()
return ret
def rpc_error(self, error):
try:
self.log.error(
"JSONRPCError (%s): %s" % (error['code'], error['message'])
)
except Exception:
self.log.error(
"JSONRPCError: Malformed error message: {}".format(error)
)
def callback_error(self, exception):
import sys
import traceback
t, v, tb = sys.exc_info()
self.log.error('Callback Error: \n%s' %
''.join(traceback.format_exception(t, v, tb)))
# Remote RPC wrappers #
def set_center(self, x, y, z):
def _set_center(result):
self.x, self.y, self.z = result
return self._remote.set_center(x, y, z)\
.then(_set_center, self.rpc_error).catch(self.callback_error)
def get_map_state(self):
return self.serialize()
def add_layer(self, data, name=None, vis_url=None, **kwargs):
        # Create the GeonotebookLayer - if vis_url is None, this will take
        # data_path and upload it to the configured vis_server, which makes
        # the visualization url available through the 'vis_url' attribute
        # on the layer object.
# Make sure we pass in kernel_id to the layer, then to the vis_server
        # Otherwise we can't generate the correct vis_url.
layer_type = kwargs.get('layer_type', None)
kwargs['kernel_id'] = self.kernel_id
if layer_type != 'annotation':
kwargs['zIndex'] = len(self.layers)
# HACK: figure out a way to do this without so many conditionals
if isinstance(data, RasterData):
# TODO verify layer exists in geoserver?
name = data.name if name is None else name
layer = SimpleLayer(
name, self._remote, data=data, vis_url=vis_url, **kwargs
)
elif isinstance(data, RasterDataCollection):
assert name is not None, \
RuntimeError("RasterDataCollection layers require a 'name'")
layer = TimeSeriesLayer(
name, self._remote, data=data, vis_url=vis_url, **kwargs
)
elif isinstance(data, VectorData):
layer = VectorLayer(
name, self._remote, self.layers, data=data, **kwargs
)
else:
assert name is not None, \
RuntimeError("Non data layers require a 'name'")
if layer_type == 'annotation':
layer = AnnotationLayer(
name, self._remote, self.layers, **kwargs
)
else:
layer = NoDataLayer(
name, self._remote, vis_url=vis_url, **kwargs
)
def _add_layer(layer_name):
self.layers.append(layer)
return self._remote.add_layer(layer.name, layer.vis_url,
layer.vis_options.serialize(),
layer.query_params) \
.then(_add_layer, self.rpc_error) \
.catch(self.callback_error)
def remove_layer(self, layer_name):
# If layer_name is an object with a 'name' attribute we assume
        # that's the layer you want removed. This allows us to pass in
# GeonotebookLayer objects, as well as regular string layer names
if hasattr(layer_name, 'name'):
layer_name = layer_name.name
def _remove_layer(layer_name):
self.layers.remove(layer_name)
cb = self._remote.remove_layer(layer_name).then(
_remove_layer, self.rpc_error).catch(self.callback_error)
return cb
# RPC endpoints #
def get_protocol(self):
return self.__class__.class_protocol()
def add_annotation(self, ann_type, coords, meta=None):
"""Add an annotation to the annotation layer.
:param str ann_type: 'point', 'rectangle', or 'polygon'
:param list[dict] coords: A list of coordinates defining the annotation
:param dict meta: Extra metadata stored with the annotation
"""
def _add_annotation(response):
meta.update(response)
self.add_annotation_from_client(ann_type, coords, meta)
return True
meta = meta or {}
return self._remote.add_annotation(
ann_type, [coords], meta
).then(
_add_annotation,
self.rpc_error
).catch(self.callback_error)
def add_annotation_from_client(self, ann_type, coords, meta):
"""Add an existing annotation to the map state.
This method is not intended to be called by the user. It
exists to append an annotation initialized on the client
to the server map state.
"""
self.layers.annotation.add_annotation(ann_type, coords, meta)
return True
class GeonotebookKernel(IPythonKernel):
def _unwrap(self, msg):
"""Unwrap a Comm message.
        Remove the Comm envelope and return an RPC message.
:param msg: the Comm message
:returns: An RPC message
:rtype: dict
"""
return msg['content']['data']
def handle_comm_msg(self, message):
"""Handle incomming comm messages.
:param msg: a Comm message
:returns: Nothing
:rtype: None
"""
msg = self._unwrap(message)
try:
self.geonotebook._recv_msg(msg)
except jsonrpc.JSONRPCError as e:
self.geonotebook._send_msg(
json_rpc_result(None, e.tojson(), msg['id'])
)
self.log.error(u"JSONRPCError (%s): %s" % (e.code, e.message))
except Exception as e:
self.log.error(u"Error processing msg: {}".format(str(e)))
def handle_comm_open(self, comm, msg):
"""Handle opening a comm.
:param comm: The comm to open
:param msg: The initial comm_open message
:returns: Nothing
:rtype: None
"""
self.comm = comm
self.comm.on_msg(self.handle_comm_msg)
# TODO: Check if the msg is empty - no protocol - die
self.geonotebook._remote = Remote(self.comm.send, self._unwrap(msg))
# Reply to the open comm, this should probably be set up on
        # self.geonotebook._remote as an actual procedure call
self.comm.send({
"method": "set_protocol",
"data": self.geonotebook.get_protocol()
})
        # This should be handled in a callback that is fired off
        # when set_protocol etc. is complete.
if self.initializing:
basemap = Config().basemap
self.geonotebook.add_layer(
None, name="osm_base", layer_type="osm",
vis_url=basemap["url"],
system_layer=True,
attribution=basemap["attribution"])
self.geonotebook.add_layer(
None, name="annotation",
layer_type="annotation", vis_url=None,
system_layer=True, expose_as="annotation")
self.initializing = False
def do_shutdown(self, restart):
self.geonotebook = None
super(GeonotebookKernel, self).do_shutdown(restart)
config = Config()
config.vis_server.shutdown_kernel(self)
if restart:
self.geonotebook = Geonotebook(self)
self.shell.user_ns.update({'M': self.geonotebook})
def start(self):
self.geonotebook = Geonotebook(self)
self.shell.user_ns.update({'M': self.geonotebook})
super(GeonotebookKernel, self).start()
config = Config()
self.log.setLevel(config.log_level)
config.vis_server.start_kernel(self)
def __init__(self, **kwargs):
self.log = kwargs['log']
self.initializing = True
super(GeonotebookKernel, self).__init__(**kwargs)
self.comm_manager.register_target('geonotebook', self.handle_comm_open)
|
|
# -*- coding: utf-8 -*-
'''
:copyright: (c) 2015 by Allenta Consulting S.L. <info@allenta.com>.
:license: BSD, see LICENSE.txt for more details.
'''
from __future__ import absolute_import
import json
from contextlib import closing
from datetime import datetime
from requests import Session, Request
from requests.exceptions import RequestException
from django.conf import settings
def restricted(f):
def wrapper(self, *args, **kwargs):
if self.cookie is None:
raise VAC.AuthenticationException(
'Failed to execute VAC request: user is not authenticated.')
return f(self, *args, **kwargs)
return wrapper
class VAC(object):
COOKIE_NAME = 'JSESSIONID'
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
DEFAULT_TIMEOUT = 5 # Seconds.
class Exception(Exception):
pass
class AuthenticationException(Exception):
pass
class CacheGroup(object):
def __init__(self, spec):
self.id = spec['_id']['$oid']
self.name = spec['name']
self.active_vcl = \
spec['activeVCL']['$id']['$oid'] \
if 'activeVCL' in spec else None
self.created_at = datetime.strptime(
spec['created']['$date'], VAC.DATETIME_FORMAT)
self.updated_at = datetime.strptime(
spec['timestamp']['$date'], VAC.DATETIME_FORMAT)
class VCL(object):
def __init__(self, spec):
self.id = spec['_id']['$oid']
self.name = spec['name']
self.user = spec['createdByUser']['$id']['$oid']
self.created_at = datetime.strptime(
spec['created']['$date'], VAC.DATETIME_FORMAT)
self.updated_at = datetime.strptime(
spec['timestamp']['$date'], VAC.DATETIME_FORMAT)
class VCLCommit(object):
def __init__(self, spec):
self.id = spec['_id']['$oid']
self.branch = spec['branch']['$id']['$oid']
self.content = spec['content']
self.is_head = spec['head']
self.user = spec['committedByUser']['$id']['$oid']
self.created_at = datetime.strptime(
spec['created']['$date'], VAC.DATETIME_FORMAT)
self.updated_at = datetime.strptime(
spec['timestamp']['$date'], VAC.DATETIME_FORMAT)
def __init__(self, cookie=None):
self.cookie = cookie
self._local_cache = {}
def flush_local_cache(self):
'''Clean up the local cache.
Responses to GET requests are stored in a local cache to minimize the
requests sent to the VAC. Whenever a successful login or logout
        operation completes, this method is called automatically.
'''
self._local_cache = {}
def login(self, username, password):
'''Try to start a new authenticated session.'''
if self.cookie is None:
response = self._execute('POST', '/api/rest/login', data={
'username': username,
'password': password,
})
if response.status_code == 200 and \
self.COOKIE_NAME in response.cookies:
self.cookie = response.cookies[self.COOKIE_NAME]
self.flush_local_cache()
return self.cookie is not None
def logout(self):
'''Close the current session.'''
if self.cookie is not None:
self._execute('POST', '/api/rest/logout', codes=[200])
self.cookie = None
self.flush_local_cache()
def validate_session(self):
        '''Check that the current session is still valid.'''
if self.cookie is not None:
response = self._execute('POST', '/api/rest/checkcookie')
return response.status_code == 200
return False
@restricted
def groups(self):
'''Get the list of groups.'''
response = self._execute('GET', '/api/v1/group', codes=[200])
return map(VAC.CacheGroup, json.loads(response.text)['list'])
@restricted
def group(self, group_id):
'''Get a group by its id.'''
response = self._execute(
'GET', '/api/v1/group/%(group_id)s' % {
'group_id': group_id,
}, codes=[200, 404])
if response.status_code == 200:
return VAC.CacheGroup(json.loads(response.text))
else:
return None
@restricted
def vcl(self, vcl_id):
'''Get a VCL (branch) by its id.'''
response = self._execute(
'GET', '/api/v1/vcl/%(vcl_id)s' % {
'vcl_id': vcl_id,
}, codes=[200, 404])
if response.status_code == 200:
return VAC.VCL(json.loads(response.text))
else:
return None
@restricted
def vcl_head(self, vcl_id):
'''Get the current head (VCL commit) of a given VCL (branch).'''
response = self._execute(
'GET', '/api/v1/vcl/%(vcl_id)s/head' % {
'vcl_id': vcl_id,
}, codes=[200, 404])
if response.status_code == 200:
return VAC.VCLCommit(json.loads(response.text))
else:
return None
@restricted
def vcl_push(self, vcl_id, vcl_content, group_id=None, fallback_commit=None):
'''Push a new VCL commit to a given VCL (branch).'''
response = self._execute(
'POST', '/api/v1/vcl/%(vcl_id)s/push' % {
'vcl_id': vcl_id,
},
codes=[200, 400],
data=vcl_content,
headers={'Content-Type': 'text/plain'})
success = (response.status_code == 200)
if success:
# Optional: try to force group to reload the current head of the
# VCL branch immediately.
if group_id:
self._execute(
'PUT', '/api/v1/group/%(group_id)s/vcl/%(vcl_id)s/deploy' % {
'group_id': group_id,
'vcl_id': vcl_id,
},
codes=[200, 204])
else:
# Optional: upon failure, rollback to a given VCL commit.
if fallback_commit:
self._execute(
'POST', '/api/v1/vcl/%(vcl_id)s/push/%(vcl_commit_id)s' % {
'vcl_id': vcl_id,
'vcl_commit_id': fallback_commit,
},
codes=[200, 400])
parsed_response = json.loads(response.text)
return {
'success': success,
'message': parsed_response['message'],
'vcl': VAC.VCLCommit(parsed_response),
}
def _execute(self, method, path, codes=None, **request_kwargs):
try:
request = Request(
method, settings.VAC_API + path, **request_kwargs)
with closing(Session()) as session:
# Add session cookie if user is authenticated.
if self.cookie is not None:
session.cookies[self.COOKIE_NAME] = self.cookie
response = None
# Try with local cache if this is a GET request.
if method == 'GET':
response = self._local_cache.get(path)
# No cached response? Send request.
if response is None:
response = session.send(
session.prepare_request(request),
stream=False,
timeout=self.DEFAULT_TIMEOUT)
# Store response in the local cache if this is a GET
# request.
if method == 'GET':
self._local_cache[path] = response
# Check response status code is in the list of valid codes
# if any was supplied.
if codes is not None and response.status_code not in codes:
# Unauthorized: raise a VAC.AuthenticationException.
if response.status_code == 401:
raise VAC.AuthenticationException(
'Failed to execute VAC request [%(path)s]: user '
'is not authenticated.' % {
'path': path,
}
)
# Other unexpected codes: raise a VAC.Exception.
else:
raise VAC.Exception(
'Unexpected VAC response code (%(code)d) '
'[%(path)s]:\n%(text)s' % {
'code': response.status_code,
'path': path,
'text': response.text,
}
)
# Done!
return response
# Unexpected error communicating with the VAC: raise a VAC.Exception.
except RequestException as e:
raise VAC.Exception(
'Failed to execute VAC request [%(path)s]:\n%(message)s' % {
'path': path,
'message': e.message,
}
)
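# Hedged usage sketch, not part of the original module; settings.VAC_API must
# point at a reachable VAC instance and the credentials are illustrative:
#
#   vac = VAC()
#   if vac.login('admin', 'secret'):
#       for group in vac.groups():
#           print group.name  # the module targets Python 2
#       vac.logout()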
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running backend tests in parallel.
This should not be run directly. Instead, navigate to the oppia/ folder and
execute:
bash scripts/run_backend_tests.sh
"""
import argparse
import datetime
import os
import re
import subprocess
import threading
import time
# DEVELOPERS: Please change this number accordingly when new tests are added
# or removed.
EXPECTED_TEST_COUNT = 621
COVERAGE_PATH = os.path.join(
os.getcwd(), '..', 'oppia_tools', 'coverage-4.0', 'coverage')
TEST_RUNNER_PATH = os.path.join(os.getcwd(), 'core', 'tests', 'gae_suite.py')
LOG_LOCK = threading.Lock()
ALL_ERRORS = []
# This should be the same as core.test_utils.LOG_LINE_PREFIX.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--generate_coverage_report',
help='optional; if specified, generates a coverage report',
action='store_true')
_PARSER.add_argument(
'--test_target',
help='optional dotted module name of the test(s) to run',
type=str)
_PARSER.add_argument(
'--test_path',
help='optional subdirectory path containing the test(s) to run',
type=str)
def log(message, show_time=False):
"""Logs a message to the terminal.
If show_time is True, prefixes the message with the current time.
"""
with LOG_LOCK:
if show_time:
print datetime.datetime.utcnow().strftime('%H:%M:%S'), message
else:
print message
def run_shell_cmd(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
"""Runs a shell command and captures the stdout and stderr output.
If the cmd fails, raises Exception. Otherwise, returns a string containing
the concatenation of the stdout and stderr logs.
"""
p = subprocess.Popen(exe, stdout=stdout, stderr=stderr)
last_stdout_str, last_stderr_str = p.communicate()
last_stdout = last_stdout_str.split('\n')
if LOG_LINE_PREFIX in last_stdout_str:
log('')
for line in last_stdout:
if line.startswith(LOG_LINE_PREFIX):
log('INFO: %s' % line[len(LOG_LINE_PREFIX): ])
log('')
result = '%s%s' % (last_stdout_str, last_stderr_str)
if p.returncode != 0:
raise Exception('Error %s\n%s' % (p.returncode, result))
return result
class TaskThread(threading.Thread):
"""Runs a task in its own thread."""
def __init__(self, func, name=None):
super(TaskThread, self).__init__()
self.func = func
self.output = None
self.exception = None
self.name = name
self.finished = False
def run(self):
try:
self.output = self.func()
log('FINISHED %s: %.1f secs' %
(self.name, time.time() - self.start_time), show_time=True)
self.finished = True
except Exception as e:
self.exception = e
if 'KeyboardInterrupt' not in str(self.exception):
log('ERROR %s: %.1f secs' %
(self.name, time.time() - self.start_time), show_time=True)
self.finished = True
class TestingTaskSpec(object):
"""Executes a set of tests given a test class name."""
def __init__(self, test_target, generate_coverage_report):
self.test_target = test_target
self.generate_coverage_report = generate_coverage_report
def run(self):
"""Runs all tests corresponding to the given test target."""
test_target_flag = '--test_target=%s' % self.test_target
if self.generate_coverage_report:
exc_list = [
'python', COVERAGE_PATH, 'run', '-p', TEST_RUNNER_PATH,
test_target_flag]
else:
exc_list = ['python', TEST_RUNNER_PATH, test_target_flag]
return run_shell_cmd(exc_list)
def _check_all_tasks(tasks):
"""Checks the results of all tasks."""
running_tasks_data = []
for task in tasks:
if task.isAlive():
running_tasks_data.append(' %s (started %s)' % (
task.name,
time.strftime('%H:%M:%S', time.localtime(task.start_time))
))
if task.exception:
ALL_ERRORS.append(task.exception)
if running_tasks_data:
log('----------------------------------------')
log('Tasks still running:')
for task_details in running_tasks_data:
log(task_details)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = [] + tasks
currently_running_tasks = set([])
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in list(currently_running_tasks):
task.join(1)
if not task.isAlive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.pop()
currently_running_tasks.add(task)
task.start()
task.start_time = time.time()
time.sleep(5)
if remaining_tasks:
log('----------------------------------------')
log('Number of unstarted tasks: %s' % len(remaining_tasks))
_check_all_tasks(tasks)
log('----------------------------------------')
def _get_all_test_targets(test_path=None):
"""Returns a list of test targets for all classes under test_path
containing tests.
"""
def _convert_to_test_target(path):
"""Remove the .py suffix and replace all slashes with periods."""
return os.path.relpath(path, os.getcwd())[:-3].replace('/', '.')
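    # For example (illustrative path), 'core/domain/exp_services_test.py'
    # relative to the current working directory becomes
    # 'core.domain.exp_services_test'.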
base_path = os.path.join(os.getcwd(), test_path or '')
result = []
for root in os.listdir(base_path):
if any([s in root for s in ['.git', 'third_party', 'core/tests']]):
continue
if root.endswith('_test.py'):
result.append(_convert_to_test_target(
os.path.join(base_path, root)))
for subroot, _, files in os.walk(os.path.join(base_path, root)):
for f in files:
if (f.endswith('_test.py') and
os.path.join('core', 'tests') not in subroot):
result.append(_convert_to_test_target(
os.path.join(subroot, f)))
return result
def main():
"""Run the tests."""
parsed_args = _PARSER.parse_args()
if parsed_args.test_target and parsed_args.test_path:
raise Exception('At most one of test_path and test_target '
'should be specified.')
if parsed_args.test_path and '.' in parsed_args.test_path:
raise Exception('The delimiter in test_path should be a slash (/)')
if parsed_args.test_target and '/' in parsed_args.test_target:
raise Exception('The delimiter in test_target should be a dot (.)')
if parsed_args.test_target:
all_test_targets = [parsed_args.test_target]
else:
all_test_targets = _get_all_test_targets(
test_path=parsed_args.test_path)
# Prepare tasks.
task_to_taskspec = {}
tasks = []
for test_target in all_test_targets:
test = TestingTaskSpec(
test_target, parsed_args.generate_coverage_report)
task = TaskThread(test.run, name=test_target)
task_to_taskspec[task] = test
tasks.append(task)
task_execution_failed = False
try:
_execute_tasks(tasks)
except:
task_execution_failed = True
for task in tasks:
if task.exception:
log(str(task.exception))
print ''
print '+------------------+'
print '| SUMMARY OF TESTS |'
print '+------------------+'
print ''
# Check we ran all tests as expected.
total_count = 0
total_errors = 0
total_failures = 0
for task in tasks:
spec = task_to_taskspec[task]
if not task.finished:
print 'CANCELED %s' % spec.test_target
test_count = 0
elif 'No tests were run' in str(task.exception):
print 'ERROR %s: No tests found.' % spec.test_target
test_count = 0
elif task.exception:
exc_str = str(task.exception).decode('utf-8')
print exc_str[exc_str.find('=') : exc_str.rfind('-')]
tests_failed_regex_match = re.search(
r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
'([0-9]+) failures',
str(task.exception))
try:
test_count = int(tests_failed_regex_match.group(1))
errors = int(tests_failed_regex_match.group(2))
failures = int(tests_failed_regex_match.group(3))
total_errors += errors
total_failures += failures
print 'FAILED %s: %s errors, %s failures' % (
spec.test_target, errors, failures)
except AttributeError:
# There was an internal error, and the tests did not run. (The
# error message did not match `tests_failed_regex_match`.)
test_count = 0
print ''
print '------------------------------------------------------'
print ' WARNING: FAILED TO RUN TESTS.'
print ''
print ' This is most likely due to an import error.'
print '------------------------------------------------------'
else:
tests_run_regex_match = re.search(
r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
test_count = int(tests_run_regex_match.group(1))
test_time = float(tests_run_regex_match.group(2))
print ('SUCCESS %s: %d tests (%.1f secs)' %
(spec.test_target, test_count, test_time))
total_count += test_count
print ''
if total_count == 0:
raise Exception('WARNING: No tests were run.')
elif (parsed_args.test_path is None and parsed_args.test_target is None
and total_count != EXPECTED_TEST_COUNT):
raise Exception(
'ERROR: Expected %s tests to be run, not %s.' %
(EXPECTED_TEST_COUNT, total_count))
else:
print 'Ran %s test%s in %s test class%s.' % (
total_count, '' if total_count == 1 else 's',
len(tasks), '' if len(tasks) == 1 else 'es')
if total_errors or total_failures:
print '(%s ERRORS, %s FAILURES)' % (total_errors, total_failures)
else:
print 'All tests passed.'
if task_execution_failed:
raise Exception('Task execution failed.')
elif total_errors or total_failures:
raise Exception(
'%s errors, %s failures' % (total_errors, total_failures))
if __name__ == '__main__':
main()
|
|
# pylint: disable=too-many-lines
# TODO de-clutter this file!
# pylint: disable=unused-import
from typing import (Any, Callable, Dict, List, Tuple, Optional, Union,
Iterable, Set)
# pylint: enable=unused-import
import time
import collections
import re
from datetime import timedelta
import numpy as np
import tensorflow as tf
from termcolor import colored
from typeguard import check_argument_types, check_type
from neuralmonkey.logging import log, log_print, warn, notice
from neuralmonkey.dataset import Dataset, LazyDataset
from neuralmonkey.tf_manager import TensorFlowManager
from neuralmonkey.runners.base_runner import BaseRunner, ExecutionResult
from neuralmonkey.trainers.generic_trainer import GenericTrainer
# pylint: disable=invalid-name
Evaluation = Dict[str, float]
SeriesName = str
EvalConfiguration = List[Union[Tuple[SeriesName, Any],
Tuple[SeriesName, SeriesName, Any]]]
Postprocess = Optional[List[Tuple[SeriesName, Callable]]]
# pylint: enable=invalid-name
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
# pylint: disable=too-many-statements, too-many-nested-blocks
def training_loop(tf_manager: TensorFlowManager,
epochs: int,
trainer: GenericTrainer, # TODO better annotate
batch_size: int,
log_directory: str,
evaluators: EvalConfiguration,
runners: List[BaseRunner],
train_dataset: Dataset,
val_dataset: Union[Dataset, List[Dataset]],
test_datasets: Optional[List[Dataset]] = None,
logging_period: Union[str, int] = 20,
validation_period: Union[str, int] = 500,
val_preview_input_series: Optional[List[str]] = None,
val_preview_output_series: Optional[List[str]] = None,
val_preview_num_examples: int = 15,
train_start_offset: int = 0,
runners_batch_size: Optional[int] = None,
initial_variables: Optional[Union[str, List[str]]] = None,
postprocess: Postprocess = None) -> None:
"""Execute the training loop for given graph and data.
Args:
tf_manager: TensorFlowManager with initialized sessions.
        epochs: Number of epochs for which the algorithm will learn.
        trainer: The trainer object containing the TensorFlow code for
            computing the loss and optimization operation.
        batch_size: number of examples in one mini-batch
        log_directory: Directory where the TensorBoard log will be generated.
If None, nothing will be done.
        evaluators: List of evaluators. The last evaluator is used as the
            main one. An evaluator is a tuple of the name of the generated
            series, the name of the dataset series the generated one is
            evaluated with and the evaluation function. If only one
            series name is provided, it means the generated and
            dataset series have the same name.
runners: List of runners for logging and evaluation runs
train_dataset: Dataset used for training
val_dataset: used for validation. Can be Dataset or a list of datasets.
The last dataset is used as the main one for storing best results.
            When using multiple datasets, it is recommended to name them for
            better TensorBoard visualization.
test_datasets: List of datasets used for testing
        logging_period: after how many batches logging should happen. It
            can also be defined as a time period in a format like: 3s; 4m; 6h;
            1d; 3m15s; 3seconds; 4minutes; 6hours; 1days
        validation_period: after how many batches validation should happen.
            It can also be defined as a time period in the same format as
            logging_period.
val_preview_input_series: which input series to preview in validation
val_preview_output_series: which output series to preview in validation
val_preview_num_examples: how many examples should be printed during
validation
train_start_offset: how many lines from the training dataset should be
skipped. The training starts from the next batch.
runners_batch_size: batch size of runners. It is the same as batch_size
if not specified
initial_variables: variables used for initialization, for example for
continuation of training
postprocess: A function which takes the dataset with its output series
and generates additional series from them.
"""
check_argument_types()
if isinstance(val_dataset, Dataset):
val_datasets = [val_dataset]
else:
val_datasets = val_dataset
log_period_batch, log_period_time = _resolve_period(logging_period)
val_period_batch, val_period_time = _resolve_period(validation_period)
_check_series_collisions(runners, postprocess)
_log_model_variables(var_list=trainer.var_list)
if runners_batch_size is None:
runners_batch_size = batch_size
evaluators = [(e[0], e[0], e[1]) if len(e) == 2 else e
for e in evaluators]
if evaluators:
main_metric = "{}/{}".format(evaluators[-1][0],
evaluators[-1][-1].name)
else:
main_metric = "{}/{}".format(runners[-1].decoder_data_id,
runners[-1].loss_names[0])
if not tf_manager.minimize_metric:
raise ValueError("minimize_metric must be set to True in "
"TensorFlowManager when using loss as "
"the main metric")
step = 0
seen_instances = 0
last_seen_instances = 0
if initial_variables is None:
# Assume we don't look at coder checkpoints when global
# initial variables are supplied
tf_manager.initialize_model_parts(
runners + [trainer], save=True) # type: ignore
else:
try:
tf_manager.restore(initial_variables)
except tf.errors.NotFoundError:
warn("Some variables were not found in checkpoint.)")
if log_directory:
log("Initializing TensorBoard summary writer.")
tb_writer = tf.summary.FileWriter(
log_directory, tf_manager.sessions[0].graph)
log("TensorBoard writer initialized.")
log("Starting training")
last_log_time = time.process_time()
last_val_time = time.process_time()
interrupt = None
try:
for epoch_n in range(1, epochs + 1):
log_print("")
log("Epoch {} starts".format(epoch_n), color="red")
train_dataset.shuffle()
train_batched_datasets = train_dataset.batch_dataset(batch_size)
if epoch_n == 1 and train_start_offset:
if not isinstance(train_dataset, LazyDataset):
warn("Not skipping training instances with "
"shuffled in-memory dataset")
else:
_skip_lines(train_start_offset, train_batched_datasets)
for batch_n, batch_dataset in enumerate(train_batched_datasets):
step += 1
seen_instances += len(batch_dataset)
if _is_logging_time(step, log_period_batch,
last_log_time, log_period_time):
trainer_result = tf_manager.execute(
batch_dataset, [trainer], train=True,
summaries=True)
train_results, train_outputs = run_on_dataset(
tf_manager, runners, batch_dataset,
postprocess, write_out=False,
batch_size=runners_batch_size)
# ensure train outputs are iterable more than once
train_outputs = {k: list(v) for k, v
in train_outputs.items()}
train_evaluation = evaluation(
evaluators, batch_dataset, runners,
train_results, train_outputs)
_log_continuous_evaluation(
tb_writer, main_metric, train_evaluation,
seen_instances, epoch_n, epochs, trainer_result,
train=True)
last_log_time = time.process_time()
else:
tf_manager.execute(batch_dataset, [trainer],
train=True, summaries=False)
if _is_logging_time(step, val_period_batch,
last_val_time, val_period_time):
log_print("")
val_duration_start = time.process_time()
val_examples = 0
for val_id, valset in enumerate(val_datasets):
val_examples += len(valset)
val_results, val_outputs = run_on_dataset(
tf_manager, runners, valset,
postprocess, write_out=False,
batch_size=runners_batch_size)
# ensure val outputs are iterable more than once
val_outputs = {k: list(v)
for k, v in val_outputs.items()}
val_evaluation = evaluation(
evaluators, valset, runners, val_results,
val_outputs)
valheader = ("Validation (epoch {}, batch number {}):"
.format(epoch_n, batch_n))
log(valheader, color="blue")
_print_examples(
valset, val_outputs, val_preview_input_series,
val_preview_output_series,
val_preview_num_examples)
log_print("")
log(valheader, color="blue")
# The last validation set is selected to be the main
if val_id == len(val_datasets) - 1:
this_score = val_evaluation[main_metric]
tf_manager.validation_hook(this_score, epoch_n,
batch_n)
if this_score == tf_manager.best_score:
best_score_str = colored(
"{:.4g}".format(tf_manager.best_score),
attrs=["bold"])
# store also graph parts
all_coders = set.union(
*[rnr.all_coders
for rnr in runners
+ [trainer]]) # type: ignore
for coder in all_coders:
for session in tf_manager.sessions:
coder.save(session)
else:
best_score_str = "{:.4g}".format(
tf_manager.best_score)
log("best {} on validation: {} (in epoch {}, "
"after batch number {})"
.format(main_metric, best_score_str,
tf_manager.best_score_epoch,
tf_manager.best_score_batch),
color="blue")
v_name = valset.name if len(val_datasets) > 1 else None
_log_continuous_evaluation(
tb_writer, main_metric, val_evaluation,
seen_instances, epoch_n, epochs, val_results,
train=False, dataset_name=v_name)
# how long was the training between validations
training_duration = val_duration_start - last_val_time
val_duration = time.process_time() - val_duration_start
# the training should take at least twice the time of val.
steptime = (training_duration
/ (seen_instances - last_seen_instances))
valtime = val_duration / val_examples
last_seen_instances = seen_instances
log("Validation time: {:.2f}s, inter-validation: {:.2f}s, "
"per-instance (train): {:.2f}s, per-instance (val): "
"{:.2f}s".format(val_duration, training_duration,
steptime, valtime), color="blue")
if training_duration < 2 * val_duration:
notice("Validation period setting is inefficient.")
log_print("")
last_val_time = time.process_time()
except KeyboardInterrupt as ex:
interrupt = ex
log("Training finished. Maximum {} on validation data: {:.4g}, epoch {}"
.format(main_metric, tf_manager.best_score,
tf_manager.best_score_epoch))
if test_datasets:
tf_manager.restore_best_vars()
for dataset in test_datasets:
test_results, test_outputs = run_on_dataset(
tf_manager, runners, dataset, postprocess,
write_out=True, batch_size=runners_batch_size)
# ensure test outputs are iterable more than once
test_outputs = {k: list(v) for k, v in test_outputs.items()}
eval_result = evaluation(evaluators, dataset, runners,
test_results, test_outputs)
print_final_evaluation(dataset.name, eval_result)
log("Finished.")
if interrupt is not None:
raise interrupt # pylint: disable=raising-bad-type
def _is_logging_time(step: int, logging_period_batch: int,
last_log_time: float, logging_period_time: int):
if logging_period_batch is not None:
return step % logging_period_batch == logging_period_batch - 1
return last_log_time + logging_period_time < time.process_time()
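# Note added for clarity (not in the original sources): exactly one of
# `logging_period_batch` / `logging_period_time` is expected to be set (see
# _resolve_period below). The batch-based check fires on every
# `logging_period_batch`-th step; the time-based check fires once the given
# number of seconds has elapsed since the last log.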
def _resolve_period(period):
if isinstance(period, int):
return period, None
else:
regex = re.compile(
r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?"
r"((?P<seconds>\d+?)s)?")
parts = regex.match(period)
if not parts:
raise ValueError(
"Validation or logging period have incorrect format. "
"It should be in format: 3h; 5m; 14s")
parts = parts.groupdict()
time_params = {}
for (name, param) in parts.items():
if param:
time_params[name] = int(param)
delta_seconds = timedelta(**time_params).total_seconds()
if delta_seconds <= 0:
raise ValueError(
"Validation or logging period must be bigger than 0")
return None, delta_seconds
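# Illustrative sketch of how periods resolve (added comment, not original code):
#   _resolve_period(500)   -> (500, None)     # validate/log every 500 batches
#   _resolve_period("30m") -> (None, 1800.0)  # validate/log every 30 minutes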
def _check_series_collisions(runners: List[BaseRunner],
postprocess: Postprocess) -> None:
"""Check if output series names do not collide."""
runners_outputs = set() # type: Set[str]
for runner in runners:
series = runner.output_series
if series in runners_outputs:
raise Exception(("Output series '{}' is multiple times among the "
"runners' outputs.").format(series))
else:
runners_outputs.add(series)
if postprocess is not None:
for series, _ in postprocess:
if series in runners_outputs:
raise Exception(("Postprocess output series '{}' "
"already exists.").format(series))
else:
runners_outputs.add(series)
def run_on_dataset(tf_manager: TensorFlowManager,
runners: List[BaseRunner],
dataset: Dataset,
postprocess: Postprocess,
write_out: bool = False,
batch_size: Optional[int] = None,
log_progress: int = 0) -> Tuple[
List[ExecutionResult], Dict[str, List[Any]]]:
"""Apply the model on a dataset and optionally write outputs to files.
Args:
tf_manager: TensorFlow manager with initialized sessions.
        runners: List of runners to apply to the dataset.
        dataset: The dataset on which the model will be executed.
        postprocess: A list of pairs of an output series name and a
            postprocessing function to apply to the result data.
        write_out: Flag whether the outputs should be printed to a file defined
            in the dataset object.
        batch_size: Size of the minibatch.
        log_progress: Log progress every X seconds.
Returns:
Tuple of resulting sentences/numpy arrays, and evaluation results if
they are available which are dictionary function -> value.
"""
contains_targets = all(dataset.has_series(runner.decoder_data_id)
for runner in runners
if runner.decoder_data_id is not None)
all_results = tf_manager.execute(dataset, runners,
compute_losses=contains_targets,
batch_size=batch_size,
log_progress=log_progress)
result_data = {runner.output_series: result.outputs
for runner, result in zip(runners, all_results)}
if postprocess is not None:
for series_name, postprocessor in postprocess:
postprocessed = postprocessor(dataset, result_data)
if not hasattr(postprocessed, "__len__"):
postprocessed = list(postprocessed)
result_data[series_name] = postprocessed
# check output series lengths
for series_id, data in result_data.items():
if len(data) != len(dataset):
warn("Output '{}' for dataset '{}' has length {}, but "
"len(dataset) == {}".format(series_id, dataset.name,
len(data), len(dataset)))
def _check_savable_dict(data):
"""Check if the data is of savable type."""
if not (data and data[0]):
return False
supported_type = Union[
List[Dict[str, np.ndarray]],
List[List[Dict[str, np.ndarray]]]]
try:
check_type("data", data, supported_type, None)
except TypeError:
return False
return True
if write_out:
for series_id, data in result_data.items():
if series_id in dataset.series_outputs:
path = dataset.series_outputs[series_id]
if isinstance(data, np.ndarray):
np.save(path, data)
log("Result saved as numpy array to '{}'".format(path))
elif _check_savable_dict(data):
unbatched = dict(
zip(data[0], zip(*[d.values() for d in data])))
np.savez(path, **unbatched)
log("Result saved as numpy data to '{}.npz'".format(path))
else:
with open(path, "w", encoding="utf-8") as f_out:
f_out.writelines(
[" ".join(sent) + "\n"
if isinstance(sent, collections.Iterable)
else str(sent) + "\n" for sent in data])
log("Result saved as plain text '{}'".format(path))
else:
log("There is no output file for dataset: {}"
.format(dataset.name), color="red")
return all_results, result_data
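# Hedged usage sketch (the dataset variable and batch size below are
# illustrative, not taken from the original sources):
#   results, outputs = run_on_dataset(
#       tf_manager, runners, my_test_dataset, postprocess=None,
#       write_out=True, batch_size=32)
#   # `results` holds one ExecutionResult per runner; `outputs` maps each
#   # output series name to a list with one item per dataset example.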
def evaluation(evaluators, dataset, runners, execution_results, result_data):
"""Evaluate the model outputs.
Args:
evaluators: List of tuples of series and evaluation functions.
dataset: Dataset against which the evaluation is done.
runners: List of runners (contains series ids and loss names).
execution_results: Execution results that include the loss values.
result_data: Dictionary from series names to list of outputs.
Returns:
Dictionary of evaluation names and their values which includes the
metrics applied on respective series loss and loss values from the run.
"""
eval_result = {}
# losses
for runner, result in zip(runners, execution_results):
for name, value in zip(runner.loss_names, result.losses):
eval_result["{}/{}".format(runner.output_series, name)] = value
# evaluation metrics
for generated_id, dataset_id, function in evaluators:
if (not dataset.has_series(dataset_id)
or generated_id not in result_data):
continue
desired_output = dataset.get_series(dataset_id)
model_output = result_data[generated_id]
eval_result["{}/{}".format(generated_id, function.name)] = function(
model_output, desired_output)
return eval_result
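# Illustrative shape of the dictionary returned by evaluation() (keys follow the
# "<series>/<name>" pattern used above; the concrete names and values are made
# up for this example):
#   {"target/train_xent": 2.31,   # a loss reported by a runner
#    "target/BLEU": 0.18}         # metric "BLEU" computed on series "target"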
def _log_continuous_evaluation(tb_writer: tf.summary.FileWriter,
main_metric: str,
eval_result: Evaluation,
seen_instances: int,
epoch: int,
max_epochs: int,
execution_results: List[ExecutionResult],
train: bool = False,
dataset_name: str = None) -> None:
"""Log the evaluation results and the TensorBoard summaries."""
color, prefix = ("yellow", "train") if train else ("blue", "val")
if dataset_name is not None:
prefix += "_" + dataset_name
eval_string = _format_evaluation_line(eval_result, main_metric)
eval_string = "Epoch {}/{} Instances {} {}".format(epoch, max_epochs,
seen_instances,
eval_string)
log(eval_string, color=color)
if tb_writer:
for result in execution_results:
for summaries in [result.scalar_summaries,
result.histogram_summaries,
result.image_summaries]:
if summaries is not None:
tb_writer.add_summary(summaries, seen_instances)
external_str = \
tf.Summary(value=[tf.Summary.Value(tag=prefix + "_" + name,
simple_value=value)
for name, value in eval_result.items()])
tb_writer.add_summary(external_str, seen_instances)
def _format_evaluation_line(evaluation_res: Evaluation,
main_metric: str) -> str:
"""Format the evaluation metric for stdout with last one bold."""
eval_string = " ".join("{}: {:.4g}".format(name, value)
for name, value in evaluation_res.items()
if name != main_metric)
eval_string += colored(
" {}: {:.4g}".format(main_metric,
evaluation_res[main_metric]),
attrs=["bold"])
return eval_string
def print_final_evaluation(name: str, eval_result: Evaluation) -> None:
"""Print final evaluation from a test dataset."""
line_len = 22
log("Evaluating model on '{}'".format(name))
for eval_name, value in eval_result.items():
space = "".join([" " for _ in range(line_len - len(eval_name))])
log("... {}:{} {:.4g}".format(eval_name, space, value))
log_print("")
def _data_item_to_str(item: Any) -> str:
if isinstance(item, list):
return " ".join([str(i) for i in item])
if isinstance(item, str):
return item
if isinstance(item, np.ndarray) and len(item.shape) > 1:
return "[numpy tensor, shape {}]".format(item.shape)
return str(item)
def _print_examples(dataset: Dataset,
outputs: Dict[str, List[Any]],
val_preview_input_series: Optional[List[str]] = None,
val_preview_output_series: Optional[List[str]] = None,
num_examples=15) -> None:
"""Print examples of the model output.
Arguments:
dataset: The dataset from which to take examples
outputs: A mapping from the output series ID to the list of its
contents
val_preview_input_series: An optional list of input series to include
in the preview. An input series is a data series that is present in
the dataset. It can be either a target series (one that is also
present in the outputs, i.e. reference), or a source series (one
that is not among the outputs). In the validation preview, source
input series and preprocessed target series are yellow and target
(reference) series are red. If None, all series are written.
val_preview_output_series: An optional list of output series to include
in the preview. An output series is a data series that is present
among the outputs. In the preview, magenta is used as the font
color for output series
"""
log_print(colored("Examples:", attrs=["bold"]))
source_series_names = [s for s in dataset.series_ids if s not in outputs]
target_series_names = [s for s in dataset.series_ids if s in outputs]
output_series_names = list(outputs.keys())
assert outputs
if val_preview_input_series is not None:
target_series_names = [s for s in target_series_names
if s in val_preview_input_series]
source_series_names = [s for s in source_series_names
if s in val_preview_input_series]
if val_preview_output_series is not None:
output_series_names = [s for s in output_series_names
if s in val_preview_output_series]
    # for further indexing we need to make sure all relevant
    # dataset series are lists
target_series = {series_id: list(dataset.get_series(series_id))
for series_id in target_series_names}
source_series = {series_id: list(dataset.get_series(series_id))
for series_id in source_series_names}
if not isinstance(dataset, LazyDataset):
num_examples = min(len(dataset), num_examples)
for i in range(num_examples):
log_print(colored(" [{}]".format(i + 1), color="magenta",
attrs=["bold"]))
def print_line(prefix, color, content):
colored_prefix = colored(prefix, color=color)
formatted = _data_item_to_str(content)
log_print(" {}: {}".format(colored_prefix, formatted))
# Input source series = yellow
for series_id, data in sorted(source_series.items(),
key=lambda x: x[0]):
print_line(series_id, "yellow", data[i])
# Output series = magenta
for series_id in sorted(output_series_names):
data = list(outputs[series_id])
model_output = data[i]
print_line(series_id, "magenta", model_output)
# Input target series (a.k.a. references) = red
for series_id in sorted(target_series_names):
data = outputs[series_id]
desired_output = target_series[series_id][i]
print_line(series_id + " (ref)", "red", desired_output)
log_print("")
def _skip_lines(start_offset: int,
batched_datasets: Iterable[Dataset]) -> None:
"""Skip training instances from the beginning.
Arguments:
start_offset: How many training instances to skip (minimum)
        batched_datasets: The iterator of batched datasets from which to discard batches
"""
log("Skipping first {} instances in the dataset".format(start_offset))
skipped_instances = 0
while skipped_instances < start_offset:
try:
skipped_instances += len(next(batched_datasets)) # type: ignore
except StopIteration:
raise ValueError("Trying to skip more instances than "
"the size of the dataset")
if skipped_instances > 0:
log("Skipped {} instances".format(skipped_instances))
def _log_model_variables(var_list: List[tf.Variable] = None) -> None:
trainable_vars = tf.trainable_variables()
if not var_list:
var_list = trainable_vars
assert var_list is not None
fixed_vars = [var for var in trainable_vars if var not in var_list]
total_params = 0
logstr = "The model has {} trainable variables{}:\n\n".format(
len(trainable_vars),
" ({} {})".format(len(fixed_vars), colored("fixed", on_color="on_red"))
if fixed_vars else "")
logstr += colored(
"{: ^80}{: ^20}{: ^10}\n".format("Variable name", "Shape", "Size"),
color="yellow", attrs=["bold"])
for var in trainable_vars:
shape = var.get_shape().as_list()
params_in_var = int(np.prod(shape))
total_params += params_in_var
name = var.name
if var not in var_list:
name = colored(name, on_color="on_red")
# Pad and compensate for control characters:
name = name.ljust(80 + (len(name) - len(var.name)))
log_entry = "{}{: <20}{: >10}".format(name, str(shape), params_in_var)
logstr += "\n{}".format(log_entry)
logstr += "\n"
log(logstr)
log("Total number of all parameters: {}".format(total_params))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.documentai_v1beta2.types import document
from google.cloud.documentai_v1beta2.types import document_understanding
from google.rpc import status_pb2 # type: ignore
from .transports.base import DocumentUnderstandingServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import DocumentUnderstandingServiceGrpcAsyncIOTransport
from .client import DocumentUnderstandingServiceClient
class DocumentUnderstandingServiceAsyncClient:
"""Service to parse structured information from unstructured or
semi-structured documents using state-of-the-art Google AI such
as natural language, computer vision, and translation.
"""
_client: DocumentUnderstandingServiceClient
DEFAULT_ENDPOINT = DocumentUnderstandingServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DocumentUnderstandingServiceClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(
DocumentUnderstandingServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DocumentUnderstandingServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(
DocumentUnderstandingServiceClient.common_folder_path
)
parse_common_folder_path = staticmethod(
DocumentUnderstandingServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
DocumentUnderstandingServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
DocumentUnderstandingServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(
DocumentUnderstandingServiceClient.common_project_path
)
parse_common_project_path = staticmethod(
DocumentUnderstandingServiceClient.parse_common_project_path
)
common_location_path = staticmethod(
DocumentUnderstandingServiceClient.common_location_path
)
parse_common_location_path = staticmethod(
DocumentUnderstandingServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DocumentUnderstandingServiceAsyncClient: The constructed client.
"""
return DocumentUnderstandingServiceClient.from_service_account_info.__func__(DocumentUnderstandingServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DocumentUnderstandingServiceAsyncClient: The constructed client.
"""
return DocumentUnderstandingServiceClient.from_service_account_file.__func__(DocumentUnderstandingServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return DocumentUnderstandingServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> DocumentUnderstandingServiceTransport:
"""Returns the transport used by the client instance.
Returns:
DocumentUnderstandingServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(DocumentUnderstandingServiceClient).get_transport_class,
type(DocumentUnderstandingServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, DocumentUnderstandingServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the document understanding service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DocumentUnderstandingServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DocumentUnderstandingServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def batch_process_documents(
self,
request: Union[
document_understanding.BatchProcessDocumentsRequest, dict
] = None,
*,
requests: Sequence[document_understanding.ProcessDocumentRequest] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""LRO endpoint to batch process many documents. The output is
written to Cloud Storage as JSON in the [Document] format.
.. code-block:: python
from google.cloud import documentai_v1beta2
def sample_batch_process_documents():
# Create a client
client = documentai_v1beta2.DocumentUnderstandingServiceClient()
# Initialize request argument(s)
requests = documentai_v1beta2.ProcessDocumentRequest()
requests.input_config.gcs_source.uri = "uri_value"
requests.input_config.mime_type = "mime_type_value"
request = documentai_v1beta2.BatchProcessDocumentsRequest(
requests=requests,
)
# Make the request
operation = client.batch_process_documents(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.documentai_v1beta2.types.BatchProcessDocumentsRequest, dict]):
The request object. Request to batch process documents
as an asynchronous operation. The output is written to
Cloud Storage as JSON in the [Document] format.
requests (:class:`Sequence[google.cloud.documentai_v1beta2.types.ProcessDocumentRequest]`):
Required. Individual requests for
each document.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.cloud.documentai_v1beta2.types.BatchProcessDocumentsResponse` Response to a batch document processing request. This is returned in
the LRO Operation after the operation is complete.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = document_understanding.BatchProcessDocumentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if requests:
request.requests.extend(requests)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_process_documents,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=120.0,
),
default_timeout=120.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
document_understanding.BatchProcessDocumentsResponse,
metadata_type=document_understanding.OperationMetadata,
)
# Done; return the response.
return response
async def process_document(
self,
request: Union[document_understanding.ProcessDocumentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> document.Document:
r"""Processes a single document.
.. code-block:: python
from google.cloud import documentai_v1beta2
def sample_process_document():
# Create a client
client = documentai_v1beta2.DocumentUnderstandingServiceClient()
# Initialize request argument(s)
input_config = documentai_v1beta2.InputConfig()
input_config.gcs_source.uri = "uri_value"
input_config.mime_type = "mime_type_value"
request = documentai_v1beta2.ProcessDocumentRequest(
input_config=input_config,
)
# Make the request
response = client.process_document(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.documentai_v1beta2.types.ProcessDocumentRequest, dict]):
The request object. Request to process one document.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.documentai_v1beta2.types.Document:
Document represents the canonical
document resource in Document
Understanding AI. It is an interchange
format that provides insights into
documents and allows for collaboration
between users and Document Understanding
AI to iterate and optimize for quality.
"""
# Create or coerce a protobuf request object.
request = document_understanding.ProcessDocumentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.process_document,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=120.0,
),
default_timeout=120.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-documentai",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DocumentUnderstandingServiceAsyncClient",)
|
|
#!/usr/bin/env python
import numpy as np
from scipy.interpolate import interp1d
import collections
import matplotlib.pyplot as plt
##set values to dictionary
def set_dic(a,param_d):
param_d['V0'] = a[0]
param_d['K0'] = a[1]
param_d['K0p'] = a[2]
param_d['theta0'] = np.exp(a[3])
param_d['gamma0'] = np.exp(a[4])
param_d['q'] = a[5]
return param_d
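# Note added for clarity: set_dic expects a[3] and a[4] to be the *logarithms*
# of theta0 and gamma0, since both values are exponentiated above.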
# def get_dic(a):
# param_d['V0'] = a[0]
# param_d['K0'] = a[1]
# param_d['K0p'] = a[2]
# param_d['theta0'] = np.exp(a[3])
# param_d['gamma0'] = np.exp(a[4])
# param_d['q'] = a[5]
# return param_d
def set_const(param_d):
param_d['Natom'] = np.nan
param_d['kB'] = 13.806488*10**(-24) #J per K
param_d['P_conv_Fac'] = 160.217657*(1/1.6021764e-19) #GPa in 1 J/Ang^3
param_d['C_DP'] = 3*param_d['kB']*param_d['Natom']*param_d['P_conv_Fac'] #3nR
def set_material_const(Natom, param_d): #Natom, kB, P_conv_fac, C_DP
param_d['Natom'] = Natom
param_d['C_DP'] = 3*param_d['kB']*param_d['Natom']*param_d['P_conv_Fac'] #3nR
# print "C_DP", param_d['C_DP']
def set_constshifted(param_d):
param_d['Natom'] = 4
param_d['kB'] = 8.6173324e-5 #eV per K
param_d['P_conv_Fac'] = 160.217657#GPa in 1 eV/Ang^3
param_d['C_DP'] = 3*param_d['kB'] *param_d['Natom']#Dulong-Petit limit for Cv
def set_material_constshifted(Natom,param_d): #Natom, kB, P_conv_fac, C_DP
param_d['Natom'] = Natom
param_d['C_DP'] = 3*param_d['kB'] *param_d['Natom']#Dulong-Petit limit for Cv
def debye_fun(x):
"""
Return debye integral value
- calculation done using interpolation in a lookup table
- interpolation done in log-space where behavior is close to linear
- linear extrapolation is implemented manually
"""
    if np.isscalar(x):
        assert x >= 0, 'x values must be non-negative.'
    else:
        #np.absolute(x)
        assert all(x >= 0), 'x values must be non-negative.'
# Lookup table
# interpolate in log space where behavior is nearly linear
debyex = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8,
3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0,
5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0])
debyelogf = np.array([ 0.0, -0.03770187, -0.07580279, -0.11429475,
-0.15316866, -0.19241674, -0.2320279 , -0.27199378,
-0.31230405, -0.35294619, -0.39390815, -0.43518026,
-0.47674953, -0.51860413, -0.56072866, -0.64573892,
-0.73167389, -0.81841793, -0.90586032, -0.99388207,
-1.08236598, -1.17119911, -1.26026101, -1.34944183,
-1.43863241, -1.52771969, -1.61660856, -1.70519469,
-1.79338479, -1.88108917, -1.96822938, -2.05471771,
-2.14049175, -2.35134476, -2.55643273, -2.75507892,
-2.94682783, -3.13143746, -3.30880053, -3.47894273,
-3.64199587, -3.79820337, -3.94785746])
# Create interpolation function
logdebfun = interp1d(debyex, debyelogf, kind='cubic', bounds_error=False,
fill_value=np.nan)
logfval = logdebfun(x)
# Check for extrapolated values indicated by NaN
# - replace with linear extrapolation
logfval = np.where(x > debyex[-1], debyelogf[-1] + (x - debyex[-1]) *
(debyelogf[-1]-debyelogf[-2])/(debyex[-1]-debyex[-2]),
logfval)
# Exponentiate to get integral value
return np.exp(logfval)
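# Hedged sanity check (illustrative only, derived from the lookup table above):
# the table starts at x = 0 with log(f) = 0, so debye_fun(0.0) is ~1.0, and the
# value decays monotonically as x grows, e.g.
#   debye_fun(np.array([0.0, 1.0, 10.0]))   # roughly [1.0, 0.67, 0.019]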
def press_vinet(v,param_d):
V0 = param_d['V0']
K0 = param_d['K0']
Kp = param_d['K0p']
x = (v/V0)**(1./3)
Vinet = 3*K0*(1.-x)*x**(-2)*np.exp(3./2.*(Kp - 1.)*(1. - x))
return Vinet
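# The function above implements the cold/reference Vinet equation of state,
# mirroring the expression in the code:
#   P(V) = 3*K0*(1 - x) * x**(-2) * exp(1.5*(K0p - 1)*(1 - x)),  x = (V/V0)**(1./3)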
def Ptot_powerlaw(volume,temperature,param_d,RefT):
gamma= param_d['gamma0']*(volume/param_d['V0'])**param_d['q']
theta = param_d['theta0']*np.exp((-1)*(gamma-param_d['gamma0'])/param_d['q'])
Debye_Int = debye_fun(theta/RefT)
# print gamma,theta,Debye_Int
# print "C_DP in therm",param_d['C_DP']
P_th_ref = param_d['C_DP']*gamma*RefT*Debye_Int/volume
# print "P_th_ref",P_th_ref
# print volume
P_th = (param_d['C_DP']*gamma*temperature*debye_fun(theta/temperature))/volume
# print "P_th",P_th
# print "thermal pressure", P_th - P_th_ref
# print "Natom",param_d['Natom']
return P_th - P_th_ref
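# Note added for clarity: Ptot_powerlaw returns the *thermal* pressure relative
# to the reference temperature RefT, i.e. P_th(T) - P_th(RefT); the cold Vinet
# pressure is added separately in MGD_PowerLaw below.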
def Ptot_shiftedpowlaw(volume,temperature,param_d):
x = (volume/param_d['V0'])**(1./3)
gammaV = param_d['gamma0']*x**(3*param_d['q'])+1./2
thetaV = param_d['theta0']*x**(-3./2)*np.exp(param_d['gamma0']/param_d['q']*((1-x**(3.*param_d['q']))))
debye_Int = debye_fun(thetaV/temperature)
P_th = (param_d['C_DP']*temperature*gammaV/volume*debye_Int)*param_d['P_conv_Fac']
return P_th
def MGD_PowerLaw(volume, temperature,param_d,Natom = 4):
# if np.isscalar(temperature):
# temperature = temperature*np.ones(len(volume))
#assert len(volume)==len(temperature), 'temperature should be a scalar or its length should be equal to volume'
#set parameters into dictionary
    set_const(param_d)
    # set_const leaves Natom (and therefore C_DP) as NaN, so always set the
    # material constants for the requested Natom:
    set_material_const(Natom, param_d)
Vinet = press_vinet(volume,param_d)
#print "Vinet", Vinet
RefT = 300.0 #unit is K
Ptot = Ptot_powerlaw(volume,temperature,param_d,RefT)
print "P_tot",Ptot
MGD = Vinet + Ptot
return MGD
def MGD_PowerLawShifted(volume, temperature, param_d,Natom=4):
set_constshifted(param_d)
if Natom!=4:
set_material_constshifted(Natom,param_d)
#param_d['kB'] = 8.6173324e-5 #eV per K
#param_d['P_conv_Fac'] = 160.217657#GPa in 1 eV/Ang^3
#param_d['C_DP'] = 3*param_d['kB'] *param_d['Natom']#Dulong-Petit limit for Cv
    print(param_d)
Vinet = press_vinet(volume,param_d)
x = (volume/param_d['V0'])**(1./3)
#gammaV = param_d['gamma0']*x**(3*param_d['q'])+1./2
#thetaV = param_d['theta0']*x**(-3./2)*np.exp(param_d['gamma0']/param_d['q']*((1-x**(3.*param_d['q']))))
#debye_Int = debye_fun(thetaV/temperature)
#P_th = (param_d['C_DP']*temperature*gammaV/volume*debye_Int)*param_d['P_conv_Fac']
P_th = Ptot_shiftedpowlaw(volume,temperature,param_d)
#compute P(V,T)
MGD = Vinet + P_th
return MGD
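# Hedged usage sketch (parameter values are taken from the commented-out Ne test
# below and are illustrative only; note that set_dic expects log(theta0) and
# log(gamma0)):
#   param_d = collections.OrderedDict()
#   set_dic(np.array([22.234, 1.070, 8.40,
#                     np.log(75.1), np.log(2.442), 0.97]), param_d)
#   P = MGD_PowerLawShifted(np.array([13.70, 12.32]),
#                           np.array([298.0, 298.0]), param_d)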
"""
def MGD_PowerLaw(volume, temperature,p_eos, Natom): # MGD_PowerLaw
if np.isscalar(temperature):
temperature = temperature*np.ones(len(volume))
assert len(p_eos)==6, 'EOS parameter array must have correct length of 6'
assert len(volume)==len(temperature), 'temperature should be a scalar or its length should be equal to volume'
kB = 13.806488*10**(-24) #J per K
P_conv_Fac= 160.217657*6.24*10**(18) #GPa in 1 J/Ang^3
C_DP = 3*kB*Natom*P_conv_Fac
# Natom = # of atoms in unitcell
# V = volume of unitcell
#the cold function(Vinet function) Vinet function parameters
#V0_0Fe = 162.12
#K0_0Fe = 262.3
#Kp_0Fe = 4.044
#V0_13Fe = 163.16
#K0_13Fe = 243.8
#Kp_13Fe = 4.160
V0 = p_eos[0]
K0 = p_eos[1]
Kp = p_eos[2]
#Thermal function parameters
#Theta0_0Fe = 1000
#Gamma0_0Fe = 1.675
#q_0Fe = 1.39
#Theta0_13Fe = 1000
#Gamma0_13Fe = 1.400
#q_13Fe = 0.56
Theta0 = p_eos[3]
Gamma0 = p_eos[4]
q = p_eos[5]
RefT = 300.0 #unit is K
x = (volume/V0)**(1./3)
Vinet = 3*K0*(1.-x)*x**(-2)*np.exp(3./2.*(Kp - 1.)*(1. - x))
gamma= Gamma0 *(volume/V0)**q
theta = Theta0*np.exp((-1)*(gamma-Gamma0)/q)
#compute the P_thermal(V,300K)
Debye_Int = debye_fun(theta/RefT)
P_th_ref = C_DP*gamma*RefT*Debye_Int/volume
#compute P_th in different temperatures
P_th = (C_DP*gamma*temperature*debye_fun(theta/temperature))/volume
#compute P(V,T)
MGD = Vinet + P_th - P_th_ref
return MGD
"""
"""
def MGD_PowerLawShifted(volume, temperature, p_eos, Natom):
# Natom = # of atoms in unitcell
# V = volume of unitcell
P_conv_Fac= 160.217657 #GPa in 1 eV/Ang^3
kB = 8.6173324e-5 #eV per K
C_DP = 3*kB*Natom#Dulong-Petit limit for Cv
#Vinet function parameters
#sequence of the p_eos: V0, K0, Kp, theta0, gamma0, q
V0 = p_eos[0] #V0_Ne = 22.234
K0 = p_eos[1] #K0_Ne = 1.070
Kp = p_eos[2] #Kp_Ne = 8.40
#Thermal function parameters
Theta0 = p_eos[3] #Theta0_Ne = 75.1
Gamma0 = p_eos[4] #Gamma0_Ne = 2.442
q = p_eos[5] #q_Ne = 0.97
#RefT = 0
x = (volume/V0)**(1./3)
Vinet = 3.*K0*(1-x)*x**(-2)*np.exp(3./2.*(Kp - 1.)*(1-x)) #Pcold = Vinet_Ne
gammaV = Gamma0*x**(3*q)+1./2
thetaV = Theta0*x**(-3./2)*np.exp(Gamma0/q*((1-x**(3.*q))))
debye_Int = debye_fun(thetaV/temperature)
P_th = (C_DP*temperature*gammaV/volume*debye_Int)*P_conv_Fac
#compute P(V,T)
MGD = Vinet + P_th
return MGD
"""
"""
#test Dewaele's table
param_d = collections.OrderedDict()
p_eos = np.array([22.234,1.070,8.40,75.1,2.442,0.97])
set_dic(p_eos)
param_d['Natom'] = 4
volume = np.array([13.69743329, 12.31533725, 10.845, 10.305, 7.827])
temperature = np.array([298,298,500,750,900])
print (MGD_PowerLawShifted(volume, temperature,param_d))
"""
#plot Dewaele's table
#sequence of the p_eos: V0, K0, Kp, theta0, gamma0, q
"""
p_eos = np.array([22.234,1.070,8.40,75.1,2.442,0.97])
Nat = 1
Nedat = np.loadtxt(fname='Ne.md', delimiter='|', skiprows=3)
#temp_298 = np.zeros([34])
vol_298 = np.zeros([34])
ob_298 = np.zeros([34])
#temp_500 = np.zeros([5])
vol_500 = np.zeros([5])
ob_500 = np.zeros([5])
#temp_600 = np.zeros([6])
vol_600 = np.zeros([6])
ob_600 = np.zeros([6])
#temp_750 = np.zeros([5])
vol_750 = np.zeros([5])
ob_750 = np.zeros([5])
#temp_900 = np.zeros([6])
vol_900 = np.zeros([6])
ob_900 = np.zeros([6])
i_298 = 0
i_500 = 0
i_600 = 0
i_750 = 0
i_900 = 0
for ind in range(len(Nedat)):
if Nedat[ind,0] == 298:
ob_298[i_298] = Nedat[ind,1]
vol_298[i_298] = Nedat[ind,2]
i_298 = i_298 + 1
if Nedat[ind,0] > 499 and Nedat[ind,0] < 502:
ob_500[i_500] = Nedat[ind,1]
vol_500[i_500] = Nedat[ind,2]
i_500 = i_500 + 1
if Nedat[ind,0] == 600:
ob_600[i_600] = Nedat[ind,1]
vol_600[i_600] = Nedat[ind,2]
i_600 = i_600 + 1
if Nedat[ind,0] == 750:
ob_750[i_750] = Nedat[ind,1]
vol_750[i_750] = Nedat[ind,2]
i_750 = i_750 + 1
if Nedat[ind,0] == 900:
ob_900[i_900] = Nedat[ind,1]
vol_900[i_900] = Nedat[ind,2]
i_900 = i_900 + 1
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
T = np.array([298, 500, 600, 750, 900])
model_298 = MGD_PowerLawShifted(volume1,T[0]*np.ones(volume1.shape),p_eos,Nat)
model_500 = MGD_PowerLawShifted(volume1,T[1]*np.ones(volume1.shape),p_eos,Nat)
model_600 = MGD_PowerLawShifted(volume1,T[2]*np.ones(volume1.shape),p_eos,Nat)
model_750 = MGD_PowerLawShifted(volume1,T[3]*np.ones(volume1.shape),p_eos,Nat)
model_900 = MGD_PowerLawShifted(volume1,T[4]*np.ones(volume1.shape),p_eos,Nat)
plt.plot(model_298,volume1,'k',label = '298 Model')
plt.plot(model_500,volume1,'c',label = '500 Model')
plt.plot(model_600,volume1,'r',label = '600 Model')
plt.plot(model_750,volume1,'m',label = '750 Model')
plt.plot(model_900,volume1,'y',label = '900 Model')
plt.plot(ob_298,vol_298, 'ko',label = '298')
plt.plot(ob_500,vol_500, 'co',label = '500')
plt.plot(ob_600,vol_600, 'ro',label = '600')
plt.plot(ob_750,vol_750, 'mo',label = '750')
plt.plot(ob_900,vol_900, 'yo',label = '900')
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.legend()
plt.show()
test298 = MGD_PowerLawShifted(vol_298,T[0]*np.ones(vol_298.shape),p_eos,1)
#print "vol_298",vol_298
#print test298
print test298 - ob_298
#print model_500 - ob_500
"""
#test Dr Wolf's table
#volume = np.array([146.59, 145.81, 144.97, 144.32, 146.35, 131.26,142.52,133.96,125.42,133.86,133.91,133.71,125.42,125.40,124.05])
#temperature = np.array([300,300,300,300,1700,300,1924,2375,2020,1755,1780,1740,2228,2240,2045])
#p_eos_0Fe = np.array([162.12,262.3,4.044,1000,1.675,1.39])
#print (MGD_Vinet(volume, temperature, p_eos1,20))
#plot 13% Fe
"""
p_eos = np.array([163.16,243.8,4.160,1000,1.400,0.56])
Pvdat = np.loadtxt(fname='Fe_13.md', delimiter='|', skiprows=3)
temp_300 = np.zeros([49])
vol_300 = np.zeros([49])
ob_300 = np.zeros([49])
i_300 = 0
for ind in range(len(Pvdat)):
if Pvdat[ind,0] == 300:
temp_300[i_300] = Pvdat[ind,0]
ob_300[i_300] = Pvdat[ind,1]
vol_300[i_300] = Pvdat[ind,2]
i_300 = i_300 + 1
#p_300 = MGD_Vinet(vol_300, temp_300,p_eos_13Fe,20)
plt.plot(ob_300,vol_300, 'ko',label = '300')
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
temp1 = np.zeros(len(volume1))
model_300 = MGD_PowerLaw(volume1,temp1+300,p_eos,20)
print(model_300)
plt.plot(model_300,volume1,'k',label = '300 Model')
plt.xlim([20,140])
plt.ylim([122,155])
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.legend()
plt.show()
"""
####color plot Wolf's PVTMgPvTange.txt
"""
Pvdat = np.loadtxt(fname='PVTMgPvTange.txt', skiprows=1)
volume = Pvdat[:,5]
experiment_P = Pvdat[:,1]
p_eos = np.array([162.12,262.3,4.044,1000,1.675,1.39])
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
T = np.array([300,500,700,900,1700,1900,2100,2300,2500])
model_P_300 = MGD_PowerLaw(volume1,T[0]*np.ones(volume1.shape),p_eos,20)
model_P_500 = MGD_PowerLaw(volume1,T[1]*np.ones(volume1.shape),p_eos,20)
model_P_700 = MGD_PowerLaw(volume1,T[2]*np.ones(volume1.shape),p_eos,20)
model_P_900 = MGD_PowerLaw(volume1,T[3]*np.ones(volume1.shape),p_eos,20)
model_P_1700 = MGD_PowerLaw(volume1,T[4]*np.ones(volume1.shape),p_eos,20)
model_P_1900 = MGD_PowerLaw(volume1,T[5]*np.ones(volume1.shape),p_eos,20)
model_P_2100 = MGD_PowerLaw(volume1,T[6]*np.ones(volume1.shape),p_eos,20)
model_P_2300 = MGD_PowerLaw(volume1,T[7]*np.ones(volume1.shape),p_eos,20)
model_P_2500 = MGD_PowerLaw(volume1,T[8]*np.ones(volume1.shape),p_eos,20)
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.clf()
cmap = plt.get_cmap('gist_rainbow')
plt.scatter(experiment_P,volume,30,Pvdat[:,3],'o',cmap=cmap,label='Pressure')
plt.colorbar(ticks=range(300,2500,500))
plt.clim([300, 2500])
plt.xlim([20,140])
plt.ylim([122,155])
legend = plt.legend(loc='upper right')
plt.legend()
plt.plot(model_P_300,volume1,c = cmap(30))
plt.plot(model_P_500,volume1,c = cmap(50))
plt.plot(model_P_700,volume1,c = cmap(70))
plt.plot(model_P_900,volume1,c = cmap(90))
plt.plot(model_P_1700,volume1,c = cmap(170))
plt.plot(model_P_1900,volume1,c = cmap(190))
plt.plot(model_P_2100,volume1,c = cmap(210))
plt.plot(model_P_2300,volume1,c = cmap(230))
plt.plot(model_P_2500,volume1,c = cmap(250))
plt.show()
"""
####color plot Wolf's PVTMgFePvWolf.txt
"""
Pvdat = np.loadtxt(fname='PVTMgFePvWolf.txt', skiprows=1)
volume = Pvdat[:,5]
experiment_P = Pvdat[:,1]
p_eos = np.array([163.16,243.8,4.160,1000,1.400,0.56])
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
T = np.array([300,500,700,900,1700,1900,2100,2300,2500])
model_P_300 = MGD_PowerLaw(volume1,T[0]*np.ones(volume1.shape),p_eos,20)
model_P_500 = MGD_PowerLaw(volume1,T[1]*np.ones(volume1.shape),p_eos,20)
model_P_700 = MGD_PowerLaw(volume1,T[2]*np.ones(volume1.shape),p_eos,20)
model_P_900 = MGD_PowerLaw(volume1,T[3]*np.ones(volume1.shape),p_eos,20)
model_P_1700 = MGD_PowerLaw(volume1,T[4]*np.ones(volume1.shape),p_eos,20)
model_P_1900 = MGD_PowerLaw(volume1,T[5]*np.ones(volume1.shape),p_eos,20)
model_P_2100 = MGD_PowerLaw(volume1,T[6]*np.ones(volume1.shape),p_eos,20)
model_P_2300 = MGD_PowerLaw(volume1,T[7]*np.ones(volume1.shape),p_eos,20)
model_P_2500 = MGD_PowerLaw(volume1,T[8]*np.ones(volume1.shape),p_eos,20)
"""
"""
plt.plot(experiment_P,volume,'ko',label = 'experiment')
plt.plot(model_P_300,volume1,'b',label = '300K')
plt.plot(model_P_500,volume1,'g',label = '500K')
plt.plot(model_P_700,volume1,'r',label = '700K')
plt.plot(model_P_900,volume1,'y',label = '900K')
plt.plot(model_P_1700,volume1,'b',label = '1700K')
plt.plot(model_P_1900,volume1,'g',label = '1900K')
plt.plot(model_P_2100,volume1,'r',label = '2100K')
plt.plot(model_P_2300,volume1,'y',label = '2300K')
plt.plot(model_P_2500,volume1,'b',label = '2500K')
#plt.xlim([20,140])
#plt.ylim([122,155])
"""
"""
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.clf()
"""
"""
plt.scatter(model_P_300,volume1,30,T[0]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_500,volume1,30,T[1]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_700,volume1,30,T[2]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_900,volume1,30,T[3]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_1700,volume1,30,T[4]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_1900,volume1,30,T[5]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_2100,volume1,30,T[6]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_2300,volume1,30,T[7]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_2500,volume1,30,T[8]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
"""
###original plotting script
"""
cmap = plt.get_cmap('gist_rainbow')
plt.scatter(experiment_P,volume,30,Pvdat[:,3],'o',cmap=cmap,label='Pressure')
plt.colorbar(ticks=range(300,2500,500))
plt.clim([300, 2500])
plt.xlim([20,140])
plt.ylim([122,155])
legend = plt.legend(loc='upper right')
plt.legend()
plt.plot(model_P_300,volume1,c = cmap(30))
plt.plot(model_P_500,volume1,c = cmap(50))
plt.plot(model_P_700,volume1,c = cmap(70))
plt.plot(model_P_900,volume1,c = cmap(90))
plt.plot(model_P_1700,volume1,c = cmap(170))
plt.plot(model_P_1900,volume1,c = cmap(190))
plt.plot(model_P_2100,volume1,c = cmap(210))
plt.plot(model_P_2300,volume1,c = cmap(230))
plt.plot(model_P_2500,volume1,c = cmap(250))
plt.show()
"""
###test plotting below
"""
cmap = plt.get_cmap('gist_rainbow')
climvals = [300,2500]
plt.colorbar(ticks=range(300,2500,500))
plt.clim(climvals)
plt.xlim([20,140])
plt.ylim([122,155])
legend = plt.legend(loc='upper right')
Tcolbar = np.linspace(climvals[0],climvals[1],len(cmap))
Indcolbar = np.range(0,len(cmap))
plt.scatter(experiment_P,volume,30,Pvdat[:,3],'o',cmap=cmap,label='Pressure')
for ind, Tval in enumerate(T):
indcmap = np.interp1d(Tcolbar,Indcolbar,Tval,kind='nearest')
plt.plot(model_P[ind],volume1,c = cmap[indcmap])
plt.plot(model_P_500,volume1,c = cmap(50))
plt.plot(model_P_700,volume1,c = cmap(70))
plt.plot(model_P_900,volume1,c = cmap(90))
plt.plot(model_P_1700,volume1,c = cmap(170))
plt.plot(model_P_1900,volume1,c = cmap(190))
plt.plot(model_P_2100,volume1,c = cmap(210))
plt.plot(model_P_2300,volume1,c = cmap(230))
plt.plot(model_P_2500,volume1,c = cmap(250))
plt.show()
"""
|
|
import collections
import datetime
import functools
import ipaddress
import os
from king_phisher import serializers
from king_phisher.client import gui_utilities
from king_phisher.client import plugins
from king_phisher.client.widget import extras
from king_phisher.client.widget import managers
from gi.repository import GObject
from gi.repository import Gtk
import jsonschema
import rule_engine
import rule_engine.errors
relpath = functools.partial(os.path.join, os.path.dirname(os.path.realpath(__file__)))
gtk_builder_file = relpath('request_redirect.ui')
json_schema_file = relpath('schema.json')
_ModelNamedRow = collections.namedtuple('ModelNamedRow', (
'index',
'target',
'permanent',
'type',
'text'
))
def named_row_to_entry(named_row):
entry = {
'permanent': named_row.permanent,
'target': named_row.target,
named_row.type.lower(): named_row.text
}
return entry
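# Illustrative example (the target URL and CIDR below are made up): a named row
#   _ModelNamedRow(0, 'https://example.com', True, 'Source', '10.0.0.0/8')
# is converted to the entry
#   {'permanent': True, 'target': 'https://example.com', 'source': '10.0.0.0/8'}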
def _update_model_indexes(model, starting, modifier):
for row in model:
named_row = _ModelNamedRow(*row)
if named_row.index < starting:
continue
model[row.iter][_ModelNamedRow._fields.index('index')] += modifier
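# Illustrative behaviour (indexes are made up): after the row with index 2 is
# deleted, _update_model_indexes(model, 2, -1) shifts the remaining rows that
# had indexes 3, 4, ... down to 2, 3, ...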
class _CellRendererIndex(getattr(extras, 'CellRendererPythonText', object)):
python_value = GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE)
@staticmethod
def render_python_value(value):
if not isinstance(value, int):
return
return str(value + 1)
class Plugin(plugins.ClientPlugin):
authors = ['Spencer McIntyre']
title = 'Request Redirect'
description = """
Edit entries for the server "Request Redirect" plugin.
"""
homepage = 'https://github.com/securestate/king-phisher'
req_min_version = '1.14.0b1'
version = '1.0.0'
def initialize(self):
self.window = None
if not os.access(gtk_builder_file, os.R_OK):
gui_utilities.show_dialog_error(
'Plugin Error',
self.application.get_active_window(),
"The GTK Builder data file ({0}) is not available.".format(os.path.basename(gtk_builder_file))
)
return False
self._label_summary = None
self._rule_context = None
self._tv = None
self._tv_model = Gtk.ListStore(int, str, bool, str, str)
self._tv_model.connect('row-inserted', self.signal_model_multi)
self._tv_model.connect('row-deleted', self.signal_model_multi)
self.menu_items = {}
self.menu_items['edit_rules'] = self.add_menu_item('Tools > Request Redirect Rules', self.show_editor_window)
return True
def _editor_refresh(self):
self.application.rpc.async_call(
'plugins/request_redirect/entries/list',
on_success=self.asyncrpc_list,
when_idle=False
)
def _editor_delete(self, treeview, selection):
selection = treeview.get_selection()
(model, tree_paths) = selection.get_selected_rows()
if not tree_paths:
return
rows = []
for tree_path in tree_paths:
rows.append((_ModelNamedRow(*model[tree_path]).index, Gtk.TreeRowReference.new(model, tree_path)))
if len(rows) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(rows))
if not gui_utilities.show_dialog_yes_no(message, self.window, 'This information will be lost.'):
return
rows = reversed(sorted(rows, key=lambda item: item[0]))
for row_index, row_ref in rows:
self.application.rpc.async_call(
'plugins/request_redirect/entries/remove',
(row_index,),
on_success=self.asyncrpc_remove,
when_idle=True,
cb_args=(model, row_ref)
)
def _update_remote_entry(self, path):
named_row = _ModelNamedRow(*self._tv_model[path])
entry = named_row_to_entry(named_row)
self.application.rpc.async_call(
'plugins/request_redirect/entries/set',
(named_row.index, entry)
)
def finalize(self):
if self.window is not None:
self.window.destroy()
def show_editor_window(self, _):
self.application.rpc.async_graphql(
'query getPlugin($name: String!) { plugin(name: $name) { version } }',
query_vars={'name': self.name},
on_success=self.asyncrpc_graphql,
when_idle=True
)
def asyncrpc_graphql(self, plugin_info):
if plugin_info['plugin'] is None:
gui_utilities.show_dialog_error(
'Missing Server Plugin',
self.application.get_active_window(),
'The server side plugin is missing. It must be installed and enabled by the server administrator.'
)
return
self.application.rpc.async_call(
'plugins/request_redirect/permissions',
on_success=self.asyncrpc_permissions,
when_idle=True
)
def asyncrpc_permissions(self, permissions):
writable = 'write' in permissions
if self.window is None:
self.application.rpc.async_call(
'plugins/request_redirect/rule_symbols',
on_success=self.asyncrpc_symbols,
when_idle=False
)
builder = Gtk.Builder()
self.logger.debug('loading gtk builder file from: ' + gtk_builder_file)
builder.add_from_file(gtk_builder_file)
self.window = builder.get_object('RequestRedirect.editor_window')
self.window.set_transient_for(self.application.get_active_window())
self.window.connect('destroy', self.signal_window_destroy)
self._tv = builder.get_object('RequestRedirect.treeview_editor')
self._tv.set_model(self._tv_model)
tvm = managers.TreeViewManager(
self._tv,
cb_delete=(self._editor_delete if writable else None),
cb_refresh=self._editor_refresh,
selection_mode=Gtk.SelectionMode.MULTIPLE,
)
# target renderer
target_renderer = Gtk.CellRendererText()
if writable:
target_renderer.set_property('editable', True)
target_renderer.connect('edited', functools.partial(self.signal_renderer_edited, 'target'))
# permanent renderer
permanent_renderer = Gtk.CellRendererToggle()
if writable:
permanent_renderer.connect('toggled', functools.partial(self.signal_renderer_toggled, 'permanent'))
# type renderer
store = Gtk.ListStore(str)
store.append(['Rule'])
store.append(['Source'])
type_renderer = Gtk.CellRendererCombo()
type_renderer.set_property('has-entry', False)
type_renderer.set_property('model', store)
type_renderer.set_property('text-column', 0)
if writable:
type_renderer.set_property('editable', True)
type_renderer.connect('edited', self.signal_renderer_edited_type)
# text renderer
text_renderer = Gtk.CellRendererText()
if writable:
text_renderer.set_property('editable', True)
text_renderer.connect('edited', functools.partial(self.signal_renderer_edited, 'text'))
tvm.set_column_titles(
('#', 'Target', 'Permanent', 'Type', 'Text'),
renderers=(
_CellRendererIndex(), # index
target_renderer, # Target
permanent_renderer, # Permanent
type_renderer, # Type
text_renderer # Text
)
)
# treeview right-click menu
menu = tvm.get_popup_menu()
if writable:
menu_item = Gtk.MenuItem.new_with_label('Insert')
menu_item.connect('activate', self.signal_menu_item_insert)
menu_item.show()
menu.append(menu_item)
# top menu bar
menu_item = builder.get_object('RequestRedirect.menuitem_import')
menu_item.connect('activate', self.signal_menu_item_import)
menu_item.set_sensitive(writable)
menu_item = builder.get_object('RequestRedirect.menuitem_export')
menu_item.connect('activate', self.signal_menu_item_export)
infobar = builder.get_object('RequestRedirect.infobar_read_only_warning')
infobar.set_revealed(not writable)
button = builder.get_object('RequestRedirect.button_read_only_acknowledgment')
button.connect('clicked', lambda _: infobar.set_revealed(False))
self._label_summary = builder.get_object('RequestRedirect.label_summary')
self._editor_refresh()
self.window.show()
self.window.present()
def asyncrpc_list(self, entries):
things = []
for idx, rule in enumerate(entries):
if 'rule' in rule:
type_ = 'Rule'
text = rule['rule']
elif 'source' in rule:
type_ = 'Source'
text = rule['source']
else:
self.logger.warning("rule #{0} contains neither a rule or source key".format(idx))
continue
things.append((idx, rule['target'], rule['permanent'], type_, text))
gui_utilities.glib_idle_add_store_extend(self._tv_model, things, clear=True)
def asyncrpc_remove(self, model, row_ref, _):
tree_path = row_ref.get_path()
if tree_path is None:
return
old_index = _ModelNamedRow(*model[tree_path]).index
del model[tree_path]
_update_model_indexes(model, old_index, -1)
def asyncrpc_symbols(self, symbols):
symbols = {k: getattr(rule_engine.DataType, v) for k, v in symbols.items()}
type_resolver = rule_engine.type_resolver_from_dict(symbols)
self._rule_context = rule_engine.Context(type_resolver=type_resolver)
def signal_menu_item_export(self, _):
dialog = extras.FileChooserDialog('Export Entries', self.window)
response = dialog.run_quick_save('request_redirect.json')
dialog.destroy()
if not response:
return
entries = []
for row in self._tv_model:
named_row = _ModelNamedRow(*row)
entries.append(named_row_to_entry(named_row))
export = {
'created': datetime.datetime.utcnow().isoformat() + '+00:00',
'entries': entries
}
with open(response['target_path'], 'w') as file_h:
serializers.JSON.dump(export, file_h)
def signal_menu_item_import(self, _):
dialog = extras.FileChooserDialog('Import Entries', self.window)
dialog.quick_add_filter('Data Files', '*.json')
dialog.quick_add_filter('All Files', '*')
response = dialog.run_quick_open()
dialog.destroy()
if not response:
return
try:
with open(response['target_path'], 'r') as file_h:
data = serializers.JSON.load(file_h)
except Exception:
gui_utilities.show_dialog_error(
'Import Failed',
self.window,
'Could not load the specified file.'
)
return
with open(json_schema_file, 'r') as file_h:
schema = serializers.JSON.load(file_h)
try:
jsonschema.validate(data, schema)
except jsonschema.exceptions.ValidationError:
gui_utilities.show_dialog_error(
'Import Failed',
self.window,
'Could not load the specified file, the data is malformed.'
)
return
cursor = len(self._tv_model)
for entry in data['entries']:
if 'rule' in entry:
entry_type = 'Rule'
text = entry['rule']
elif 'source' in entry:
entry_type = 'Source'
text = entry['source']
new_named_row = _ModelNamedRow(cursor, entry['target'], entry['permanent'], entry_type, text)
self.application.rpc.async_call(
'plugins/request_redirect/entries/insert',
(cursor, named_row_to_entry(new_named_row))
)
self._tv_model.append(new_named_row)
cursor += 1
def signal_menu_item_insert(self, _):
selection = self._tv.get_selection()
new_named_row = _ModelNamedRow(len(self._tv_model), '', True, 'Source', '0.0.0.0/32')
if selection.count_selected_rows() == 0:
self._tv_model.append(new_named_row)
elif selection.count_selected_rows() == 1:
(model, tree_paths) = selection.get_selected_rows()
tree_iter = model.get_iter(tree_paths[0])
new_named_row = new_named_row._replace(index=_ModelNamedRow(*model[tree_iter]).index)
_update_model_indexes(model, new_named_row.index, 1)
self._tv_model.insert_before(tree_iter, new_named_row)
else:
gui_utilities.show_dialog_error(
				'Cannot Insert Entry',
				self.window,
				'Cannot insert a new entry when multiple entries are selected.'
)
return
entry = named_row_to_entry(new_named_row)
self.application.rpc.async_call(
'plugins/request_redirect/entries/set',
(new_named_row.index, entry)
)
def signal_model_multi(self, model, *_):
if self._label_summary is None:
return
self._label_summary.set_text("Showing {:,} Redirect Configuration{}".format(len(model), '' if len(model) == 1 else 's'))
def signal_renderer_edited(self, field, _, path, text):
text = text.strip()
if field == 'text':
entry_type = self._tv_model[path][_ModelNamedRow._fields.index('type')].lower()
if entry_type == 'source':
try:
ipaddress.ip_network(text)
except ValueError:
gui_utilities.show_dialog_error('Invalid Source', self.window, 'The specified text is not a valid IP network in CIDR notation.')
return
else:
try:
rule_engine.Rule(text, context=self._rule_context)
except rule_engine.SymbolResolutionError as error:
gui_utilities.show_dialog_error('Invalid Rule', self.window, "The specified rule text contains the unknown symbol {!r}.".format(error.symbol_name))
return
except rule_engine.errors.SyntaxError:
gui_utilities.show_dialog_error('Invalid Rule', self.window, 'The specified rule text contains a syntax error.')
return
except rule_engine.errors.EngineError:
gui_utilities.show_dialog_error('Invalid Rule', self.window, 'The specified text is not a valid rule.')
return
self._tv_model[path][_ModelNamedRow._fields.index(field)] = text
self._update_remote_entry(path)
def signal_renderer_edited_type(self, _, path, text):
field_index = _ModelNamedRow._fields.index('type')
if self._tv_model[path][field_index] == text:
return
self._tv_model[path][field_index] = text
if text.lower() == 'source':
self._tv_model[path][_ModelNamedRow._fields.index('text')] = '0.0.0.0/32'
elif text.lower() == 'rule':
self._tv_model[path][_ModelNamedRow._fields.index('text')] = 'false'
self._update_remote_entry(path)
def signal_renderer_toggled(self, field, _, path):
index = _ModelNamedRow._fields.index(field)
self._tv_model[path][index] = not self._tv_model[path][index]
self._update_remote_entry(path)
def signal_window_destroy(self, window):
self.window = None
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of butterfly
#
# butterfly Copyright(C) 2015-2017 Florian Mounier
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import tornado.options
import tornado.ioloop
import tornado.httpserver
try:
from tornado_systemd import SystemdHTTPServer as HTTPServer
except ImportError:
from tornado.httpserver import HTTPServer
import logging
import webbrowser
import uuid
import ssl
import getpass
import os
import shutil
import stat
import socket
import sys
tornado.options.define("debug", default=False, help="Debug mode")
tornado.options.define("more", default=False,
help="Debug mode with more verbosity")
tornado.options.define("unminified", default=False,
help="Use the unminified js (for development only)")
tornado.options.define("host", default='localhost', help="Server host")
tornado.options.define("port", default=57575, type=int, help="Server port")
tornado.options.define("keepalive_interval", default=30, type=int,
help="Interval between ping packets sent from server "
"to client (in seconds)")
tornado.options.define("one_shot", default=False,
help="Run a one-shot instance. Quit at term close")
tornado.options.define("shell", help="Shell to execute at login")
tornado.options.define("motd", default='motd', help="Path to the motd file.")
tornado.options.define("cmd",
help="Command to run instead of shell, f.i.: 'ls -l'")
tornado.options.define("unsecure", default=False,
help="Don't use ssl not recommended")
tornado.options.define("i_hereby_declare_i_dont_want_any_security_whatsoever",
default=False,
help="Remove all security and warnings. There are some "
"use cases for that. Use this if you really know what "
"you are doing.")
tornado.options.define("login", default=False,
help="Use login screen at start")
tornado.options.define("pam_profile", default="", type=str,
help="When --login=True provided and running as ROOT, "
"use PAM with the specified PAM profile for "
"authentication and then execute the user's default "
"shell. Will override --shell.")
tornado.options.define("force_unicode_width",
default=False,
help="Force all unicode characters to the same width."
"Useful for avoiding layout mess.")
tornado.options.define("ssl_version", default=None,
help="SSL protocol version")
tornado.options.define("generate_certs", default=False,
help="Generate butterfly certificates")
tornado.options.define("generate_current_user_pkcs", default=False,
help="Generate current user pfx for client "
"authentication")
tornado.options.define("generate_user_pkcs", default='',
help="Generate user pfx for client authentication "
"(Must be root to create for another user)")
tornado.options.define("uri_root_path", default='',
help="Sets the servier root path: "
"example.com/<uri_root_path>/static/")
if os.getuid() == 0:
ev = os.getenv('XDG_CONFIG_DIRS', '/etc')
else:
ev = os.getenv(
'XDG_CONFIG_HOME', os.path.join(
os.getenv('HOME', os.path.expanduser('~')),
'.config'))
butterfly_dir = os.path.join(ev, 'butterfly')
conf_file = os.path.join(butterfly_dir, 'butterfly.conf')
ssl_dir = os.path.join(butterfly_dir, 'ssl')
tornado.options.define("conf", default=conf_file,
help="Butterfly configuration file. "
"Contains the same options as command line.")
tornado.options.define("ssl_dir", default=ssl_dir,
help="Force SSL directory location")
# Do it once to get the conf path
tornado.options.parse_command_line()
if os.path.exists(tornado.options.options.conf):
tornado.options.parse_config_file(tornado.options.options.conf)
# Do it again to overwrite conf with args
tornado.options.parse_command_line()
# For next time, create them a conf file from template.
# Need to do this after parsing options so we do not trigger
# code import for butterfly module, in case that code is
# dependent on the set of parsed options.
if not os.path.exists(conf_file):
try:
import butterfly
shutil.copy(
os.path.join(
os.path.abspath(os.path.dirname(butterfly.__file__)),
'butterfly.conf.default'), conf_file)
print('butterfly.conf installed in %s' % conf_file)
except Exception:
pass
options = tornado.options.options
for logger in ('tornado.access', 'tornado.application',
'tornado.general', 'butterfly'):
level = logging.WARNING
if options.debug:
level = logging.INFO
if options.more:
level = logging.DEBUG
logging.getLogger(logger).setLevel(level)
log = logging.getLogger('butterfly')
host = options.host
port = options.port
if options.i_hereby_declare_i_dont_want_any_security_whatsoever:
options.unsecure = True
if not os.path.exists(options.ssl_dir):
os.makedirs(options.ssl_dir)
def to_abs(file):
return os.path.join(options.ssl_dir, file)
ca, ca_key, cert, cert_key, pkcs12 = map(to_abs, [
'butterfly_ca.crt', 'butterfly_ca.key',
'butterfly_%s.crt', 'butterfly_%s.key',
'%s.p12'])
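# Certificate layout under --ssl_dir: a self-signed butterfly CA (butterfly_ca.crt / .key),
# a per-host server certificate (butterfly_<host>.crt / .key), and per-user PKCS#12
# bundles (<user>.p12) for client authentication.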
def fill_fields(subject):
subject.C = 'WW'
subject.O = 'Butterfly'
subject.OU = 'Butterfly Terminal'
subject.ST = 'World Wide'
subject.L = 'Terminal'
def write(file, content):
with open(file, 'wb') as fd:
fd.write(content)
print('Writing %s' % file)
def read(file):
print('Reading %s' % file)
with open(file, 'rb') as fd:
return fd.read()
def b(s):
return s.encode('utf-8')
if options.generate_certs:
from OpenSSL import crypto
print('Generating certificates for %s (change it with --host)\n' % host)
if not os.path.exists(ca) and not os.path.exists(ca_key):
print('Root certificate not found, generating it')
ca_pk = crypto.PKey()
ca_pk.generate_key(crypto.TYPE_RSA, 2048)
ca_cert = crypto.X509()
ca_cert.set_version(2)
ca_cert.get_subject().CN = 'Butterfly CA on %s' % socket.gethostname()
fill_fields(ca_cert.get_subject())
ca_cert.set_serial_number(uuid.uuid4().int)
ca_cert.gmtime_adj_notBefore(0) # From now
ca_cert.gmtime_adj_notAfter(315360000) # to 10y
ca_cert.set_issuer(ca_cert.get_subject()) # Self signed
ca_cert.set_pubkey(ca_pk)
ca_cert.add_extensions([
crypto.X509Extension(
b('basicConstraints'), True, b('CA:TRUE, pathlen:0')),
crypto.X509Extension(
b('keyUsage'), True, b('keyCertSign, cRLSign')),
crypto.X509Extension(
b('subjectKeyIdentifier'), False, b('hash'), subject=ca_cert),
])
ca_cert.add_extensions([
crypto.X509Extension(
b('authorityKeyIdentifier'), False,
b('issuer:always, keyid:always'),
issuer=ca_cert, subject=ca_cert
)
])
ca_cert.sign(ca_pk, 'sha512')
write(ca, crypto.dump_certificate(crypto.FILETYPE_PEM, ca_cert))
write(ca_key, crypto.dump_privatekey(crypto.FILETYPE_PEM, ca_pk))
os.chmod(ca_key, stat.S_IRUSR | stat.S_IWUSR) # 0o600 perms
else:
print('Root certificate found, using it')
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, read(ca))
ca_pk = crypto.load_privatekey(crypto.FILETYPE_PEM, read(ca_key))
server_pk = crypto.PKey()
server_pk.generate_key(crypto.TYPE_RSA, 2048)
server_cert = crypto.X509()
server_cert.set_version(2)
server_cert.get_subject().CN = host
server_cert.add_extensions([
crypto.X509Extension(
b('basicConstraints'), False, b('CA:FALSE')),
crypto.X509Extension(
b('subjectKeyIdentifier'), False, b('hash'), subject=server_cert),
crypto.X509Extension(
b('subjectAltName'), False, b('DNS:%s' % host)),
])
server_cert.add_extensions([
crypto.X509Extension(
b('authorityKeyIdentifier'), False,
b('issuer:always, keyid:always'),
issuer=ca_cert, subject=ca_cert
)
])
fill_fields(server_cert.get_subject())
server_cert.set_serial_number(uuid.uuid4().int)
server_cert.gmtime_adj_notBefore(0) # From now
server_cert.gmtime_adj_notAfter(315360000) # to 10y
server_cert.set_issuer(ca_cert.get_subject()) # Signed by ca
server_cert.set_pubkey(server_pk)
server_cert.sign(ca_pk, 'sha512')
write(cert % host, crypto.dump_certificate(
crypto.FILETYPE_PEM, server_cert))
write(cert_key % host, crypto.dump_privatekey(
crypto.FILETYPE_PEM, server_pk))
os.chmod(cert_key % host, stat.S_IRUSR | stat.S_IWUSR) # 0o600 perms
print('\nNow you can run --generate-user-pkcs=user '
'to generate a user certificate.')
sys.exit(0)
if (options.generate_current_user_pkcs or
options.generate_user_pkcs):
from butterfly import utils
try:
current_user = utils.User()
except Exception:
current_user = None
from OpenSSL import crypto
if not all(map(os.path.exists, [ca, ca_key])):
print('Please generate certificates using --generate-certs before')
sys.exit(1)
if options.generate_current_user_pkcs:
user = current_user.name
else:
user = options.generate_user_pkcs
if user != current_user.name and current_user.uid != 0:
print('Cannot create certificate for another user with '
'current privileges.')
sys.exit(1)
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, read(ca))
ca_pk = crypto.load_privatekey(crypto.FILETYPE_PEM, read(ca_key))
client_pk = crypto.PKey()
client_pk.generate_key(crypto.TYPE_RSA, 2048)
client_cert = crypto.X509()
client_cert.set_version(2)
client_cert.get_subject().CN = user
fill_fields(client_cert.get_subject())
client_cert.set_serial_number(uuid.uuid4().int)
client_cert.gmtime_adj_notBefore(0) # From now
client_cert.gmtime_adj_notAfter(315360000) # to 10y
client_cert.set_issuer(ca_cert.get_subject()) # Signed by ca
client_cert.set_pubkey(client_pk)
client_cert.sign(ca_pk, 'sha512')  # Signed by ca
pfx = crypto.PKCS12()
pfx.set_certificate(client_cert)
pfx.set_privatekey(client_pk)
pfx.set_ca_certificates([ca_cert])
pfx.set_friendlyname(('%s cert for butterfly' % user).encode('utf-8'))
while True:
password = getpass.getpass('\nPKCS12 Password (can be blank): ')
password2 = getpass.getpass('Verify Password (can be blank): ')
if password == password2:
break
print('Passwords do not match.')
print('')
write(pkcs12 % user, pfx.export(password.encode('utf-8')))
os.chmod(pkcs12 % user, stat.S_IRUSR | stat.S_IWUSR) # 0o600 perms
sys.exit(0)
if options.unsecure:
ssl_opts = None
else:
if not all(map(os.path.exists, [cert % host, cert_key % host, ca])):
print("Unable to find butterfly certificate for host %s" % host)
print(cert % host)
print(cert_key % host)
print(ca)
print("Can't run butterfly without certificate.\n")
print("Either generate them using --generate-certs --host=host "
"or run as --unsecure (NOT RECOMMENDED)\n")
print("For more information go to http://paradoxxxzero.github.io/"
"2014/03/21/butterfly-with-ssl-auth.html\n")
sys.exit(1)
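# cert_reqs=CERT_REQUIRED makes the server demand a client certificate signed by the
# butterfly CA (one of the per-user .p12 bundles generated above).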
ssl_opts = {
'certfile': cert % host,
'keyfile': cert_key % host,
'ca_certs': ca,
'cert_reqs': ssl.CERT_REQUIRED
}
if options.ssl_version is not None:
if not hasattr(
ssl, 'PROTOCOL_%s' % options.ssl_version):
print(
"Unknown SSL protocol %s" %
options.ssl_version)
sys.exit(1)
ssl_opts['ssl_version'] = getattr(
ssl, 'PROTOCOL_%s' % options.ssl_version)
from butterfly import application
application.butterfly_dir = butterfly_dir
log.info('Starting server')
http_server = HTTPServer(application, ssl_options=ssl_opts)
http_server.listen(port, address=host)
if getattr(http_server, 'systemd', False):
os.environ.pop('LISTEN_PID')
os.environ.pop('LISTEN_FDS')
log.info('Starting loop')
ioloop = tornado.ioloop.IOLoop.instance()
if port == 0:
port = list(http_server._sockets.values())[0].getsockname()[1]
url = "http%s://%s:%d/%s" % (
"s" if not options.unsecure else "", host, port,
(options.uri_root_path.strip('/') + '/') if options.uri_root_path else ''
)
if not options.one_shot or not webbrowser.open(url):
log.warning('Butterfly is ready, open your browser to: %s' % url)
ioloop.start()
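# Typical invocations (a sketch assuming this script is installed as the `butterfly`
# command; flag names match the options defined above):
#   butterfly --generate-certs --host=example.com   # create the CA and server certificate
#   butterfly --generate-user-pkcs=alice            # create a client .p12 for user 'alice'
#   butterfly --host=example.com --port=57575       # serve over SSL with client auth
#   butterfly --unsecure --port=57575               # serve without SSL (not recommended)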
|
|
"""
Copyright 2017 Shaun Edwards
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import rospy
import rospkg
from threading import Thread
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QThread
from python_qt_binding.QtGui import QWidget, QPalette
from std_srvs.srv import Trigger
from packml_msgs.srv import Transition
from packml_msgs.srv import TransitionRequest
from packml_msgs.msg import Status
from packml_msgs.msg import State
from packml_msgs.msg import Mode
class Packml(Plugin):
def __init__(self, context):
super(Packml, self).__init__(context)
self.setObjectName('Packml')
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet",
help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
if not args.quiet:
print('arguments: %s' % args)
print('unknowns: %s' % unknowns)
# Create QWidget
self._widget = QWidget()
ui_file = os.path.join(rospkg.RosPack().get_path('packml_gui'), 'resource', 'packml.ui')
loadUi(ui_file, self._widget)
self._widget.setObjectName('Packml')
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
context.add_widget(self._widget)
# Custom code begins here
self._widget.reset_button.clicked[bool].connect(self.__handle_reset_clicked)
self._widget.start_button.clicked[bool].connect(self.__handle_start_clicked)
self._widget.stop_button.clicked[bool].connect(self.__handle_stop_clicked)
self._widget.clear_button.clicked[bool].connect(self.__handle_clear_clicked)
self._widget.hold_button.clicked[bool].connect(self.__handle_hold_clicked)
self._widget.unhold_button.clicked[bool].connect(self.__handle_unhold_clicked)
self._widget.suspend_button.clicked[bool].connect(self.__handle_suspend_clicked)
self._widget.unsuspend_button.clicked[bool].connect(self.__handle_unsuspend_clicked)
self._widget.abort_button.clicked[bool].connect(self.__handle_abort_clicked)
self._service_thread = Thread(target=self.wait_for_services, args=())
self._service_thread.start()
self._status_sub = rospy.Subscriber('packml/status', Status, self.status_callback)
def disable_all_buttons(self):
self._widget.clear_button.setEnabled(False)
self._widget.reset_button.setEnabled(False)
self._widget.start_button.setEnabled(False)
self._widget.stop_button.setEnabled(False)
self._widget.hold_button.setEnabled(False)
self._widget.suspend_button.setEnabled(False)
self._widget.unhold_button.setEnabled(False)
self._widget.unsuspend_button.setEnabled(False)
self._widget.abort_button.setEnabled(False)
def set_message_text(self, text):
self._widget.message_box.setText("Message: " + text)
def status_callback(self, msg):
self.update_button_states(msg.state.val)
self.update_status_fields(msg)
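# Enable only the buttons whose PackML transitions are valid from the reported state.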
def update_button_states(self, state):
self.disable_all_buttons()
if state == State.ABORTED:
self._widget.clear_button.setEnabled(True)
elif state == State.STOPPED:
self._widget.reset_button.setEnabled(True)
elif state == State.IDLE:
self._widget.start_button.setEnabled(True)
elif state == State.EXECUTE:
self._widget.hold_button.setEnabled(True)
self._widget.suspend_button.setEnabled(True)
elif state == State.HELD:
self._widget.unhold_button.setEnabled(True)
elif state == State.SUSPENDED:
self._widget.unsuspend_button.setEnabled(True)
elif state == State.COMPLETE:
self._widget.reset_button.setEnabled(True)
if state != State.STOPPED and \
state != State.STOPPING and \
state != State.ABORTED and \
state != State.ABORTING and \
state != State.CLEARING:
self._widget.stop_button.setEnabled(True)
if state != State.ABORTED and \
state != State.ABORTING:
self._widget.abort_button.setEnabled(True)
def update_status_fields(self, msg):
self.update_state_field(msg.state.val)
self._widget.substate.setText(str(msg.sub_state))
self.update_mode_field(msg.mode.val)
self._widget.error_code.setText(str(msg.error))
self._widget.suberror_code.setText(str(msg.sub_error))
def update_state_field(self, state):
if state == State.UNDEFINED:
self._widget.state_name.setText("UNDEFINED")
elif state == State.OFF:
self._widget.state_name.setText("OFF")
elif state == State.STOPPED:
self._widget.state_name.setText("STOPPED")
elif state == State.STARTING:
self._widget.state_name.setText("STARTING")
elif state == State.IDLE:
self._widget.state_name.setText("IDLE")
elif state == State.SUSPENDED:
self._widget.state_name.setText("SUSPENDED")
elif state == State.EXECUTE:
self._widget.state_name.setText("EXECUTE")
elif state == State.STOPPING:
self._widget.state_name.setText("STOPPING")
elif state == State.ABORTING:
self._widget.state_name.setText("ABORTING")
elif state == State.ABORTED:
self._widget.state_name.setText("ABORTED")
elif state == State.HOLDING:
self._widget.state_name.setText("HOLDING")
elif state == State.HELD:
self._widget.state_name.setText("HELD")
elif state == State.RESETTING:
self._widget.state_name.setText("RESETTING")
elif state == State.SUSPENDING:
self._widget.state_name.setText("SUSPENDING")
elif state == State.UNSUSPENDING:
self._widget.state_name.setText("UNSUSPENDING")
elif state == State.CLEARING:
self._widget.state_name.setText("CLEARING")
elif state == State.UNHOLDING:
self._widget.state_name.setText("UNHOLDING")
elif state == State.COMPLETING:
self._widget.state_name.setText("COMPLETING")
elif state == State.COMPLETE:
self._widget.state_name.setText("COMPLETE")
else:
self._widget.state_name.setText("UNKNOWN")
def update_mode_field(self, mode):
if mode == Mode.UNDEFINED:
self._widget.mode_name.setText("UNDEFINED")
elif mode == Mode.AUTOMATIC:
self._widget.mode_name.setText("AUTOMATIC")
elif mode == Mode.SEMI_AUTOMATIC:
self._widget.mode_name.setText("SEMI-AUTOMATIC")
elif mode == Mode.MANUAL:
self._widget.mode_name.setText("MANUAL")
elif mode == Mode.IDLE:
self._widget.mode_name.setText("IDLE")
elif mode == Mode.SETUP:
self._widget.mode_name.setText("SETUP")
else:
self._widget.mode_name.setText("UNKNOWN")
def wait_for_services(self):
self._widget.setEnabled(False)
transition_service_name = 'packml/transition'
rospy.wait_for_service(transition_service_name, 30)
self.transition_service = rospy.ServiceProxy(transition_service_name, Transition)
self._widget.setEnabled(True)
def shutdown_plugin(self):
self._status_sub.unregister()
pass
def save_settings(self, plugin_settings, instance_settings):
# TODO save intrinsic configuration, usually using:
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO restore intrinsic configuration, usually using:
# v = instance_settings.value(k)
pass
def __handle_start_clicked(self, checked):
rospy.loginfo("Start button press")
res = self.transition_service(TransitionRequest.START)
self.set_message_text(res.message)
def __handle_stop_clicked(self, checked):
rospy.loginfo("Stop button press")
res = self.transition_service(TransitionRequest.STOP)
self.set_message_text(res.message)
def __handle_reset_clicked(self, checked):
rospy.loginfo("Reset button press")
res = self.transition_service(TransitionRequest.RESET)
self.set_message_text(res.message)
def __handle_clear_clicked(self, checked):
rospy.loginfo("Clear button press")
res = self.transition_service(TransitionRequest.CLEAR)
self.set_message_text(res.message)
def __handle_hold_clicked(self, checked):
rospy.loginfo("Hold button press")
res = self.transition_service(TransitionRequest.HOLD)
self.set_message_text(res.message)
def __handle_unhold_clicked(self, checked):
rospy.loginfo("Unhold button press")
res = self.transition_service(TransitionRequest.UNHOLD)
self.set_message_text(res.message)
def __handle_suspend_clicked(self, checked):
rospy.loginfo("Suspend button press")
res = self.transition_service(TransitionRequest.SUSPEND)
self.set_message_text(res.message)
def __handle_unsuspend_clicked(self, checked):
rospy.loginfo("Unsuspend button press")
res = self.transition_service(TransitionRequest.UNSUSPEND)
self.set_message_text(res.message)
def __handle_abort_clicked(self, checked):
rospy.loginfo("Abort button press")
res = self.transition_service(TransitionRequest.ABORT)
self.set_message_text(res.message)
@staticmethod
def add_arguments(parser):
rospy.loginfo("Add arguments callback")
group = parser.add_argument_group('Options for PackML plugin')
group.add_argument('--arg1', action='store_true', help='arg1 help')
#def trigger_configuration(self):
# Comment in to signal that the plugin has a way to configure it
# Usually used to open a configuration dialog
|
|
## This job takes in the MIMIC-III CSV.gz files from the specified input bucket
## and converts them to Parquet format in the specified output bucket
## Author: James Wiggins (wiggjame@amazon.com)
## Date: 6/3/19
## Revision: 1
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.sql.functions import *
from pyspark.sql.types import *
import os.path
from os import path
import boto3
s3 = boto3.resource('s3')
glueContext = GlueContext(SparkContext.getOrCreate())
spark = glueContext.spark_session
# Specify MIMIC data input and output S3 buckets
mimiccsvinputbucket='mimic-iii-physionet'
mimicparquetoutputbucket='mimic-iii-physionet'
mimicparquetoutputprefix='parquet/'
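# Each table below follows the same read-CSV/write-Parquet pattern; see the helper
# sketch at the end of this script for how it could be factored out.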
# ADMISSIONS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("admittime", TimestampType()),
StructField("dischtime", TimestampType()),
StructField("deathtime", TimestampType()),
StructField("admission_type", StringType()),
StructField("admission_location", StringType()),
StructField("discharge_location", StringType()),
StructField("insurance", StringType()),
StructField("language", StringType()),
StructField("religion", StringType()),
StructField("marital_status", StringType()),
StructField("ethnicity", StringType()),
StructField("edregtime", TimestampType()),
StructField("edouttime", TimestampType()),
StructField("diagnosis", StringType()),
StructField("hospital_expire_flag", ShortType()),
StructField("has_chartevents_data", ShortType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/ADMISSIONS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'ADMISSIONS', compression="snappy", mode="Overwrite")
# CALLOUT table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("submit_wardid", IntegerType()),
StructField("submit_careunit", StringType()),
StructField("curr_wardid", IntegerType()),
StructField("curr_careunit", StringType()),
StructField("callout_wardid", IntegerType()),
StructField("callout_service", StringType()),
StructField("request_tele", ShortType()),
StructField("request_resp", ShortType()),
StructField("request_cdiff", ShortType()),
StructField("request_mrsa", ShortType()),
StructField("request_vre", ShortType()),
StructField("callout_status", StringType()),
StructField("callout_outcome", StringType()),
StructField("discharge_wardid", IntegerType()),
StructField("acknowledge_status", StringType()),
StructField("createtime", TimestampType()),
StructField("updatetime", TimestampType()),
StructField("acknowledgetime", TimestampType()),
StructField("outcometime", TimestampType()),
StructField("firstreservationtime", TimestampType()),
StructField("currentreservationtime", TimestampType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/CALLOUT.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'CALLOUT', compression="snappy", mode="Overwrite")
# CAREGIVERS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("cgid", IntegerType()),
StructField("label", StringType()),
StructField("description", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/CAREGIVERS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'CAREGIVERS', compression="snappy", mode="Overwrite")
# CHARTEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("itemid", IntegerType()),
StructField("charttime", TimestampType()),
StructField("storetime", TimestampType()),
StructField("cgid", IntegerType()),
StructField("value", StringType()),
StructField("valuenum", DoubleType()),
StructField("valueuom", StringType()),
StructField("warning", IntegerType()),
StructField("error", IntegerType()),
StructField("resultstatus", StringType()),
StructField("stopped", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/CHARTEVENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'CHARTEVENTS', compression="snappy", mode="Overwrite")
# CPTEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("costcenter", StringType()),
StructField("chartdate", TimestampType()),
StructField("cpt_cd", StringType()),
StructField("cpt_number", IntegerType()),
StructField("cpt_suffix", StringType()),
StructField("ticket_id_seq", IntegerType()),
StructField("sectionheader", StringType()),
StructField("subsectionheader", StringType()),
StructField("description", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/CPTEVENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'CPTEVENTS', compression="snappy", mode="Overwrite")
# D_CPT table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("category", ShortType()),
StructField("sectionrange", StringType()),
StructField("sectionheader", StringType()),
StructField("subsectionrange", StringType()),
StructField("subsectionheader", StringType()),
StructField("codesuffix", StringType()),
StructField("mincodeinsubsection", IntegerType()),
StructField("maxcodeinsubsection", IntegerType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/D_CPT.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'D_CPT', compression="snappy", mode="Overwrite")
# D_ICD_DIAGNOSES table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("icd9_code", StringType()),
StructField("short_title", StringType()),
StructField("long_title", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/D_ICD_DIAGNOSES.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'D_ICD_DIAGNOSES', compression="snappy", mode="Overwrite")
# D_ICD_PROCEDURES table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("icd9_code", StringType()),
StructField("short_title", StringType()),
StructField("long_title", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/D_ICD_PROCEDURES.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'D_ICD_PROCEDURES', compression="snappy", mode="Overwrite")
# D_ITEMS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("itemid", IntegerType()),
StructField("label", StringType()),
StructField("abbreviation", StringType()),
StructField("dbsource", StringType()),
StructField("linksto", StringType()),
StructField("category", StringType()),
StructField("unitname", StringType()),
StructField("param_type", StringType()),
StructField("conceptid", IntegerType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/D_ITEMS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'D_ITEMS', compression="snappy", mode="Overwrite")
# D_LABITEMS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("itemid", IntegerType()),
StructField("label", StringType()),
StructField("fluid", StringType()),
StructField("category", StringType()),
StructField("loinc_code", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/D_LABITEMS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'D_LABITEMS', compression="snappy", mode="Overwrite")
# DATETIMEEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("itemid", IntegerType()),
StructField("charttime", TimestampType()),
StructField("storetime", TimestampType()),
StructField("cgid", IntegerType()),
StructField("value", StringType()),
StructField("valueuom", StringType()),
StructField("warning", IntegerType()),
StructField("error", IntegerType()),
StructField("resultstatus", StringType()),
StructField("stopped", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/DATETIMEEVENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'DATETIMEEVENTS', compression="snappy", mode="Overwrite")
# DIAGNOSES_ICD table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("seq_num", IntegerType()),
StructField("icd9_code", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/DIAGNOSES_ICD.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'DIAGNOSES_ICD', compression="snappy", mode="Overwrite")
# DRGCODES table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("drg_type", StringType()),
StructField("drg_code", StringType()),
StructField("description", StringType()),
StructField("drg_severity", ShortType()),
StructField("drg_mortality", ShortType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/DRGCODES.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'DRGCODES', compression="snappy", mode="Overwrite")
# ICUSTAYS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("dbsource", StringType()),
StructField("first_careunit", StringType()),
StructField("last_careunit", StringType()),
StructField("first_wardid", ShortType()),
StructField("last_wardid", ShortType()),
StructField("intime", TimestampType()),
StructField("outtime", TimestampType()),
StructField("los", DoubleType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/ICUSTAYS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'ICUSTAYS', compression="snappy", mode="Overwrite")
# INPUTEVENTS_CV table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("charttime", TimestampType()),
StructField("itemid", IntegerType()),
StructField("amount", DoubleType()),
StructField("amountuom", StringType()),
StructField("rate", DoubleType()),
StructField("rateuom", StringType()),
StructField("storetime", TimestampType()),
StructField("cgid", IntegerType()),
StructField("orderid", IntegerType()),
StructField("linkorderid", IntegerType()),
StructField("stopped", StringType()),
StructField("newbottle", IntegerType()),
StructField("originalamount", DoubleType()),
StructField("originalamountuom", StringType()),
StructField("originalroute", StringType()),
StructField("originalrate", DoubleType()),
StructField("originalrateuom", StringType()),
StructField("originalsite", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/INPUTEVENTS_CV.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'INPUTEVENTS_CV', compression="snappy", mode="Overwrite")
# INPUTEVENTS_MV table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("starttime", TimestampType()),
StructField("endtime", TimestampType()),
StructField("itemid", IntegerType()),
StructField("amount", DoubleType()),
StructField("amountuom", StringType()),
StructField("rate", DoubleType()),
StructField("rateuom", StringType()),
StructField("storetime", TimestampType()),
StructField("cgid", IntegerType()),
StructField("orderid", IntegerType()),
StructField("linkorderid", IntegerType()),
StructField("ordercategoryname", StringType()),
StructField("secondaryordercategoryname", StringType()),
StructField("ordercomponenttypedescription", StringType()),
StructField("ordercategorydescription", StringType()),
StructField("patientweight", DoubleType()),
StructField("totalamount", DoubleType()),
StructField("totalamountuom", StringType()),
StructField("isopenbag", ShortType()),
StructField("continueinnextdept", ShortType()),
StructField("cancelreason", ShortType()),
StructField("statusdescription", StringType()),
StructField("comments_editedby", StringType()),
StructField("comments_canceledby", StringType()),
StructField("comments_date", TimestampType()),
StructField("originalamount", DoubleType()),
StructField("originalrate", DoubleType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/INPUTEVENTS_MV.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'INPUTEVENTS_MV', compression="snappy", mode="Overwrite")
# LABEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("itemid", IntegerType()),
StructField("charttime", TimestampType()),
StructField("value", StringType()),
StructField("valuenum", DoubleType()),
StructField("valueuom", StringType()),
StructField("flag", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/LABEVENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'LABEVENTS', compression="snappy", mode="Overwrite")
# MICROBIOLOGYEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("chartdate", TimestampType()),
StructField("charttime", TimestampType()),
StructField("spec_itemid", IntegerType()),
StructField("spec_type_desc", StringType()),
StructField("org_itemid", IntegerType()),
StructField("org_name", StringType()),
StructField("isolate_num", ShortType()),
StructField("ab_itemid", IntegerType()),
StructField("ab_name", StringType()),
StructField("dilution_text", StringType()),
StructField("dilution_comparison", StringType()),
StructField("dilution_value", DoubleType()),
StructField("interpretation", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/MICROBIOLOGYEVENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'MICROBIOLOGYEVENTS', compression="snappy", mode="Overwrite")
# NOTEEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("chartdate", TimestampType()),
StructField("charttime", TimestampType()),
StructField("storetime", TimestampType()),
StructField("category", StringType()),
StructField("description", StringType()),
StructField("cgid", IntegerType()),
StructField("iserror", StringType()),
StructField("text", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/NOTEEVENTS.csv.gz',\
header=True,\
schema=schema,\
multiLine=True,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'NOTEEVENTS', compression="snappy", mode="Overwrite")
# OUTPUTEVENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("charttime", TimestampType()),
StructField("itemid", IntegerType()),
StructField("value", DoubleType()),
StructField("valueuom", StringType()),
StructField("storetime", TimestampType()),
StructField("cgid", IntegerType()),
StructField("stopped", StringType()),
StructField("newbottle", StringType()),
StructField("iserror", IntegerType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/OUTPUTEVENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'OUTPUTEVENTS', compression="snappy", mode="Overwrite")
# PATIENTS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("gender", StringType()),
StructField("dob", TimestampType()),
StructField("dod", TimestampType()),
StructField("dod_hosp", TimestampType()),
StructField("dod_ssn", TimestampType()),
StructField("expire_flag", IntegerType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/PATIENTS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'PATIENTS', compression="snappy", mode="Overwrite")
# PRESCRIPTIONS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("startdate", TimestampType()),
StructField("enddate", TimestampType()),
StructField("drug_type", StringType()),
StructField("drug", StringType()),
StructField("drug_name_poe", StringType()),
StructField("drug_name_generic", StringType()),
StructField("formulary_drug_cd", StringType()),
StructField("gsn", StringType()),
StructField("ndc", StringType()),
StructField("prod_strength", StringType()),
StructField("dose_val_rx", StringType()),
StructField("dose_unit_rx", StringType()),
StructField("form_val_disp", StringType()),
StructField("form_unit_disp", StringType()),
StructField("route", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/PRESCRIPTIONS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'PRESCRIPTIONS', compression="snappy", mode="Overwrite")
# PROCEDUREEVENTS_MV table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("starttime", TimestampType()),
StructField("endtime", TimestampType()),
StructField("itemid", IntegerType()),
StructField("value", DoubleType()),
StructField("valueuom", StringType()),
StructField("location", StringType()),
StructField("locationcategory", StringType()),
StructField("storetime", TimestampType()),
StructField("cgid", IntegerType()),
StructField("orderid", IntegerType()),
StructField("linkorderid", IntegerType()),
StructField("ordercategoryname", StringType()),
StructField("secondaryordercategoryname", StringType()),
StructField("ordercategorydescription", StringType()),
StructField("isopenbag", ShortType()),
StructField("continueinnextdept", ShortType()),
StructField("cancelreason", ShortType()),
StructField("statusdescription", StringType()),
StructField("comments_editedby", StringType()),
StructField("comments_canceledby", StringType()),
StructField("comments_date", TimestampType()),
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/PROCEDUREEVENTS_MV.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'PROCEDUREEVENTS_MV', compression="snappy", mode="Overwrite")
# PROCEDURES_ICD table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("seq_num", IntegerType()),
StructField("icd9_code", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/PROCEDURES_ICD.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'PROCEDURES_ICD', compression="snappy", mode="Overwrite")
# SERVICES table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("transfertime", TimestampType()),
StructField("prev_service", StringType()),
StructField("curr_service", StringType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/SERVICES.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'SERVICES', compression="snappy", mode="Overwrite")
# TRANSFERS table parquet transformation
schema = StructType([
StructField("row_id", IntegerType()),
StructField("subject_id", IntegerType()),
StructField("hadm_id", IntegerType()),
StructField("icustay_id", IntegerType()),
StructField("dbsource", StringType()),
StructField("eventtype", StringType()),
StructField("prev_careunit", StringType()),
StructField("curr_careunit", StringType()),
StructField("prev_wardid", ShortType()),
StructField("curr_wardid", ShortType()),
StructField("intime", TimestampType()),
StructField("outtime", TimestampType()),
StructField("los", DoubleType())
])
df = spark.read.csv('s3://'+mimiccsvinputbucket+'/TRANSFERS.csv.gz',\
header=True,\
schema=schema,\
quote='"',\
escape='"')
df.write.parquet('s3://'+mimicparquetoutputbucket+'/'+mimicparquetoutputprefix+'TRANSFERS', compression="snappy", mode="Overwrite")
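# The per-table blocks above all repeat the same read-CSV/write-Parquet pattern.
# A minimal sketch (assuming the bucket/prefix variables defined at the top of this
# job) of how that pattern could be factored into a helper; the explicit blocks
# above remain the actual implementation.
def csv_to_parquet(table_name, table_schema, multi_line=False):
    # Read one MIMIC-III CSV.gz table and rewrite it as snappy-compressed Parquet.
    frame = spark.read.csv('s3://' + mimiccsvinputbucket + '/' + table_name + '.csv.gz',
                           header=True,
                           schema=table_schema,
                           multiLine=multi_line,
                           quote='"',
                           escape='"')
    frame.write.parquet('s3://' + mimicparquetoutputbucket + '/' + mimicparquetoutputprefix + table_name,
                        compression="snappy",
                        mode="Overwrite")
# Example (hypothetical): csv_to_parquet('NOTEEVENTS', schema, multi_line=True)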
|
|
from typing import Any, Mapping, Union
from unittest import mock
import orjson
from django.conf import settings
from django.test import override_settings
from zerver.lib.actions import do_create_user, get_service_bot_events
from zerver.lib.bot_config import ConfigError, load_bot_config_template, set_bot_config
from zerver.lib.bot_lib import EmbeddedBotEmptyRecipientsList, EmbeddedBotHandler, StateHandler
from zerver.lib.bot_storage import StateError
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.validator import check_string
from zerver.models import Recipient, UserProfile, get_realm
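# Maps each service bot type to the name of the queue its trigger events are published to.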
BOT_TYPE_TO_QUEUE_NAME = {
UserProfile.OUTGOING_WEBHOOK_BOT: 'outgoing_webhooks',
UserProfile.EMBEDDED_BOT: 'embedded_bots',
}
class TestServiceBotBasics(ZulipTestCase):
def _get_outgoing_bot(self) -> UserProfile:
outgoing_bot = do_create_user(
email="bar-bot@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="BarBot",
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
bot_owner=self.example_user('cordelia'),
)
return outgoing_bot
def test_service_events_for_pms(self) -> None:
sender = self.example_user('hamlet')
assert(not sender.is_bot)
outgoing_bot = self._get_outgoing_bot()
assert outgoing_bot.bot_type is not None
event_dict = get_service_bot_events(
sender=sender,
service_bot_tuples=[
(outgoing_bot.id, outgoing_bot.bot_type),
],
active_user_ids={outgoing_bot.id},
mentioned_user_ids=set(),
recipient_type=Recipient.PERSONAL,
)
expected = dict(
outgoing_webhooks=[
dict(trigger='private_message', user_profile_id=outgoing_bot.id),
],
)
self.assertEqual(event_dict, expected)
def test_spurious_mentions(self) -> None:
sender = self.example_user('hamlet')
assert(not sender.is_bot)
outgoing_bot = self._get_outgoing_bot()
assert outgoing_bot.bot_type is not None
# If outgoing_bot is not in mentioned_user_ids,
# we skip over it. This covers an anomaly of the code:
# our query for bots can include bots that were not
# actually mentioned, and it's easiest to just filter
# them out in get_service_bot_events.
event_dict = get_service_bot_events(
sender=sender,
service_bot_tuples=[
(outgoing_bot.id, outgoing_bot.bot_type),
],
active_user_ids={outgoing_bot.id},
mentioned_user_ids=set(),
recipient_type=Recipient.STREAM,
)
self.assertEqual(len(event_dict), 0)
def test_service_events_for_stream_mentions(self) -> None:
sender = self.example_user('hamlet')
assert(not sender.is_bot)
outgoing_bot = self._get_outgoing_bot()
assert outgoing_bot.bot_type is not None
cordelia = self.example_user('cordelia')
red_herring_bot = self.create_test_bot(
short_name='whatever',
user_profile=cordelia,
)
event_dict = get_service_bot_events(
sender=sender,
service_bot_tuples=[
(outgoing_bot.id, outgoing_bot.bot_type),
(red_herring_bot.id, UserProfile.OUTGOING_WEBHOOK_BOT),
],
active_user_ids=set(),
mentioned_user_ids={outgoing_bot.id},
recipient_type=Recipient.STREAM,
)
expected = dict(
outgoing_webhooks=[
dict(trigger='mention', user_profile_id=outgoing_bot.id),
],
)
self.assertEqual(event_dict, expected)
def test_service_events_for_private_mentions(self) -> None:
"""Service bots should not get access to mentions if they aren't a
direct recipient."""
sender = self.example_user('hamlet')
assert(not sender.is_bot)
outgoing_bot = self._get_outgoing_bot()
assert outgoing_bot.bot_type is not None
event_dict = get_service_bot_events(
sender=sender,
service_bot_tuples=[
(outgoing_bot.id, outgoing_bot.bot_type),
],
active_user_ids=set(),
mentioned_user_ids={outgoing_bot.id},
recipient_type=Recipient.PERSONAL,
)
self.assertEqual(len(event_dict), 0)
def test_service_events_with_unexpected_bot_type(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
bot = self.create_test_bot(
short_name='whatever',
user_profile=cordelia,
)
wrong_bot_type = UserProfile.INCOMING_WEBHOOK_BOT
bot.bot_type = wrong_bot_type
bot.save()
with mock.patch('logging.error') as log_mock:
event_dict = get_service_bot_events(
sender=hamlet,
service_bot_tuples=[
(bot.id, wrong_bot_type),
],
active_user_ids=set(),
mentioned_user_ids={bot.id},
recipient_type=Recipient.PERSONAL,
)
self.assertEqual(len(event_dict), 0)
arg = log_mock.call_args_list[0][0][0]
self.assertIn('Unexpected bot_type', arg)
class TestServiceBotStateHandler(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("othello")
self.bot_profile = do_create_user(email="embedded-bot-1@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="EmbeddedBo1",
bot_type=UserProfile.EMBEDDED_BOT,
bot_owner=self.user_profile)
self.second_bot_profile = do_create_user(email="embedded-bot-2@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="EmbeddedBot2",
bot_type=UserProfile.EMBEDDED_BOT,
bot_owner=self.user_profile)
def test_basic_storage_and_retrieval(self) -> None:
storage = StateHandler(self.bot_profile)
storage.put('some key', 'some value')
storage.put('some other key', 'some other value')
self.assertEqual(storage.get('some key'), 'some value')
self.assertEqual(storage.get('some other key'), 'some other value')
self.assertTrue(storage.contains('some key'))
self.assertFalse(storage.contains('nonexistent key'))
self.assertRaisesMessage(StateError,
"Key does not exist.",
lambda: storage.get('nonexistent key'))
storage.put('some key', 'a new value')
self.assertEqual(storage.get('some key'), 'a new value')
second_storage = StateHandler(self.second_bot_profile)
self.assertRaises(StateError, lambda: second_storage.get('some key'))
second_storage.put('some key', 'yet another value')
self.assertEqual(storage.get('some key'), 'a new value')
self.assertEqual(second_storage.get('some key'), 'yet another value')
def test_marshaling(self) -> None:
storage = StateHandler(self.bot_profile)
serializable_obj = {'foo': 'bar', 'baz': [42, 'cux']}
storage.put('some key', serializable_obj)
self.assertEqual(storage.get('some key'), serializable_obj)
# Reduce maximal storage size for faster test string construction.
@override_settings(USER_STATE_SIZE_LIMIT=100)
def test_storage_limit(self) -> None:
storage = StateHandler(self.bot_profile)
# Disable marshaling for storing a string whose size is
# equivalent to the size of the stored object.
storage.marshal = lambda obj: check_string("obj", obj)
storage.demarshal = lambda obj: obj
key = 'capacity-filling entry'
storage.put(key, 'x' * (settings.USER_STATE_SIZE_LIMIT - len(key)))
with self.assertRaisesMessage(StateError, "Request exceeds storage limit by 32 characters. "
"The limit is 100 characters."):
storage.put('too much data', 'a few bits too long')
second_storage = StateHandler(self.second_bot_profile)
second_storage.put('another big entry', 'x' * (settings.USER_STATE_SIZE_LIMIT - 40))
second_storage.put('normal entry', 'abcd')
def test_entry_removal(self) -> None:
storage = StateHandler(self.bot_profile)
storage.put('some key', 'some value')
storage.put('another key', 'some value')
self.assertTrue(storage.contains('some key'))
self.assertTrue(storage.contains('another key'))
storage.remove('some key')
self.assertFalse(storage.contains('some key'))
self.assertTrue(storage.contains('another key'))
self.assertRaises(StateError, lambda: storage.remove('some key'))
def test_internal_endpoint(self) -> None:
self.login_user(self.user_profile)
# Store some data.
initial_dict = {'key 1': 'value 1', 'key 2': 'value 2', 'key 3': 'value 3'}
params = {
'storage': orjson.dumps(initial_dict).decode(),
}
result = self.client_put('/json/bot_storage', params)
self.assert_json_success(result)
# Assert the stored data for some keys.
params = {
'keys': orjson.dumps(['key 1', 'key 3']).decode(),
}
result = self.client_get('/json/bot_storage', params)
self.assert_json_success(result)
self.assertEqual(result.json()['storage'], {'key 3': 'value 3', 'key 1': 'value 1'})
# Assert the stored data for all keys.
result = self.client_get('/json/bot_storage')
self.assert_json_success(result)
self.assertEqual(result.json()['storage'], initial_dict)
# Store some more data; update an entry and store a new entry
dict_update = {'key 1': 'new value', 'key 4': 'value 4'}
params = {
'storage': orjson.dumps(dict_update).decode(),
}
result = self.client_put('/json/bot_storage', params)
self.assert_json_success(result)
# Assert the data was updated.
updated_dict = initial_dict.copy()
updated_dict.update(dict_update)
result = self.client_get('/json/bot_storage')
self.assert_json_success(result)
self.assertEqual(result.json()['storage'], updated_dict)
# Assert errors on invalid requests.
invalid_params = {
'keys': ["This is a list, but should be a serialized string."],
}
result = self.client_get('/json/bot_storage', invalid_params)
self.assert_json_error(result, 'Argument "keys" is not valid JSON.')
params = {
'keys': orjson.dumps(["key 1", "nonexistent key"]).decode(),
}
result = self.client_get('/json/bot_storage', params)
self.assert_json_error(result, "Key does not exist.")
params = {
'storage': orjson.dumps({'foo': [1, 2, 3]}).decode(),
}
result = self.client_put('/json/bot_storage', params)
self.assert_json_error(result, "storage contains a value that is not a string")
# Remove some entries.
keys_to_remove = ['key 1', 'key 2']
params = {
'keys': orjson.dumps(keys_to_remove).decode(),
}
result = self.client_delete('/json/bot_storage', params)
self.assert_json_success(result)
# Assert the entries were removed.
for key in keys_to_remove:
updated_dict.pop(key)
result = self.client_get('/json/bot_storage')
self.assert_json_success(result)
self.assertEqual(result.json()['storage'], updated_dict)
# Try to remove an existing and a nonexistent key.
params = {
'keys': orjson.dumps(['key 3', 'nonexistent key']).decode(),
}
result = self.client_delete('/json/bot_storage', params)
self.assert_json_error(result, "Key does not exist.")
# Assert an error has been thrown and no entries were removed.
result = self.client_get('/json/bot_storage')
self.assert_json_success(result)
self.assertEqual(result.json()['storage'], updated_dict)
# Remove the entire storage.
result = self.client_delete('/json/bot_storage')
self.assert_json_success(result)
# Assert the entire storage has been removed.
result = self.client_get('/json/bot_storage')
self.assert_json_success(result)
self.assertEqual(result.json()['storage'], {})
class TestServiceBotConfigHandler(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("othello")
self.bot_profile = self.create_test_bot('embedded', self.user_profile,
full_name='Embedded bot',
bot_type=UserProfile.EMBEDDED_BOT,
service_name='helloworld')
self.bot_handler = EmbeddedBotHandler(self.bot_profile)
def test_basic_storage_and_retrieval(self) -> None:
with self.assertRaises(ConfigError):
self.bot_handler.get_config_info('foo')
self.assertEqual(self.bot_handler.get_config_info('foo', optional=True), dict())
config_dict = {"entry 1": "value 1", "entry 2": "value 2"}
for key, value in config_dict.items():
set_bot_config(self.bot_profile, key, value)
self.assertEqual(self.bot_handler.get_config_info('foo'), config_dict)
config_update = {"entry 2": "new value", "entry 3": "value 3"}
for key, value in config_update.items():
set_bot_config(self.bot_profile, key, value)
config_dict.update(config_update)
self.assertEqual(self.bot_handler.get_config_info('foo'), config_dict)
@override_settings(BOT_CONFIG_SIZE_LIMIT=100)
def test_config_entry_limit(self) -> None:
set_bot_config(self.bot_profile, "some key", 'x' * (settings.BOT_CONFIG_SIZE_LIMIT-8))
self.assertRaisesMessage(ConfigError,
"Cannot store configuration. Request would require 101 characters. "
"The current configuration size limit is 100 characters.",
lambda: set_bot_config(self.bot_profile, "some key", 'x' * (settings.BOT_CONFIG_SIZE_LIMIT-8+1)))
set_bot_config(self.bot_profile, "some key", 'x' * (settings.BOT_CONFIG_SIZE_LIMIT-20))
set_bot_config(self.bot_profile, "another key", 'x')
self.assertRaisesMessage(ConfigError,
"Cannot store configuration. Request would require 116 characters. "
"The current configuration size limit is 100 characters.",
lambda: set_bot_config(self.bot_profile, "yet another key", 'x'))
def test_load_bot_config_template(self) -> None:
bot_config = load_bot_config_template('giphy')
self.assertTrue(isinstance(bot_config, dict))
self.assertEqual(len(bot_config), 1)
def test_load_bot_config_template_for_bot_without_config_data(self) -> None:
bot_config = load_bot_config_template('converter')
self.assertTrue(isinstance(bot_config, dict))
self.assertEqual(len(bot_config), 0)
def test_bot_send_pm_with_empty_recipients_list(self) -> None:
with self.assertRaisesRegex(EmbeddedBotEmptyRecipientsList, 'Message must have recipients!'):
self.bot_handler.send_message(message={'type': 'private', 'to': []})
class TestServiceBotEventTriggers(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("othello")
self.bot_profile = do_create_user(email="foo-bot@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="FooBot",
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
bot_owner=self.user_profile)
self.second_bot_profile = do_create_user(email="bar-bot@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="BarBot",
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
bot_owner=self.user_profile)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_trigger_on_stream_mention_from_user(self, mock_queue_json_publish: mock.Mock) -> None:
for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
content = '@**FooBot** foo bar!!!'
recipient = 'Denmark'
trigger = 'mention'
message_type = Recipient._type_names[Recipient.STREAM]
def check_values_passed(
queue_name: Any,
trigger_event: Union[Mapping[Any, Any], Any],
) -> None:
self.assertEqual(queue_name, expected_queue_name)
self.assertEqual(trigger_event["message"]["content"], content)
self.assertEqual(trigger_event["message"]["display_recipient"], recipient)
self.assertEqual(trigger_event["message"]["sender_email"], self.user_profile.email)
self.assertEqual(trigger_event["message"]["type"], message_type)
self.assertEqual(trigger_event['trigger'], trigger)
self.assertEqual(trigger_event['user_profile_id'], self.bot_profile.id)
mock_queue_json_publish.side_effect = check_values_passed
self.send_stream_message(
self.user_profile,
'Denmark',
content)
self.assertTrue(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_stream_message_without_mention(self, mock_queue_json_publish: mock.Mock) -> None:
sender = self.user_profile
self.send_stream_message(sender, "Denmark")
self.assertFalse(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_stream_mention_from_bot(self, mock_queue_json_publish: mock.Mock) -> None:
for bot_type in BOT_TYPE_TO_QUEUE_NAME:
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
self.send_stream_message(
self.second_bot_profile,
'Denmark',
'@**FooBot** foo bar!!!')
self.assertFalse(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_trigger_on_personal_message_from_user(self, mock_queue_json_publish: mock.Mock) -> None:
for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
sender = self.user_profile
recipient = self.bot_profile
def check_values_passed(
queue_name: Any,
trigger_event: Union[Mapping[Any, Any], Any],
) -> None:
self.assertEqual(queue_name, expected_queue_name)
self.assertEqual(trigger_event["user_profile_id"], self.bot_profile.id)
self.assertEqual(trigger_event["trigger"], "private_message")
self.assertEqual(trigger_event["message"]["sender_email"], sender.email)
display_recipients = [
trigger_event["message"]["display_recipient"][0]["email"],
trigger_event["message"]["display_recipient"][1]["email"],
]
self.assertTrue(sender.email in display_recipients)
self.assertTrue(recipient.email in display_recipients)
mock_queue_json_publish.side_effect = check_values_passed
self.send_personal_message(sender, recipient, 'test')
self.assertTrue(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_personal_message_from_bot(self, mock_queue_json_publish: mock.Mock) -> None:
for bot_type in BOT_TYPE_TO_QUEUE_NAME:
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
sender = self.second_bot_profile
recipient = self.bot_profile
self.send_personal_message(sender, recipient)
self.assertFalse(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_trigger_on_huddle_message_from_user(self, mock_queue_json_publish: mock.Mock) -> None:
for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
self.second_bot_profile.bot_type = bot_type
self.second_bot_profile.save()
sender = self.user_profile
recipients = [self.bot_profile, self.second_bot_profile]
profile_ids = [self.bot_profile.id, self.second_bot_profile.id]
def check_values_passed(
queue_name: Any,
trigger_event: Union[Mapping[Any, Any], Any],
) -> None:
self.assertEqual(queue_name, expected_queue_name)
self.assertIn(trigger_event["user_profile_id"], profile_ids)
profile_ids.remove(trigger_event["user_profile_id"])
self.assertEqual(trigger_event["trigger"], "private_message")
self.assertEqual(trigger_event["message"]["sender_email"], sender.email)
self.assertEqual(trigger_event["message"]["type"], 'private')
mock_queue_json_publish.side_effect = check_values_passed
self.send_huddle_message(sender, recipients, 'test')
self.assertEqual(mock_queue_json_publish.call_count, 2)
mock_queue_json_publish.reset_mock()
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_huddle_message_from_bot(self, mock_queue_json_publish: mock.Mock) -> None:
for bot_type in BOT_TYPE_TO_QUEUE_NAME:
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
sender = self.second_bot_profile
recipients = [self.user_profile, self.bot_profile]
self.send_huddle_message(sender, recipients)
self.assertFalse(mock_queue_json_publish.called)
|
|
## INFO ########################################################################
## ##
## COUBLET ##
## ======= ##
## ##
## Cross-platform desktop client to follow posts from COUB ##
## Version: 0.6.93.172 (20140814) ##
## ##
## File: views/window.py ##
## ##
## Designed and written by Peter Varo. Copyright (c) 2014 ##
## License agreement is provided in the LICENSE file ##
## For more info visit: https://github.com/petervaro/coub ##
## ##
## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ##
## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ##
## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ##
## http://coub.com ##
## ##
######################################################################## INFO ##
# Import PyQt5 modules
from PyQt5.QtCore import Qt, QTimer, QElapsedTimer
from PyQt5.QtWidgets import (QWidget,
QFrame,
QHBoxLayout,
QVBoxLayout,
QScrollArea,
QDesktopWidget)
# Import Coublet modules
from views.vars import *
from models.cache import CACHE
from models.api import CoubAPI
from widgets.handler import CoubletMouseEventHandler
from widgets.button import (CoubletButtonWidget,
VERTICAL,
HORIZONTAL,
ICON_AND_LABEL,
LABEL_AND_ICON)
#------------------------------------------------------------------------------#
class CoubletWindowView(QWidget):
SCROLL_POSITIVE = 30
SCROLL_NEGATIVE = -SCROLL_POSITIVE
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, presenter, title):
super().__init__(None)
# Store static values
self._presenter = presenter
self._title = title.upper()
self.setWindowTitle(title)
self._buttons = []
self._stream = None
# Build GUI
self._build_gui()
        # Overload the closing and scrolling events and rename them,
        # just for the sake of under-scored names ;)
self.closeEvent = self.on_exit
self.wheelEvent = self.on_mouse_scroll
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_exit(self, event):
# Get current dimension and store in cache
dim = self.geometry()
CACHE['dimension'] = dim.x(), dim.y(), dim.width(), dim.height()
# TODO: this call is at the wrong place
CACHE.save()
# Exit
event.accept()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_mouse_scroll(self, event):
# Get the "stregth" of scroll
dy = event.pixelDelta().y()
# If "hard" enoough downward
if dy < self.SCROLL_NEGATIVE:
self._presenter.load_posts()
# If "hard" enough upward
elif dy > self.SCROLL_POSITIVE:
self._presenter.sync_posts()
# Kill posts in stream which are not visible
self._presenter.reset_unseen_posts()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_menu_button_pressed(self, index):
# Report event to presenter
self._presenter.set_active_stream(index)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def remove_stream(self, index):
# Set button deselected
self._buttons[index].deselect()
# Remove stream from layout
self._posts.takeAt(1)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def set_stream(self, index, stream):
# Set button selected
self._buttons[index].select()
# Indicate change in window title too
self.setWindowTitle(
'{} | {}'.format(self._title, CoubAPI.STREAM_NAMES[index].upper()))
# Set stream to layout
self._posts.insertLayout(1, stream)
self._stream = stream
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def get_scroll_position(self):
# Get position of scroll bar
return self._scroll_area.verticalScrollBar().sliderPosition()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def set_scroll_position(self, value):
# Set position of scroll bar
self._scroll_area.verticalScrollBar().setSliderPosition(value)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def show_scroll_indicators(self, up=False, down=False):
# Place scroll indicators
up_space = down_space = POST_SPACING_FULL
if up:
self._scroll_up.show()
up_space = 0
if down:
self._scroll_down.show()
down_space = 0
# Set leading and trailing padding
self._posts.setContentsMargins(0, up_space, 0, down_space)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def hide_scroll_indicators(self, up=False, down=False):
# Remove scroll indicators
up_space = down_space = 0
if up:
self._scroll_up.hide()
up_space = POST_SPACING_FULL
if down:
self._scroll_down.hide()
down_space = POST_SPACING_FULL
# Set leading and trailing padding
self._posts.setContentsMargins(0, up_space, 0, down_space)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _build_gui(self):
# Storages
buttons = self._buttons
# Unpack dimension data
x, y, width, height = CACHE['dimension']
        # If the position has not been set before
if x is NotImplemented:
screen = QDesktopWidget().screenGeometry()
            x, y = (screen.width() - width) // 2, (screen.height() - height) // 2
# Set window position and dimension
self.setGeometry(x, y, width, height)
self.setFixedWidth(width)
# Create layout for the entire application and zero-out
self.layout = main_layout = QVBoxLayout()
main_layout.setSpacing(0)
main_layout.setContentsMargins(0, 0, 0, 0)
# Create and add scrollable area for streams
self._scroll_area = posts = QScrollArea()
posts.setWidgetResizable(True)
posts.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
posts.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
posts.setFrameShape(QFrame.NoFrame)
# Create a main-stream widget
main_stream = QWidget()
main_stream.setFixedWidth(width)
# TODO: rename self._posts to something meaningful
self._posts = posts_layout = QVBoxLayout()
posts_layout.setSpacing(POST_SPACING_FULL)
posts_layout.setContentsMargins(0, 0, 0, 0)
# HACK: in both scroll arrows the 'padding_left' value is a hack.
# The reason why the arrows are not aligned to the horizontal
# center is unknown as it looks like everything is set up properly
# Add scroll-up icon and text
self._scroll_up = CoubletButtonWidget(icon=CONSTANTS['icon_scroll_up'],
label='SCROLL UP TO REFRESH',
font=CONSTANTS['text_font_generic'],
palette=CONSTANTS['text_color_light'],
order=ICON_AND_LABEL,
orientation=VERTICAL,
spacing=SMALL_PADDING,
padding_top=POST_SPACING_FULL,
padding_left=8)
posts_layout.addWidget(self._scroll_up, alignment=Qt.AlignHCenter)
# Dynamic space
posts_layout.addStretch(0)
# Add scroll-down icon and text
self._scroll_down = CoubletButtonWidget(icon=CONSTANTS['icon_scroll_down'],
label='SCROLL DOWN TO LOAD MORE',
font=CONSTANTS['text_font_generic'],
palette=CONSTANTS['text_color_light'],
order=LABEL_AND_ICON,
orientation=VERTICAL,
spacing=SMALL_PADDING,
padding_bottom=POST_SPACING_FULL,
padding_left=8)
posts_layout.addWidget(self._scroll_down, alignment=Qt.AlignHCenter)
# Set posts' layout to stream, add stream to main layout
main_stream.setLayout(posts_layout)
posts.setWidget(main_stream)
main_layout.addWidget(posts)
# Create menu-bar
menu_bar = QWidget()
menu_bar.setPalette(CONSTANTS['panel_color_darker'])
menu_bar.setAutoFillBackground(True)
# Create layout for menu-bar and zero-out
menu_bar_layout = QVBoxLayout()
menu_bar_layout.setSpacing(0)
menu_bar_layout.setContentsMargins(0, 0, 0, 0)
# Create layout for menu buttons and zero-out
menu_buttons_layout = QHBoxLayout()
menu_buttons_layout.setSpacing(0)
menu_buttons_layout.setContentsMargins(0, 0, 0, 0)
# Add menu-buttons to menu-bar
menu_bar_layout.addSpacing(2*SMALL_PADDING)
menu_bar_layout.addLayout(menu_buttons_layout)
menu_bar_layout.addSpacing(2*SMALL_PADDING)
# Assign layout and add menu-bar to app
menu_bar.setLayout(menu_bar_layout)
main_layout.addWidget(menu_bar)
        # Add buttons and spaces to the menu-buttons layout
menu_buttons_layout.addSpacing(2*SMALL_PADDING)
# get default double-click interval
for i, menu_item in enumerate(CoubAPI.STREAM_NAMES):
# If not the first item, add
# auto-stretching before it
if i:
menu_buttons_layout.addStretch(0)
# Add menu item
icon_name = 'icon_' + menu_item
click = CoubletMouseEventHandler(l_single=lambda n=i: self.on_menu_button_pressed(n))
menu_button = CoubletButtonWidget(icon=CONSTANTS[icon_name],
icon_selected=CONSTANTS[icon_name + '_selected'],
label=menu_item.upper(),
order=ICON_AND_LABEL,
orientation=HORIZONTAL,
font=CONSTANTS['text_font_generic'],
palette=CONSTANTS['text_color_light'],
palette_selected=CONSTANTS['text_color_light_selected'],
spacing=SMALL_PADDING,
mouse_event_handler=click)
buttons.append(menu_button)
menu_buttons_layout.addWidget(menu_button)
# Tail padding
menu_buttons_layout.addSpacing(2*SMALL_PADDING)
self.setLayout(main_layout)
self.setPalette(CONSTANTS['panel_color_dark'])
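#------------------------------------------------------------------------------#
# Illustration (added, not part of the original module): judging from the calls
# above, the presenter handed to CoubletWindowView is expected to provide at
# least these methods. A minimal stand-in, usable only inside a running
# QApplication, might look like:
#
#     class PresenterSketch:
#         def load_posts(self): ...                # hard scroll downward
#         def sync_posts(self): ...                # hard scroll upward
#         def reset_unseen_posts(self): ...        # after every scroll event
#         def set_active_stream(self, index): ...  # menu button pressed
#
#     view = CoubletWindowView(PresenterSketch(), 'COUBLET')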
|
|
import os
import time
import numpy as np
import argparse
import functools
import shutil
import math
import multiprocessing
import paddle
import paddle.fluid as fluid
import reader
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
from train import build_program
from train import train_parameters
from infer import draw_bounding_box_on_image
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('learning_rate', float, 0.0001, "Learning rate.")
add_arg('batch_size', int, 64, "Minibatch size.")
add_arg('epoc_num', int, 20, "Epoch number.")
add_arg('use_gpu', bool, True, "Whether use GPU.")
add_arg('parallel', bool, True, "Whether train in parallel on multi-devices.")
add_arg('model_save_dir', str, 'quant_model', "The path to save model.")
add_arg('init_model', str, 'ssd_mobilenet_v1_pascalvoc', "The init model path.")
add_arg('ap_version', str, '11point', "mAP version can be integral or 11point.")
add_arg('image_shape', str, '3,300,300', "Input image shape.")
add_arg('mean_BGR', str, '127.5,127.5,127.5', "Mean value for B,G,R channel which will be subtracted.")
add_arg('lr_epochs', str, '30,60', "The learning decay steps.")
add_arg('lr_decay_rates', str, '1,0.1,0.01', "The learning decay rates for each step.")
add_arg('data_dir', str, 'data/pascalvoc', "Data directory")
add_arg('act_quant_type', str, 'abs_max', "Quantize type of activation, which can be abs_max or range_abs_max")
add_arg('image_path', str, '', "The image used to inference and visualize.")
add_arg('confs_threshold', float, 0.5, "Confidence threshold to draw bbox.")
add_arg('mode', str, 'train', "Job mode can be one of ['train', 'test', 'infer'].")
# yapf: enable
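# Illustrative invocations (the script name is an assumption; the paths are the
# defaults defined above):
#   python main_quant.py --mode train --init_model ssd_mobilenet_v1_pascalvoc \
#       --data_dir data/pascalvoc --act_quant_type abs_max
#   python main_quant.py --mode test --init_model quant_model/best_map
#   python main_quant.py --mode infer --init_model quant_model \
#       --image_path some_image.jpg --confs_threshold 0.5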
def test(exe, test_prog, map_eval, test_py_reader):
_, accum_map = map_eval.get_map_var()
map_eval.reset(exe)
test_py_reader.start()
try:
batch = 0
while True:
test_map, = exe.run(test_prog, fetch_list=[accum_map])
if batch % 10 == 0:
print("Batch {0}, map {1}".format(batch, test_map))
batch += 1
except fluid.core.EOFException:
test_py_reader.reset()
finally:
test_py_reader.reset()
print("Test map {0}".format(test_map))
return test_map
def save_model(exe, main_prog, model_save_dir, postfix):
model_path = os.path.join(model_save_dir, postfix)
if os.path.isdir(model_path):
shutil.rmtree(model_path)
fluid.io.save_persistables(exe, model_path, main_program=main_prog)
def train(args,
data_args,
train_params,
train_file_list,
val_file_list):
model_save_dir = args.model_save_dir
init_model = args.init_model
epoc_num = args.epoc_num
use_gpu = args.use_gpu
parallel = args.parallel
is_shuffle = True
act_quant_type = args.act_quant_type
if use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
batch_size = train_params['batch_size']
batch_size_per_device = batch_size // devices_num
num_workers = 4
startup_prog = fluid.Program()
train_prog = fluid.Program()
test_prog = fluid.Program()
train_py_reader, loss = build_program(
main_prog=train_prog,
startup_prog=startup_prog,
train_params=train_params,
is_train=True)
test_py_reader, map_eval, _, _ = build_program(
main_prog=test_prog,
startup_prog=startup_prog,
train_params=train_params,
is_train=False)
test_prog = test_prog.clone(for_test=True)
transpiler = fluid.contrib.QuantizeTranspiler(weight_bits=8,
activation_bits=8,
activation_quantize_type=act_quant_type,
weight_quantize_type='abs_max')
transpiler.training_transpile(train_prog, startup_prog)
transpiler.training_transpile(test_prog, startup_prog)
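    # Note (added for clarity): training_transpile rewrites both programs for
    # quantization-aware training, inserting simulated quantize/dequantize ops
    # using the 8-bit widths and the abs_max / range_abs_max activation scheme
    # configured above; which ops get rewritten is decided by the transpiler.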
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
if init_model:
print('Load init model %s.' % init_model)
def if_exist(var):
return os.path.exists(os.path.join(init_model, var.name))
fluid.io.load_vars(exe, init_model, main_program=train_prog,
predicate=if_exist)
else:
print('There is no init model.')
if parallel:
train_exe = fluid.ParallelExecutor(main_program=train_prog,
use_cuda=True if use_gpu else False, loss_name=loss.name)
train_reader = reader.train(data_args,
train_file_list,
batch_size_per_device,
shuffle=is_shuffle,
num_workers=num_workers)
test_reader = reader.test(data_args, val_file_list, batch_size)
train_py_reader.decorate_paddle_reader(train_reader)
test_py_reader.decorate_paddle_reader(test_reader)
train_py_reader.start()
best_map = 0.
for epoc in range(epoc_num):
if epoc == 0:
# test quantized model without quantization-aware training.
test_map = test(exe, test_prog, map_eval, test_py_reader)
batch = 0
train_py_reader.start()
while True:
try:
# train
start_time = time.time()
if parallel:
outs = train_exe.run(fetch_list=[loss.name])
else:
outs = exe.run(train_prog, fetch_list=[loss])
end_time = time.time()
avg_loss = np.mean(np.array(outs[0]))
if batch % 10 == 0:
print("Epoc {:d}, batch {:d}, loss {:.6f}, time {:.5f}".format(
                        epoc, batch, avg_loss, end_time - start_time))
                batch += 1
except (fluid.core.EOFException, StopIteration):
train_reader().close()
train_py_reader.reset()
break
test_map = test(exe, test_prog, map_eval, test_py_reader)
save_model(exe, train_prog, model_save_dir, str(epoc))
if test_map > best_map:
best_map = test_map
save_model(exe, train_prog, model_save_dir, 'best_map')
print("Best test map {0}".format(best_map))
def eval(args, data_args, configs, val_file_list):
init_model = args.init_model
use_gpu = args.use_gpu
act_quant_type = args.act_quant_type
model_save_dir = args.model_save_dir
batch_size = configs['batch_size']
batch_size_per_device = batch_size
startup_prog = fluid.Program()
test_prog = fluid.Program()
test_py_reader, map_eval, nmsed_out, image = build_program(
main_prog=test_prog,
startup_prog=startup_prog,
train_params=configs,
is_train=False)
test_prog = test_prog.clone(for_test=True)
transpiler = fluid.contrib.QuantizeTranspiler(weight_bits=8,
activation_bits=8,
activation_quantize_type=act_quant_type,
weight_quantize_type='abs_max')
transpiler.training_transpile(test_prog, startup_prog)
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
def if_exist(var):
return os.path.exists(os.path.join(init_model, var.name))
fluid.io.load_vars(exe, init_model, main_program=test_prog,
predicate=if_exist)
# freeze after load parameters
transpiler.freeze_program(test_prog, place)
test_reader = reader.test(data_args, val_file_list, batch_size)
test_py_reader.decorate_paddle_reader(test_reader)
test_map = test(exe, test_prog, map_eval, test_py_reader)
print("Test model {0}, map {1}".format(init_model, test_map))
    # Converting the model to int8 before saving is possible, but Paddle
    # cannot yet load the int8 model for inference, so it stays disabled:
# transpiler.convert_to_int8(test_prog, place)
fluid.io.save_inference_model(model_save_dir, [image.name],
[nmsed_out], exe, test_prog)
def infer(args, data_args):
model_dir = args.init_model
image_path = args.image_path
confs_threshold = args.confs_threshold
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
    [inference_program, feed, fetch] = fluid.io.load_inference_model(
dirname=model_dir,
executor=exe,
model_filename='__model__')
#print(np.array(fluid.global_scope().find_var('conv2d_20.w_0').get_tensor()))
#print(np.max(np.array(fluid.global_scope().find_var('conv2d_20.w_0').get_tensor())))
infer_reader = reader.infer(data_args, image_path)
data = infer_reader()
data = data.reshape((1,) + data.shape)
outs = exe.run(inference_program,
feed={feed[0]: data},
fetch_list=fetch,
return_numpy=False)
out = np.array(outs[0])
draw_bounding_box_on_image(image_path, out, confs_threshold,
data_args.label_list)
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
# for pascalvoc
label_file = 'label_list'
train_list = 'trainval.txt'
val_list = 'test.txt'
dataset = 'pascalvoc'
mean_BGR = [float(m) for m in args.mean_BGR.split(",")]
image_shape = [int(m) for m in args.image_shape.split(",")]
lr_epochs = [int(m) for m in args.lr_epochs.split(",")]
lr_rates = [float(m) for m in args.lr_decay_rates.split(",")]
train_parameters[dataset]['image_shape'] = image_shape
train_parameters[dataset]['batch_size'] = args.batch_size
train_parameters[dataset]['lr'] = args.learning_rate
train_parameters[dataset]['epoc_num'] = args.epoc_num
train_parameters[dataset]['ap_version'] = args.ap_version
train_parameters[dataset]['lr_epochs'] = lr_epochs
train_parameters[dataset]['lr_decay'] = lr_rates
data_args = reader.Settings(
dataset=dataset,
data_dir=args.data_dir,
label_file=label_file,
resize_h=image_shape[1],
resize_w=image_shape[2],
mean_value=mean_BGR,
apply_distort=True,
apply_expand=True,
        ap_version=args.ap_version)
if args.mode == 'train':
train(args, data_args, train_parameters[dataset], train_list, val_list)
elif args.mode == 'test':
eval(args, data_args, train_parameters[dataset], val_list)
else:
infer(args, data_args)
|
|
from __future__ import division
from datetime import timedelta
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Count, Avg
from tracking.settings import TRACK_PAGEVIEWS, TRACK_ANONYMOUS_USERS
from tracking.cache import CacheManager
class VisitorManager(CacheManager):
def active(self, registered_only=True):
"Returns all active users, e.g. not logged and non-expired session."
visitors = self.filter(
expiry_time__gt=timezone.now(),
end_time=None
)
if registered_only:
visitors = visitors.filter(user__isnull=False)
return visitors
def registered(self):
return self.get_queryset().filter(user__isnull=False)
def guests(self):
return self.get_queryset().filter(user__isnull=True)
def stats(self, start_date, end_date, registered_only=False):
"""Returns a dictionary of visits including:
* total visits
* unique visits
* return ratio
* pages per visit (if pageviews are enabled)
* time on site
for all users, registered users and guests.
"""
visitors = self.filter(
start_time__gte=start_date,
start_time__lt=end_date
)
stats = {
'total': 0,
'unique': 0,
'return_ratio': 0,
}
# All visitors
stats['total'] = total_count = visitors.count()
unique_count = 0
# No visitors! Nothing more to do.
if not total_count:
return stats
# Avg time on site
total_time_on_site = visitors.aggregate(
avg_tos=Avg('time_on_site'))['avg_tos']
stats['time_on_site'] = timedelta(seconds=int(total_time_on_site))
# Registered user sessions
registered_visitors = visitors.filter(user__isnull=False)
registered_total_count = registered_visitors.count()
if registered_total_count:
registered_unique_count = registered_visitors.values(
'user'
).distinct().count()
# Avg time on site
time_on_site = registered_visitors.aggregate(
avg_tos=Avg('time_on_site'))['avg_tos']
# Update the total unique count..
unique_count += registered_unique_count
# Set the registered stats..
returns = (registered_total_count - registered_unique_count)
stats['registered'] = {
'total': registered_total_count,
'unique': registered_unique_count,
'return_ratio': (returns / registered_total_count) * 100,
'time_on_site': timedelta(seconds=int(time_on_site)),
}
# Get stats for our guests..
if TRACK_ANONYMOUS_USERS and not registered_only:
guests = visitors.filter(user__isnull=True)
guest_total_count = guests.count()
if guest_total_count:
guest_unique_count = guests.values(
'ip_address'
).distinct().count()
# Avg time on site
guest_time_on_site = guests.aggregate(
avg_tos=Avg('time_on_site'))['avg_tos']
# return rate
returns = (guest_total_count - guest_unique_count)
return_ratio = (returns / guest_total_count) * 100
time_on_site = timedelta(seconds=int(guest_time_on_site))
else:
guest_total_count = 0
guest_unique_count = 0
return_ratio = 0.0
time_on_site = timedelta(0)
# Update the total unique count
unique_count += guest_unique_count
stats['guests'] = {
'total': guest_total_count,
'unique': guest_unique_count,
'return_ratio': return_ratio,
'time_on_site': time_on_site,
}
# Finish setting the total visitor counts
returns = (total_count - unique_count)
stats['unique'] = unique_count
stats['return_ratio'] = (returns / total_count) * 100
# If pageviews are being tracked, add the aggregate pages-per-visit
if TRACK_PAGEVIEWS:
if 'registered' in stats:
pages_per_visit = registered_visitors.annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
stats['registered']['pages_per_visit'] = pages_per_visit
if TRACK_ANONYMOUS_USERS and not registered_only:
stats['guests']['pages_per_visit'] = guests.annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
total_per_visit = visitors.annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
else:
if 'registered' in stats:
total_per_visit = stats['registered']['pages_per_visit']
else:
total_per_visit = 0
stats['pages_per_visit'] = total_per_visit
return stats
def user_stats(self, start_date=None, end_date=None):
user_kwargs = {
'visit_history__start_time__lt': end_date,
}
visit_kwargs = {
'start_time__lt': end_date,
}
if start_date:
user_kwargs['visit_history__start_time__gte'] = start_date
visit_kwargs['start_time__gte'] = start_date
else:
user_kwargs['visit_history__start_time__isnull'] = False
visit_kwargs['start_time__isnull'] = False
users = list(get_user_model().objects.filter(**user_kwargs).annotate(
visit_count=Count('visit_history'),
time_on_site=Avg('visit_history__time_on_site'),
).filter(visit_count__gt=0).order_by(
'-time_on_site',
get_user_model().USERNAME_FIELD,
))
# Aggregate pageviews per visit
for user in users:
user.pages_per_visit = user.visit_history.filter(
**visit_kwargs
).annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
# Lop off the floating point, turn into timedelta
user.time_on_site = timedelta(seconds=int(user.time_on_site))
return users
class PageviewManager(models.Manager):
def stats(self, start_date=None, end_date=None, registered_only=False):
"""Returns a dictionary of pageviews including:
            * total pageviews
            * unique pageviews (distinct visitor/url combinations)
for all users, registered users and guests.
"""
pageviews = self.filter(
visitor__start_time__lt=end_date,
visitor__start_time__gte=start_date,
).select_related('visitor')
stats = {
'total': 0,
'unique': 0,
}
stats['total'] = total_views = pageviews.count()
unique_count = 0
if not total_views:
return stats
# Registered user sessions
registered_pageviews = pageviews.filter(visitor__user__isnull=False)
registered_count = registered_pageviews.count()
if registered_count:
registered_unique_count = registered_pageviews.values(
'visitor', 'url').distinct().count()
# Update the total unique count...
unique_count += registered_unique_count
stats['registered'] = {
'total': registered_count,
'unique': registered_unique_count,
}
if TRACK_ANONYMOUS_USERS and not registered_only:
guest_pageviews = pageviews.filter(visitor__user__isnull=True)
guest_count = guest_pageviews.count()
if guest_count:
guest_unique_count = guest_pageviews.values(
'visitor', 'url').distinct().count()
# Update the total unique count...
unique_count += guest_unique_count
stats['guests'] = {
'total': guest_count,
'unique': guest_unique_count,
}
# Finish setting the total visitor counts
stats['unique'] = unique_count
return stats
|
|
from django.test import TestCase
from django.core import mail
from django.core.mail.backends.locmem import EmailBackend as LocMemEmailBackend
from django.utils.timezone import now as datetime_now
from mailer.models import (Message, MessageLog, DontSendEntry, db_to_email, email_to_db,
PRIORITY_HIGH, PRIORITY_MEDIUM, PRIORITY_LOW, PRIORITY_DEFERRED)
import mailer
from mailer import engine
from mock import patch, Mock
import pickle
import lockfile
import smtplib
import time
class TestMailerEmailBackend(object):
outbox = []
def __init__(self, **kwargs):
del self.outbox[:]
def open(self):
pass
def close(self):
pass
def send_messages(self, email_messages):
self.outbox.extend(email_messages)
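# Note (added for clarity): ``outbox`` is deliberately a class attribute, so the
# messages collected by whichever backend instance django-mailer creates are
# visible to the tests as TestMailerEmailBackend.outbox; __init__ empties it in
# place so sends from one engine run don't leak into the next.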
class FailingMailerEmailBackend(LocMemEmailBackend):
def send_messages(self, email_messages):
raise smtplib.SMTPSenderRefused(1, "foo", "foo@foo.com")
class TestBackend(TestCase):
def test_save_to_db(self):
"""
        Test that send_mail creates a Message object in the DB instead of
        sending immediately, when EMAIL_BACKEND is set to the mailer DbBackend.
"""
self.assertEqual(Message.objects.count(), 0)
with self.settings(EMAIL_BACKEND="mailer.backend.DbBackend"):
mail.send_mail("Subject", "Body", "sender@example.com", ["recipient@example.com"])
self.assertEqual(Message.objects.count(), 1)
class TestSending(TestCase):
def setUp(self):
# Ensure outbox is empty at start
del TestMailerEmailBackend.outbox[:]
def test_mailer_email_backend(self):
"""
Test that calling "manage.py send_mail" actually sends mail using the
specified MAILER_EMAIL_BACKEND
"""
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend"):
mailer.send_mail("Subject", "Body", "sender1@example.com", ["recipient@example.com"])
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(len(TestMailerEmailBackend.outbox), 0)
engine.send_all()
self.assertEqual(len(TestMailerEmailBackend.outbox), 1)
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(MessageLog.objects.count(), 1)
def test_retry_deferred(self):
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.FailingMailerEmailBackend"):
mailer.send_mail("Subject", "Body", "sender2@example.com", ["recipient@example.com"])
engine.send_all()
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 1)
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
engine.send_all()
self.assertEqual(len(mail.outbox), 0)
# Should not have sent the deferred ones
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 1)
# Now mark them for retrying
Message.objects.retry_deferred()
engine.send_all()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(Message.objects.count(), 0)
def test_send_loop(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
with patch("mailer.engine.send_all", side_effect=StopIteration) as send:
with patch("time.sleep", side_effect=StopIteration) as sleep:
self.assertRaises(StopIteration, engine.send_loop)
sleep.assert_called_once_with(engine.EMPTY_QUEUE_SLEEP)
send.assert_not_called()
mailer.send_mail("Subject", "Body", "sender15@example.com", ["rec@example.com"])
self.assertRaises(StopIteration, engine.send_loop)
send.assert_called_once()
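        # Note (added for clarity): the first send_loop() call sees an empty
        # queue, so the patched time.sleep fires (and raises) before send_all is
        # ever reached; once a message is queued, send_loop() goes straight to
        # the patched send_all instead.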
def test_send_html(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
mailer.send_html_mail("Subject", "Body", "<html><body>Body</body></html>",
"htmlsender1@example.com", ["recipient@example.com"],
priority=PRIORITY_HIGH)
            # Message is queued, not deferred
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 0)
engine.send_all()
self.assertEqual(len(mail.outbox), 1)
sent = mail.outbox[0]
# Default "plain text"
self.assertEqual(sent.body, "Body")
self.assertEqual(sent.content_subtype, "plain")
# Alternative "text/html"
self.assertEqual(sent.alternatives[0],
("<html><body>Body</body></html>", "text/html"))
def test_send_mass_mail(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
mails = (
("Subject", "Body", "mass0@example.com", ["recipient0@example.com"]),
("Subject", "Body", "mass1@example.com", ["recipient1@example.com"]),
("Subject", "Body", "mass2@example.com", ["recipient2@example.com"]),
("Subject", "Body", "mass3@example.com", ["recipient3@example.com"]),
)
mailer.send_mass_mail(mails)
self.assertEqual(Message.objects.count(), 4)
self.assertEqual(Message.objects.deferred().count(), 0)
engine.send_all()
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(Message.objects.deferred().count(), 0)
self.assertEqual(len(mail.outbox), 4)
for i, sent in enumerate(mail.outbox):
# Default "plain text"
self.assertEqual(sent.from_email, "mass{0}@example.com".format(i))
self.assertEqual(sent.to, ["recipient{0}@example.com".format(i)])
def test_mail_admins(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend", ADMINS=(("Test", "testadmin@example.com"),)): # noqa
mailer.mail_admins("Subject", "Admin Body")
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 0)
engine.send_all()
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(Message.objects.deferred().count(), 0)
self.assertEqual(len(mail.outbox), 1)
sent = mail.outbox[0]
# Default "plain text"
self.assertEqual(sent.body, "Admin Body")
self.assertEqual(sent.to, ["testadmin@example.com"])
def test_mail_managers(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend", MANAGERS=(("Test", "testmanager@example.com"),)): # noqa
mailer.mail_managers("Subject", "Manager Body")
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 0)
engine.send_all()
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(Message.objects.deferred().count(), 0)
self.assertEqual(len(mail.outbox), 1)
sent = mail.outbox[0]
# Default "plain text"
self.assertEqual(sent.body, "Manager Body")
self.assertEqual(sent.to, ["testmanager@example.com"])
def test_blacklisted_emails(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
now = datetime_now()
obj = DontSendEntry.objects.create(to_address="nogo@example.com", when_added=now)
            self.assertEqual(obj.to_address, "nogo@example.com")
mailer.send_mail("Subject", "GoBody", "send1@example.com", ["go@example.com"])
mailer.send_mail("Subject", "NoGoBody", "send2@example.com", ["nogo@example.com"])
self.assertEqual(Message.objects.count(), 2)
self.assertEqual(Message.objects.deferred().count(), 0)
engine.send_all()
# All messages are processed
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(Message.objects.deferred().count(), 0)
# but only one should get sent
self.assertEqual(len(mail.outbox), 1)
sent = mail.outbox[0]
# Default "plain text"
self.assertEqual(sent.body, "GoBody")
self.assertEqual(sent.to, ["go@example.com"])
def test_control_max_delivery_amount(self):
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend", MAILER_EMAIL_MAX_BATCH=2): # noqa
mailer.send_mail("Subject1", "Body1", "sender1@example.com", ["recipient1@example.com"])
mailer.send_mail("Subject2", "Body2", "sender2@example.com", ["recipient2@example.com"])
mailer.send_mail("Subject3", "Body3", "sender3@example.com", ["recipient3@example.com"])
self.assertEqual(Message.objects.count(), 3)
self.assertEqual(len(TestMailerEmailBackend.outbox), 0)
engine.send_all()
self.assertEqual(len(TestMailerEmailBackend.outbox), 2)
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(MessageLog.objects.count(), 2)
def test_control_max_retry_amount(self):
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend"): # noqa
# 5 normal emails scheduled for delivery
mailer.send_mail("Subject1", "Body1", "sender1@example.com", ["recipient1@example.com"])
mailer.send_mail("Subject2", "Body2", "sender2@example.com", ["recipient2@example.com"])
mailer.send_mail("Subject3", "Body3", "sender3@example.com", ["recipient3@example.com"])
mailer.send_mail("Subject4", "Body4", "sender4@example.com", ["recipient4@example.com"])
mailer.send_mail("Subject5", "Body5", "sender5@example.com", ["recipient5@example.com"])
self.assertEqual(Message.objects.count(), 5)
self.assertEqual(Message.objects.deferred().count(), 0)
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.FailingMailerEmailBackend", MAILER_EMAIL_MAX_DEFERRED=2): # noqa
                # 2 will get deferred, 3 remain undeferred
with patch("logging.warning") as w:
engine.send_all()
w.assert_called_once()
arg = w.call_args[0][0]
self.assertIn("EMAIL_MAX_DEFERRED", arg)
self.assertIn("stopping for this round", arg)
self.assertEqual(Message.objects.count(), 5)
self.assertEqual(Message.objects.deferred().count(), 2)
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend", MAILER_EMAIL_MAX_DEFERRED=2): # noqa
# 3 will be delivered, 2 remain deferred
engine.send_all()
self.assertEqual(len(TestMailerEmailBackend.outbox), 3)
# Should not have sent the deferred ones
self.assertEqual(Message.objects.count(), 2)
self.assertEqual(Message.objects.deferred().count(), 2)
# Now mark them for retrying
Message.objects.retry_deferred()
engine.send_all()
self.assertEqual(len(TestMailerEmailBackend.outbox), 2)
self.assertEqual(Message.objects.count(), 0)
def test_throttling_delivery(self):
TIME = 1 # throttle time = 1 second
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend", MAILER_EMAIL_THROTTLE=TIME): # noqa
mailer.send_mail("Subject", "Body", "sender13@example.com", ["recipient@example.com"])
mailer.send_mail("Subject", "Body", "sender14@example.com", ["recipient@example.com"])
start_time = time.time()
engine.send_all()
throttled_time = time.time() - start_time
self.assertEqual(len(TestMailerEmailBackend.outbox), 2)
self.assertEqual(Message.objects.count(), 0)
# Notes: 2 * TIME because 2 emails are sent during the test
self.assertGreater(throttled_time, 2 * TIME)
class TestLockNormal(TestCase):
def setUp(self):
class CustomError(Exception):
pass
self.CustomError = CustomError
self.lock_mock = Mock()
self.patcher_lock = patch("lockfile.FileLock", return_value=self.lock_mock)
self.patcher_prio = patch("mailer.engine.prioritize", side_effect=CustomError)
self.lock = self.patcher_lock.start()
self.prio = self.patcher_prio.start()
def test(self):
self.assertRaises(self.CustomError, engine.send_all)
self.lock_mock.acquire.assert_called_once_with(engine.LOCK_WAIT_TIMEOUT)
self.lock.assert_called_once_with("send_mail")
self.prio.assert_called_once()
def tearDown(self):
self.patcher_lock.stop()
self.patcher_prio.stop()
class TestLockLocked(TestCase):
def setUp(self):
config = {
"acquire.side_effect": lockfile.AlreadyLocked,
}
self.lock_mock = Mock(**config)
self.patcher_lock = patch("lockfile.FileLock", return_value=self.lock_mock)
self.patcher_prio = patch("mailer.engine.prioritize", side_effect=Exception)
self.lock = self.patcher_lock.start()
self.prio = self.patcher_prio.start()
def test(self):
engine.send_all()
self.lock_mock.acquire.assert_called_once_with(engine.LOCK_WAIT_TIMEOUT)
self.lock.assert_called_once_with("send_mail")
self.prio.assert_not_called()
def tearDown(self):
self.patcher_lock.stop()
self.patcher_prio.stop()
class TestLockTimeout(TestCase):
def setUp(self):
config = {
"acquire.side_effect": lockfile.LockTimeout,
}
self.lock_mock = Mock(**config)
self.patcher_lock = patch("lockfile.FileLock", return_value=self.lock_mock)
self.patcher_prio = patch("mailer.engine.prioritize", side_effect=Exception)
self.lock = self.patcher_lock.start()
self.prio = self.patcher_prio.start()
def test(self):
engine.send_all()
self.lock_mock.acquire.assert_called_once_with(engine.LOCK_WAIT_TIMEOUT)
self.lock.assert_called_once_with("send_mail")
self.prio.assert_not_called()
def tearDown(self):
self.patcher_lock.stop()
self.patcher_prio.stop()
class TestPrioritize(TestCase):
def test_prioritize(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
mailer.send_mail("Subject", "Body", "prio1@example.com", ["r@example.com"],
priority=PRIORITY_HIGH)
mailer.send_mail("Subject", "Body", "prio2@example.com", ["r@example.com"],
priority=PRIORITY_MEDIUM)
mailer.send_mail("Subject", "Body", "prio3@example.com", ["r@example.com"],
priority=PRIORITY_LOW)
mailer.send_mail("Subject", "Body", "prio4@example.com", ["r@example.com"],
priority=PRIORITY_HIGH)
mailer.send_mail("Subject", "Body", "prio5@example.com", ["r@example.com"],
priority=PRIORITY_HIGH)
mailer.send_mail("Subject", "Body", "prio6@example.com", ["r@example.com"],
priority=PRIORITY_LOW)
mailer.send_mail("Subject", "Body", "prio7@example.com", ["r@example.com"],
priority=PRIORITY_LOW)
mailer.send_mail("Subject", "Body", "prio8@example.com", ["r@example.com"],
priority=PRIORITY_MEDIUM)
mailer.send_mail("Subject", "Body", "prio9@example.com", ["r@example.com"],
priority=PRIORITY_MEDIUM)
mailer.send_mail("Subject", "Body", "prio10@example.com", ["r@example.com"],
priority=PRIORITY_LOW)
mailer.send_mail("Subject", "Body", "prio11@example.com", ["r@example.com"],
priority=PRIORITY_MEDIUM)
mailer.send_mail("Subject", "Body", "prio12@example.com", ["r@example.com"],
priority=PRIORITY_HIGH)
mailer.send_mail("Subject", "Body", "prio13@example.com", ["r@example.com"],
priority=PRIORITY_DEFERRED)
self.assertEqual(Message.objects.count(), 13)
self.assertEqual(Message.objects.deferred().count(), 1)
self.assertEqual(Message.objects.non_deferred().count(), 12)
messages = engine.prioritize()
# High priority
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio1@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio4@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio5@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio12@example.com")
msg.delete()
# Medium priority
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio2@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio8@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio9@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio11@example.com")
msg.delete()
# Low priority
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio3@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio6@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio7@example.com")
msg.delete()
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio10@example.com")
msg.delete()
# Add one more mail that should still get delivered
mailer.send_mail("Subject", "Body", "prio14@example.com", ["r@example.com"],
priority=PRIORITY_HIGH)
msg = next(messages)
self.assertEqual(msg.email.from_email, "prio14@example.com")
msg.delete()
# Ensure nothing else comes up
self.assertRaises(StopIteration, lambda: next(messages))
# Ensure deferred was not deleted
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 1)
class TestMessages(TestCase):
def test_message(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
mailer.send_mail("Subject Msg", "Body", "msg1@example.com", ["rec1@example.com"])
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 0)
self.assertEqual(MessageLog.objects.count(), 0)
msg = Message.objects.all()[0]
self.assertEqual(msg.email.from_email, "msg1@example.com")
self.assertEqual(msg.to_addresses, ["rec1@example.com"])
self.assertEqual(msg.subject, "Subject Msg")
# Fake a msg stored in DB with invalid data
msg.message_data = ""
self.assertEqual(msg.to_addresses, [])
self.assertEqual(msg.subject, "")
msg.save()
with patch("logging.warning") as w:
engine.send_all()
w.assert_called_once()
arg = w.call_args[0][0]
self.assertIn("message discarded due to failure in converting from DB", arg)
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(Message.objects.deferred().count(), 0)
# Delivery should discard broken messages
self.assertEqual(MessageLog.objects.count(), 0)
def test_message_log(self):
with self.settings(MAILER_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend"):
mailer.send_mail("Subject Log", "Body", "log1@example.com", ["1gol@example.com"])
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 0)
self.assertEqual(MessageLog.objects.count(), 0)
engine.send_all()
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(Message.objects.deferred().count(), 0)
self.assertEqual(MessageLog.objects.count(), 1)
log = MessageLog.objects.all()[0]
self.assertEqual(log.email.from_email, "log1@example.com")
self.assertEqual(log.to_addresses, ["1gol@example.com"])
self.assertEqual(log.subject, "Subject Log")
# Fake a log entry without email
log.message_data = ""
self.assertEqual(log.to_addresses, [])
self.assertEqual(log.subject, "")
class TestDbToEmail(TestCase):
def test_db_to_email(self):
# Empty/Invalid content
self.assertEqual(db_to_email(""), None)
self.assertEqual(db_to_email(None), None)
# Other objects which should be returned as-is
data = "Hello Email"
self.assertEqual(db_to_email(email_to_db(data)), data)
data = ["Test subject", "Test body", "testsender@example.com", ["testrec@example.com"]]
self.assertEqual(db_to_email(email_to_db(data)), data)
email = mail.EmailMessage(*data)
converted_email = db_to_email(email_to_db(email))
self.assertEqual(converted_email.body, email.body)
self.assertEqual(converted_email.subject, email.subject)
self.assertEqual(converted_email.from_email, email.from_email)
self.assertEqual(converted_email.to, email.to)
# Test old pickle in DB format
db_email = pickle.dumps(email)
converted_email = db_to_email(db_email)
self.assertEqual(converted_email.body, email.body)
self.assertEqual(converted_email.subject, email.subject)
self.assertEqual(converted_email.from_email, email.from_email)
self.assertEqual(converted_email.to, email.to)
|
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import time
from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc
from tempest.common import service_client
from tempest import exceptions
class OrchestrationClient(service_client.ServiceClient):
def list_stacks(self, params=None):
"""Lists all stacks for a user."""
uri = 'stacks'
if params:
uri += '?%s' % urllib.urlencode(params)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['stacks'])
def create_stack(self, name, disable_rollback=True, parameters=None,
timeout_mins=60, template=None, template_url=None,
environment=None, files=None):
if parameters is None:
parameters = {}
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
template_url,
environment,
files)
uri = 'stacks'
resp, body = self.post(uri, headers=headers, body=body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_stack(self, stack_identifier, name, disable_rollback=True,
parameters=None, timeout_mins=60, template=None,
template_url=None, environment=None, files=None):
if parameters is None:
parameters = {}
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
template_url,
            environment,
            files)
uri = "stacks/%s" % stack_identifier
resp, body = self.put(uri, headers=headers, body=body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
def _prepare_update_create(self, name, disable_rollback=True,
parameters=None, timeout_mins=60,
template=None, template_url=None,
environment=None, files=None):
if parameters is None:
parameters = {}
post_body = {
"stack_name": name,
"disable_rollback": disable_rollback,
"parameters": parameters,
"timeout_mins": timeout_mins,
"template": "HeatTemplateFormatVersion: '2012-12-12'\n",
"environment": environment,
"files": files
}
if template:
post_body['template'] = template
if template_url:
post_body['template_url'] = template_url
body = json.dumps(post_body)
# Password must be provided on stack create so that heat
# can perform future operations on behalf of the user
headers = self.get_headers()
headers['X-Auth-Key'] = self.password
headers['X-Auth-User'] = self.user
return headers, body
def show_stack(self, stack_identifier):
"""Returns the details of a single stack."""
url = "stacks/%s" % stack_identifier
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['stack'])
def suspend_stack(self, stack_identifier):
"""Suspend a stack."""
url = 'stacks/%s/actions' % stack_identifier
body = {'suspend': None}
resp, body = self.post(url, json.dumps(body))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp)
def resume_stack(self, stack_identifier):
"""Resume a stack."""
url = 'stacks/%s/actions' % stack_identifier
body = {'resume': None}
resp, body = self.post(url, json.dumps(body))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp)
def list_resources(self, stack_identifier):
"""Returns the details of a single resource."""
url = "stacks/%s/resources" % stack_identifier
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['resources'])
def show_resource(self, stack_identifier, resource_name):
"""Returns the details of a single resource."""
url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['resource'])
def delete_stack(self, stack_identifier):
"""Deletes the specified Stack."""
resp, _ = self.delete("stacks/%s" % str(stack_identifier))
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$'):
"""Waits for a Resource to reach a given status."""
start = int(time.time())
fail_regexp = re.compile(failure_pattern)
while True:
try:
body = self.show_resource(
stack_identifier, resource_name)
except lib_exc.NotFound:
# ignore this, as the resource may not have
# been created yet
pass
else:
resource_name = body['resource_name']
resource_status = body['resource_status']
if resource_status == status:
return
if fail_regexp.search(resource_status):
raise exceptions.StackResourceBuildErrorException(
resource_name=resource_name,
stack_identifier=stack_identifier,
resource_status=resource_status,
resource_status_reason=body['resource_status_reason'])
if int(time.time()) - start >= self.build_timeout:
message = ('Resource %s failed to reach %s status '
'(current %s) within the required time (%s s).' %
(resource_name,
status,
resource_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def wait_for_stack_status(self, stack_identifier, status,
failure_pattern='^.*_FAILED$'):
"""Waits for a Stack to reach a given status."""
start = int(time.time())
fail_regexp = re.compile(failure_pattern)
while True:
try:
body = self.show_stack(stack_identifier)
            except lib_exc.NotFound:
                if status == 'DELETE_COMPLETE':
                    return
                # The stack is gone but a non-delete status was requested;
                # re-raise rather than reading a stale or unbound body below.
                raise
stack_name = body['stack_name']
stack_status = body['stack_status']
if stack_status == status:
return body
if fail_regexp.search(stack_status):
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack_status,
stack_status_reason=body['stack_status_reason'])
if int(time.time()) - start >= self.build_timeout:
message = ('Stack %s failed to reach %s status (current: %s) '
'within the required time (%s s).' %
(stack_name, status, stack_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
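    # Typical polling flow (illustration only; the variable names and template
    # below are placeholders, not part of this client):
    #   body = client.create_stack('my_stack', template=some_template)
    #   stack_id = body['stack']['id']
    #   client.wait_for_stack_status(stack_id, 'CREATE_COMPLETE')
    #   ...
    #   client.delete_stack(stack_id)
    #   client.wait_for_stack_status(stack_id, 'DELETE_COMPLETE')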
def show_resource_metadata(self, stack_identifier, resource_name):
"""Returns the resource's metadata."""
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/metadata'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['metadata'])
def list_events(self, stack_identifier):
"""Returns list of all events for a stack."""
url = 'stacks/{stack_identifier}/events'.format(**locals())
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['events'])
def list_resource_events(self, stack_identifier, resource_name):
"""Returns list of all events for a resource from stack."""
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/events'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['events'])
def show_event(self, stack_identifier, resource_name, event_id):
"""Returns the details of a single stack's event."""
url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
'/{event_id}'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['event'])
def show_template(self, stack_identifier):
"""Returns the template for the stack."""
url = ('stacks/{stack_identifier}/template'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def _validate_template(self, post_body):
"""Returns the validation request result."""
post_body = json.dumps(post_body)
resp, body = self.post('validate', post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def validate_template(self, template, parameters=None):
"""Returns the validation result for a template with parameters."""
if parameters is None:
parameters = {}
post_body = {
'template': template,
'parameters': parameters,
}
return self._validate_template(post_body)
def validate_template_url(self, template_url, parameters=None):
"""Returns the validation result for a template with parameters."""
if parameters is None:
parameters = {}
post_body = {
'template_url': template_url,
'parameters': parameters,
}
return self._validate_template(post_body)
def list_resource_types(self):
"""List resource types."""
resp, body = self.get('resource_types')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['resource_types'])
def show_resource_type(self, resource_type_name):
"""Return the schema of a resource type."""
url = 'resource_types/%s' % resource_type_name
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def show_resource_type_template(self, resource_type_name):
"""Return the template of a resource type."""
url = 'resource_types/%s/template' % resource_type_name
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def create_software_config(self, name=None, config=None, group=None,
inputs=None, outputs=None, options=None):
headers, body = self._prep_software_config_create(
name, config, group, inputs, outputs, options)
url = 'software_configs'
resp, body = self.post(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def show_software_config(self, conf_id):
"""Returns a software configuration resource."""
url = 'software_configs/%s' % str(conf_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_software_config(self, conf_id):
"""Deletes a specific software configuration."""
url = 'software_configs/%s' % str(conf_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def create_software_deploy(self, server_id=None, config_id=None,
action=None, status=None,
input_values=None, output_values=None,
status_reason=None, signal_transport=None):
"""Creates or updates a software deployment."""
headers, body = self._prep_software_deploy_update(
None, server_id, config_id, action, status, input_values,
output_values, status_reason, signal_transport)
url = 'software_deployments'
resp, body = self.post(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_software_deploy(self, deploy_id=None, server_id=None,
config_id=None, action=None, status=None,
input_values=None, output_values=None,
status_reason=None, signal_transport=None):
"""Creates or updates a software deployment."""
headers, body = self._prep_software_deploy_update(
deploy_id, server_id, config_id, action, status, input_values,
output_values, status_reason, signal_transport)
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.put(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_software_deployments(self):
"""Returns a list of all deployments."""
url = 'software_deployments'
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def show_software_deployment(self, deploy_id):
"""Returns a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def show_software_deployment_metadata(self, server_id):
"""Return a config metadata for a specific server."""
url = 'software_deployments/metadata/%s' % server_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_software_deploy(self, deploy_id):
"""Deletes a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def _prep_software_config_create(self, name=None, conf=None, group=None,
inputs=None, outputs=None, options=None):
"""Prepares a software configuration body."""
post_body = {}
if name is not None:
post_body["name"] = name
if conf is not None:
post_body["config"] = conf
if group is not None:
post_body["group"] = group
if inputs is not None:
post_body["inputs"] = inputs
if outputs is not None:
post_body["outputs"] = outputs
if options is not None:
post_body["options"] = options
body = json.dumps(post_body)
headers = self.get_headers()
return headers, body
def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
config_id=None, action=None, status=None,
input_values=None, output_values=None,
status_reason=None,
signal_transport=None):
"""Prepares a deployment create or update (if an id was given)."""
post_body = {}
if deploy_id is not None:
post_body["id"] = deploy_id
if server_id is not None:
post_body["server_id"] = server_id
if config_id is not None:
post_body["config_id"] = config_id
if action is not None:
post_body["action"] = action
if status is not None:
post_body["status"] = status
if input_values is not None:
post_body["input_values"] = input_values
if output_values is not None:
post_body["output_values"] = output_values
if status_reason is not None:
post_body["status_reason"] = status_reason
if signal_transport is not None:
post_body["signal_transport"] = signal_transport
body = json.dumps(post_body)
headers = self.get_headers()
return headers, body
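# Illustrative sketch (not part of the original client) of the software
# config / software deployment round trip exposed by the methods above.
# ``client`` is assumed to be a configured instance of this class, and the
# response keys follow the Heat API ('software_config', 'software_deployment').
def _example_software_config_roundtrip(client, server_id):
    """Create a config, deploy it to a server, then clean both up."""
    config = client.create_software_config(
        name='example-config',
        config='#!/bin/sh\necho hello',
        group='script')
    config_id = config['software_config']['id']
    deploy = client.create_software_deploy(
        server_id=server_id,
        config_id=config_id,
        action='CREATE',
        status='IN_PROGRESS',
        status_reason='deploy in progress')
    deploy_id = deploy['software_deployment']['id']
    client.delete_software_deploy(deploy_id)
    client.delete_software_config(config_id)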
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from blazar.db import api as db_api
from blazar import exceptions
LOG = logging.getLogger(__name__)
class BaseStatus(object):
"""Base class of status."""
# All statuses
ALL = ()
# Valid status transitions
NEXT_STATUSES = {}
@classmethod
def is_valid_transition(cls, current_status, next_status, **kwargs):
"""Check validity of a status transition.
:param current_status: Current status
:param next_status: Next status
:return: True if the transition is valid
"""
if next_status not in cls.NEXT_STATUSES[current_status]:
LOG.warn('Invalid transition from %s to %s.',
current_status, next_status)
return False
return True
class EventStatus(BaseStatus):
"""Event status class."""
# Statuses of an event
UNDONE = 'UNDONE'
IN_PROGRESS = 'IN_PROGRESS'
DONE = 'DONE'
ERROR = 'ERROR'
ALL = (UNDONE, IN_PROGRESS, DONE, ERROR)
# Valid status transitions
NEXT_STATUSES = {
UNDONE: (IN_PROGRESS,),
IN_PROGRESS: (DONE, ERROR),
DONE: (),
ERROR: ()
}
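# Example (illustrative, not part of the original module): an event has to go
# through IN_PROGRESS before it can be DONE, so
#   EventStatus.is_valid_transition('UNDONE', 'IN_PROGRESS')  -> True
#   EventStatus.is_valid_transition('UNDONE', 'DONE')         -> False (and logs a warning)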
class ReservationStatus(BaseStatus):
"""Reservation status class."""
# Statuses of a reservation
PENDING = 'pending'
ACTIVE = 'active'
DELETED = 'deleted'
ERROR = 'error'
ALL = (PENDING, ACTIVE, DELETED, ERROR)
# Valid status transitions
NEXT_STATUSES = {
PENDING: (ACTIVE, DELETED, ERROR),
ACTIVE: (DELETED, ERROR),
DELETED: (),
ERROR: (DELETED,)
}
class LeaseStatus(BaseStatus):
"""Lease status class."""
# Stable statuses of a lease
PENDING = 'PENDING'
ACTIVE = 'ACTIVE'
TERMINATED = 'TERMINATED'
ERROR = 'ERROR'
STABLE = (PENDING, ACTIVE, TERMINATED, ERROR)
# Transitional statuses of a lease
CREATING = 'CREATING'
STARTING = 'STARTING'
UPDATING = 'UPDATING'
TERMINATING = 'TERMINATING'
DELETING = 'DELETING'
TRANSITIONAL = (CREATING, STARTING, UPDATING, TERMINATING, DELETING)
# All statuses
ALL = STABLE + TRANSITIONAL
# Valid status transitions
NEXT_STATUSES = {
PENDING: (STARTING, UPDATING, DELETING),
ACTIVE: (TERMINATING, UPDATING, DELETING),
TERMINATED: (UPDATING, DELETING),
ERROR: (TERMINATING, UPDATING, DELETING),
CREATING: (PENDING, DELETING),
STARTING: (ACTIVE, ERROR, DELETING),
UPDATING: STABLE + (DELETING,),
TERMINATING: (TERMINATED, ERROR, DELETING),
DELETING: (ERROR,)
}
@classmethod
def is_valid_transition(cls, current, next, **kwargs):
"""Check validity of a status transition.
:param current: Current status
:param next: Next status
:return: True if the transition is valid
"""
if super(LeaseStatus, cls).is_valid_transition(current,
next, **kwargs):
if cls.is_valid_combination(kwargs['lease_id'], next):
return True
else:
LOG.warn('Invalid combination of statuses.')
return False
@classmethod
def is_valid_combination(cls, lease_id, status):
"""Validator for the combination of statuses.
        Checks whether the combination of the statuses of the lease, its
        reservations and its events is valid.
:param lease_id: Lease ID
:param status: Lease status
:return: True if the combination is valid
"""
# Validate reservation statuses
reservations = db_api.reservation_get_all_by_lease_id(lease_id)
if any([r['status'] not in COMBINATIONS[status]['reservation']
for r in reservations]):
return False
# Validate event statuses
for event_type in ('start_lease', 'end_lease'):
event = db_api.event_get_first_sorted_by_filters(
'lease_id', 'asc',
{'lease_id': lease_id, 'event_type': event_type}
)
if event['status'] not in COMBINATIONS[status][event_type]:
return False
return True
@classmethod
def is_stable(cls, lease_id):
"""Check if the lease status is stable
:param lease_id: Lease ID
:return: True if the status is in (PENDING, ACTIVE, TERMINATED, ERROR)
"""
lease = db_api.lease_get(lease_id)
return (lease['status'] in cls.STABLE)
@classmethod
def lease_status(cls, transition, result_in):
"""Decorator for managing a lease status.
This checks and updates a lease status before and after executing a
decorated function.
:param transition: A status which is set while executing the
decorated function.
:param result_in: A tuple of statuses to which a lease transits after
executing the decorated function.
"""
def decorator(func):
def wrapper(*args, **kwargs):
# Update a lease status
lease_id = kwargs['lease_id']
lease = db_api.lease_get(lease_id)
if cls.is_valid_transition(lease['status'],
transition,
lease_id=lease_id):
db_api.lease_update(lease_id,
{'status': transition})
LOG.debug('Status of lease %s changed from %s to %s.',
lease_id, lease['status'], transition)
else:
LOG.warn('Aborting %s. '
'Invalid lease status transition from %s to %s.',
func.__name__, lease['status'],
transition)
raise exceptions.InvalidStatus
# Executing the wrapped function
try:
result = func(*args, **kwargs)
except Exception as e:
LOG.exception('Lease %s went into ERROR status. %s',
lease_id, str(e))
db_api.lease_update(lease_id,
{'status': cls.ERROR})
raise e
# Update a lease status if it exists
if db_api.lease_get(lease_id):
next_status = cls.derive_stable_status(lease_id)
if (next_status in result_in
and cls.is_valid_transition(transition,
next_status,
lease_id=lease_id)):
db_api.lease_update(lease_id,
{'status': next_status})
LOG.debug('Status of lease %s changed from %s to %s.',
lease_id, transition, next_status)
else:
LOG.error('Lease %s went into ERROR status.',
lease_id)
db_api.lease_update(lease_id, {'status': cls.ERROR})
raise exceptions.InvalidStatus
return result
return wrapper
return decorator
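    # Illustrative usage (not from the original module): a manager method that
    # moves a lease through STARTING and should settle in ACTIVE could be
    # wrapped as
    #
    #   @LeaseStatus.lease_status(transition=LeaseStatus.STARTING,
    #                             result_in=(LeaseStatus.ACTIVE,))
    #   def start_lease(self, lease_id=None):
    #       ...
    #
    # Note that the wrapper reads the lease from kwargs, so callers must pass
    # ``lease_id`` as a keyword argument.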
@classmethod
def derive_stable_status(cls, lease_id):
"""Derive stable lease status.
This derives a lease status from statuses of reservations and events.
:param lease_id: Lease ID
:return: Derived lease status
"""
        # Possible lease statuses. Key is a tuple of (start_lease event
        # status, end_lease event status).
possible_statuses = {
(EventStatus.UNDONE, EventStatus.UNDONE): cls.PENDING,
(EventStatus.DONE, EventStatus.UNDONE): cls.ACTIVE,
(EventStatus.DONE, EventStatus.DONE): cls.TERMINATED
}
# Derive a lease status from event statuses
event_statuses = {}
for event_type in ('start_lease', 'end_lease'):
event = db_api.event_get_first_sorted_by_filters(
'lease_id', 'asc',
{'lease_id': lease_id, 'event_type': event_type}
)
event_statuses[event_type] = event['status']
try:
status = possible_statuses[(event_statuses['start_lease'],
event_statuses['end_lease'])]
except KeyError:
status = cls.ERROR
# Check the combination of statuses.
if cls.is_valid_combination(lease_id, status):
return status
else:
return cls.ERROR
COMBINATIONS = {
LeaseStatus.CREATING: {
'reservation': (ReservationStatus.PENDING,),
'start_lease': (EventStatus.UNDONE,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.PENDING: {
'reservation': (ReservationStatus.PENDING,),
'start_lease': (EventStatus.UNDONE,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.STARTING: {
'reservation': (ReservationStatus.PENDING,
ReservationStatus.ACTIVE,
ReservationStatus.ERROR),
'start_lease': (EventStatus.IN_PROGRESS,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.ACTIVE: {
'reservation': (ReservationStatus.ACTIVE,),
'start_lease': (EventStatus.DONE,),
'end_lease': (EventStatus.UNDONE,)
},
LeaseStatus.TERMINATING: {
'reservation': (ReservationStatus.ACTIVE,
ReservationStatus.DELETED,
ReservationStatus.ERROR),
'start_lease': (EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.IN_PROGRESS,)
},
LeaseStatus.TERMINATED: {
'reservation': (ReservationStatus.DELETED,),
'start_lease': (EventStatus.DONE,),
'end_lease': (EventStatus.DONE,)
},
LeaseStatus.DELETING: {
'reservation': ReservationStatus.ALL,
'start_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR)
},
LeaseStatus.UPDATING: {
'reservation': ReservationStatus.ALL,
'start_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.UNDONE,
EventStatus.DONE,
EventStatus.ERROR)
},
LeaseStatus.ERROR: {
'reservation': ReservationStatus.ERROR,
'start_lease': (EventStatus.DONE,
EventStatus.ERROR),
'end_lease': (EventStatus.UNDONE,
EventStatus.ERROR)
}
}
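# Example (illustrative): derive_stable_status() maps the event pair
# (start_lease=DONE, end_lease=UNDONE) to ACTIVE, but is_valid_combination()
# only accepts that result if every reservation of the lease is ACTIVE as
# well; otherwise the derived status falls back to ERROR.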
event = EventStatus
reservation = ReservationStatus
lease = LeaseStatus
|
|
"""
Copyright (c) 2010 Karl-Michael Schneider
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os, os.path, logging
from time import sleep
from sikuli.Sikuli import SCREEN, openApp
from sikuli.Region import Region
from sikuli.Key import Key, KEY_ALT
from sikuliimport.projects import IMG_INSTALLER_WELCOME
from seagull.window import AnchoredWindow
from seagull.buttons import Buttons
from seagull.checkboxes import VerticalCheckboxList
from seagull.util import Wait
from seagull.images import IMG_BUTTONS, IMG_BUTTONS_DISABLED, IMG_CHECKBOXES
_LOGGER = logging.getLogger(__name__)
WELCOME_WINDOW_TIMEOUT = 30
NEXT_BUTTON_ENABLED_TIMEOUT = 20
INSTALL_TIME_MAX_SECONDS = 600
ANCHOR_IMAGE_OFFSET_X = 3
ANCHOR_IMAGE_OFFSET_Y = 30
WINDOW_WIDTH = 499
WINDOW_HEIGHT = 392
BUTTON_REGION_HEIGHT = 48
CONFIRM_WINDOW_WIDTH = 349
CONFIRM_WINDOW_HEIGHT = 143
class Installer(AnchoredWindow):
"""Class to automate an installer."""
pages = ['Welcome', 'Configure Shortcuts', 'Select Installation Folder',
'Ready to Install', 'Installing', 'Complete', 'Cancelled']
welcome_page = 0
shortcuts_page = 1
folder_page = 2
install_page = 3
installing_page = 4
complete_page = 5
cancelled_page = 6
# defines which buttons exist on which page
button_page_map = {
'next' : [welcome_page, shortcuts_page, folder_page, installing_page],
'back' : [welcome_page, shortcuts_page, folder_page, install_page,
installing_page, complete_page, cancelled_page],
'install' : [install_page],
'finish' : [complete_page, cancelled_page],
'cancel' : [welcome_page, shortcuts_page, folder_page, install_page,
installing_page, complete_page, cancelled_page]
}
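    # Example (illustrative): on the 'Ready to Install' page only 'back',
    # 'install' and 'cancel' are mapped, so _ensure_button('Next') raises
    # there while install() is allowed.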
def __init__(self, installer_path):
if not os.path.exists(installer_path):
raise Exception('No such file: %s' % installer_path)
self.path = installer_path
AnchoredWindow.__init__(self, IMG_INSTALLER_WELCOME,
ANCHOR_IMAGE_OFFSET_X, ANCHOR_IMAGE_OFFSET_Y,
WINDOW_WIDTH, WINDOW_HEIGHT,
name = 'installer', title = 'Setup')
self.button_region = None
self.button_images = IMG_BUTTONS
self.disabled_button_images = IMG_BUTTONS_DISABLED
self.buttons = None
self.buttons_valid = None
self.confirm_window_open = None
self.confirm_window_region = None
self.confirm_button_images = { 'yes' : IMG_BUTTONS['yes'],
'no' : IMG_BUTTONS['no'] }
self.confirm_buttons = None
self.shortcut_checkboxes = None
self.running = False
self.installing = None
self.page = None
def _ensure(self, **states):
for attr, value in states.iteritems():
if value is not None:
if value:
if not getattr(self, attr):
raise Exception('installer is not %s' % attr)
else:
if getattr(self, attr):
raise Exception('installer is %s' % attr)
def _ensure_button(self, name):
if self.page not in self.button_page_map[name.lower()]:
raise Exception("no '%s' button on '%s' page" %
(name, self.pages[self.page]))
def _ensure_buttons_valid(self):
if self.buttons_valid:
return
self.buttons.find_buttons()
self.buttons_valid = True
def _ensure_button_enabled(self, name):
self._ensure_buttons_valid()
if not self.buttons.is_button_enabled(name.lower()):
raise Exception(name + ' button is not enabled')
def run(self):
"""Runs the installer."""
self._ensure(running = False)
_LOGGER.info('Running %s', self.path)
openApp(self.path)
self.running = True
self.installing = False
self.anchor(WELCOME_WINDOW_TIMEOUT)
_LOGGER.info('Installer window has appeared')
self.page = self.welcome_page
self.button_region = Region(self.getX(),
self.getY() + self.getH() - BUTTON_REGION_HEIGHT,
self.getW(), BUTTON_REGION_HEIGHT)
self.buttons = Buttons(self.button_images,
self.disabled_button_images, region = self.button_region)
self.buttons.find_buttons()
self.buttons_valid = True
self.buttons.waitUntilButtonIsEnabled('next',
NEXT_BUTTON_ENABLED_TIMEOUT)
self.confirm_window_open = False
self.confirm_window_region = Region(
self.getX() + (self.getW() - CONFIRM_WINDOW_WIDTH) / 2,
self.getY() + (self.getH() - CONFIRM_WINDOW_HEIGHT) / 2,
CONFIRM_WINDOW_WIDTH, CONFIRM_WINDOW_HEIGHT)
self.confirm_buttons = Buttons(self.confirm_button_images,
region = self.confirm_window_region)
_LOGGER.info('Waiting for Next button to be enabled')
self.shortcut_checkboxes = None
def next(self):
"""Clicks the Next button.
Raises Exception if the Next button is not enabled.
"""
self._ensure(running = True)
self._ensure_button('Next')
self._ensure_button_enabled('Next')
self.buttons.click('next')
sleep(1)
self.page += 1
self.buttons_valid = False
_LOGGER.info('now on %s page', self.pages[self.page])
def next_key(self):
"""Presses the Next button using the keyboard.
Raises Exception if the Next button is not enabled.
"""
self._ensure(running = True)
self._ensure_button('Next')
self._ensure_button_enabled('Next')
self.setFocus()
SCREEN.type('n', KEY_ALT)
sleep(1)
self.page += 1
self.buttons_valid = False
_LOGGER.info('now on %s page', self.pages[self.page])
def back(self):
"""Clicks the Back button.
Raises Exception if the Back button is not enabled.
"""
self._ensure(running = True)
self._ensure_button('Back')
self._ensure_button_enabled('Back')
self.buttons.click('back')
sleep(1)
self.page -= 1
self.buttons_valid = False
_LOGGER.info('now on %s page', self.pages[self.page])
def back_key(self):
"""Presses the Back button using the keyboard.
Raises Exception if the Back button is not enabled.
"""
self._ensure(running = True)
self._ensure_button('Back')
self._ensure_button_enabled('Back')
self.setFocus()
SCREEN.type('b', KEY_ALT)
sleep(1)
self.page -= 1
self.buttons_valid = False
_LOGGER.info('now on %s page', self.pages[self.page])
def cancel(self):
"""Clicks the Cancel button and the Yes button.
Raises Exception if the Cancel button is not enabled.
"""
self._ensure(running = True)
self._ensure_button('Cancel')
self._ensure_button_enabled('Cancel')
self.buttons.click('cancel')
sleep(1)
self.confirm_window_open = True
self.confirm_cancel('yes')
self.page = self.cancelled_page
self.installing = False
self.buttons_valid = False
def cancel_key(self):
"""Presses the Cancel button and confirms using the keyboard.
Raises Exception if the Cancel button is not enabled.
"""
self._ensure(running = True)
self._ensure_button('Cancel')
self._ensure_button_enabled('Cancel')
self.setFocus()
SCREEN.type(Key.ESC)
sleep(1)
SCREEN.type('y')
sleep(1)
self.page = self.cancelled_page
self.installing = False
self.buttons_valid = False
def finish(self):
"""Clicks the Finish button."""
self._ensure(running = True)
self._ensure_button('Finish')
self._ensure_buttons_valid()
_LOGGER.info('closing installer')
self.buttons.click('finish')
sleep(1)
self.running = False
def finish_key(self):
"""Presses the Finish button using the keyboard."""
self._ensure(running = True)
self._ensure_button('Finish')
_LOGGER.info('closing installer')
self.setFocus()
SCREEN.type('f', KEY_ALT)
sleep(1)
self.running = False
def install(self):
"""Clicks the install button.
Raises Exception if the installer is not on the 'Ready to Install'
page.
"""
self._ensure(running = True)
self._ensure_button('Install')
self._ensure_buttons_valid()
self.buttons.click('install')
sleep(1)
self.page = self.installing_page
self.installing = True
self.buttons_valid = False
def install_key(self):
"""Presses the install button using the keyboard.
Raises Exception if the installer is not on the 'Ready to Install'
page.
"""
self._ensure(running = True)
self._ensure_button('Install')
self.setFocus()
SCREEN.type('i', KEY_ALT)
sleep(1)
self.page = self.installing_page
self.installing = True
self.buttons_valid = False
def close(self):
"""Closes the installer by clicking the Close button in the window
title bar and confirming if necessary.
"""
AnchoredWindow.close(self)
sleep(1)
        if self.page not in [self.complete_page, self.cancelled_page]:
self.confirm_window_open = True
self.confirm_cancel('yes')
AnchoredWindow.close(self)
sleep(1)
self.running = False
self.installing = False
def confirm_cancel(self, button):
"""Clicks the specified button in the confirmation window.
Raises Exception if the confirmation window is not open.
"""
if not self.confirm_window_open:
raise Exception('confirmation window is not open')
self.confirm_buttons.find_buttons()
self.confirm_buttons.click(button)
sleep(1)
self.confirm_window_open = False
def _configure_shortcut(self, shortcut, add_shortcut):
self._ensure(running = True)
if self.page != self.shortcuts_page:
raise Exception('installer is not on the Configure Shortcuts page')
if self.shortcut_checkboxes is None:
self.shortcut_checkboxes = VerticalCheckboxList(IMG_CHECKBOXES,
region = self)
self.shortcut_checkboxes.find_elements()
if self.shortcut_checkboxes.length() != 3:
raise Exception('expected three checkboxes but found %d' %
self.shortcut_checkboxes.length())
if bool(add_shortcut) != self.shortcut_checkboxes.is_checked(shortcut):
if bool(add_shortcut):
self.shortcut_checkboxes.check(shortcut)
else:
self.shortcut_checkboxes.uncheck(shortcut)
sleep(1)
def configure_desktop_shortcut(self, add_shortcut = True):
"""Checks the checkbox for the Desktop shortcut.
If add_shortcut is False, unchecks the checkbox.
Raises Exception if the installer is not on the Configure Shortcuts
page.
"""
self._configure_shortcut(0, add_shortcut)
def configure_start_menu_shortcut(self, add_shortcut = True):
"""Checks the checkbox for the Start Menu shortcut.
If add_shortcut is False, unchecks the checkbox.
Raises Exception if the installer is not on the Configure Shortcuts
page.
"""
self._configure_shortcut(1, add_shortcut)
def configure_quick_launch_shortcut(self, add_shortcut = True):
"""Checks the checkbox for the Quick Launch shortcut.
If add_shortcut is False, unchecks the checkbox.
Raises Exception if the installer is not on the Configure Shortcuts
page.
"""
self._configure_shortcut(2, add_shortcut)
def is_finished(self):
"""Returns True if the installer has finished installing.
More precisely, returns True if the Finish button exists in the
installer window.
Raises Exception if the installer was not installing.
"""
self._ensure(running = True, installing = True)
self.buttons.find_buttons()
finished = self.buttons.exists_button('finish')
if finished:
self.installing = False
self.buttons_valid = True
self.page = self.complete_page
return finished
def wait_until_finished(self, timeout = INSTALL_TIME_MAX_SECONDS):
"""Waits until the installer finishes installing.
Checks every 3 seconds if the Finish button exists.
Raises Exception if the installer is not finished after the
specified timeout.
"""
self._ensure(running = True, installing = True)
waiting = Wait(timeout, interval = 3,
exception_message = 'installer not finished after %f seconds' %
timeout)
while not self.is_finished():
waiting.wait()
_LOGGER.info('finished')
def is_running(self):
"""Returns True if the installer is running."""
return self.running
def is_installing(self):
"""Returns True if the installer is installing."""
self._ensure(running = True)
return self.installing
def current_page(self):
"""Returns the current page number (0-based)."""
self._ensure(running = True)
return self.page
def current_page_title(self):
"""Returns the current page title."""
self._ensure(running = True)
return self.pages[self.page]
def defaultInstallation(self):
"""Runs the installer and installs the application, using default
options.
"""
self.run()
self.next() # go to "Configure Shortcuts"
self.next() # go to "Select Installation Folder"
self.next() # go to "Ready to Install"
self.install()
self.wait_until_finished()
self.finish()
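# Illustrative usage (not part of the original module):
#
#   installer = Installer(r'C:\path\to\setup.exe')   # hypothetical path
#   installer.defaultInstallation()
#
# or, for a custom flow, drive the pages manually with run(), next(),
# configure_desktop_shortcut(), install(), wait_until_finished() and finish().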
|
|
# -*- coding: windows-1252 -*-
import struct
import sys
class Reader:
def __init__(self, filename, dump=False):
self.dump = dump
self.STREAMS = {}
        doc = open(filename, 'rb').read()
self.header, self.data = doc[0:512], doc[512:]
del doc
self.__build_header()
self.__build_MSAT()
self.__build_SAT()
self.__build_directory()
self.__build_short_sectors_data()
if len(self.short_sectors_data) > 0:
self.__build_SSAT()
else:
if self.dump and (self.total_ssat_sectors != 0 or self.ssat_start_sid != -2):
                print 'NOTE: header says there should be', self.total_ssat_sectors, 'short sectors'
                print 'NOTE: starting at sector', self.ssat_start_sid
                print 'NOTE: but the file does not contain data in short sectors'
self.ssat_start_sid = -2
self.total_ssat_sectors = 0
self.SSAT = [-2]
for dentry in self.dir_entry_list[1:]:
(did,
sz, name,
t, c,
did_left, did_right, did_root,
dentry_start_sid,
stream_size
) = dentry
stream_data = ''
if stream_size > 0:
if stream_size >= self.min_stream_size:
args = (self.data, self.SAT, dentry_start_sid, self.sect_size)
else:
args = (self.short_sectors_data, self.SSAT, dentry_start_sid, self.short_sect_size)
stream_data = self.get_stream_data(*args)
if name != '':
                # NOTE: stream names may collide; full storage paths should really be used as keys.
self.STREAMS[name] = stream_data
def __build_header(self):
self.doc_magic = self.header[0:8]
if self.doc_magic != '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1':
            raise Exception('Not an OLE file.')
self.file_uid = self.header[8:24]
self.rev_num = self.header[24:26]
self.ver_num = self.header[26:28]
self.byte_order = self.header[28:30]
self.log2_sect_size, = struct.unpack('<H', self.header[30:32])
self.log2_short_sect_size, = struct.unpack('<H', self.header[32:34])
self.total_sat_sectors, = struct.unpack('<L', self.header[44:48])
self.dir_start_sid, = struct.unpack('<l', self.header[48:52])
self.min_stream_size, = struct.unpack('<L', self.header[56:60])
self.ssat_start_sid, = struct.unpack('<l', self.header[60:64])
self.total_ssat_sectors, = struct.unpack('<L', self.header[64:68])
self.msat_start_sid, = struct.unpack('<l', self.header[68:72])
self.total_msat_sectors, = struct.unpack('<L', self.header[72:76])
self.sect_size = 1 << self.log2_sect_size
self.short_sect_size = 1 << self.log2_short_sect_size
if self.dump:
print 'file magic: '
print_bin_data(self.doc_magic)
print 'file uid: '
print_bin_data(self.file_uid)
print 'revision number: '
print_bin_data(self.rev_num)
print 'version number: '
print_bin_data(self.ver_num)
print 'byte order: '
print_bin_data(self.byte_order)
print 'sector size :', hex(self.sect_size), self.sect_size
# print 'total sectors in file :', hex(self.total_sectors), self.total_sectors
print 'short sector size :', hex(self.short_sect_size), self.short_sect_size
print 'Total number of sectors used for the SAT :', hex(self.total_sat_sectors), self.total_sat_sectors
print 'SID of first sector of the directory stream:', hex(self.dir_start_sid), self.dir_start_sid
print 'Minimum size of a standard stream :', hex(self.min_stream_size), self.min_stream_size
print 'SID of first sector of the SSAT :', hex(self.ssat_start_sid), self.ssat_start_sid
print 'Total number of sectors used for the SSAT :', hex(self.total_ssat_sectors), self.total_ssat_sectors
print 'SID of first additional sector of the MSAT :', hex(self.msat_start_sid), self.msat_start_sid
print 'Total number of sectors used for the MSAT :', hex(self.total_msat_sectors), self.total_msat_sectors
def __build_MSAT(self):
self.MSAT = list(struct.unpack('<109l', self.header[76:]))
next = self.msat_start_sid
while next > 0:
msat_sector = struct.unpack('<128l', self.data[next * self.sect_size:(next + 1) * self.sect_size])
self.MSAT.extend(msat_sector[:127])
next = msat_sector[-1]
if self.dump:
print 'MSAT (header part): \n', self.MSAT[:109]
print 'additional MSAT sectors: \n', self.MSAT[109:]
def __build_SAT(self):
sat_stream = ''.join([self.data[i * self.sect_size:(i + 1) * self.sect_size] for i in self.MSAT if i >= 0])
sat_sids_count = len(sat_stream) >> 2
self.SAT = struct.unpack('<%dl' % sat_sids_count, sat_stream) # SIDs tuple
if self.dump:
print 'SAT sid count:\n', sat_sids_count
print 'SAT content:\n', self.SAT
def __build_SSAT(self):
ssat_stream = self.get_stream_data(self.data, self.SAT, self.ssat_start_sid, self.sect_size)
ssids_count = len(ssat_stream) >> 2
self.SSAT = struct.unpack('<%dl' % ssids_count, ssat_stream)
if self.dump:
print 'SSID count:', ssids_count
print 'SSAT content:\n', self.SSAT
def __build_directory(self):
dir_stream = self.get_stream_data(self.data, self.SAT, self.dir_start_sid, self.sect_size)
self.dir_entry_list = []
i = 0
while i < len(dir_stream):
dentry = dir_stream[i:i + 128] # 128 -- dir entry size
i += 128
did = len(self.dir_entry_list)
sz, = struct.unpack('<H', dentry[64:66])
if sz > 0:
name = dentry[0:sz - 2].decode('utf_16_le', 'replace')
else:
name = u''
t, = struct.unpack('B', dentry[66])
c, = struct.unpack('B', dentry[67])
did_left, = struct.unpack('<l', dentry[68:72])
did_right, = struct.unpack('<l', dentry[72:76])
did_root, = struct.unpack('<l', dentry[76:80])
dentry_start_sid, = struct.unpack('<l', dentry[116:120])
stream_size, = struct.unpack('<L', dentry[120:124])
self.dir_entry_list.extend([(did, sz, name, t, c,
did_left, did_right, did_root,
dentry_start_sid, stream_size)])
if self.dump:
dentry_types = {
0x00: 'Empty',
0x01: 'User storage',
0x02: 'User stream',
0x03: 'LockBytes',
0x04: 'Property',
0x05: 'Root storage'
}
node_colours = {
0x00: 'Red',
0x01: 'Black'
}
print 'total directory entries:', len(self.dir_entry_list)
for dentry in self.dir_entry_list:
(did, sz, name, t, c,
did_left, did_right, did_root,
dentry_start_sid, stream_size) = dentry
print 'DID', did
print 'Size of the used area of the character buffer of the name:', sz
print 'dir entry name:', repr(name)
print 'type of entry:', t, dentry_types[t]
print 'entry colour:', c, node_colours[c]
print 'left child DID :', did_left
print 'right child DID:', did_right
print 'root DID :', did_root
print 'start SID :', dentry_start_sid
print 'stream size :', stream_size
if stream_size == 0:
print 'stream is empty'
elif stream_size >= self.min_stream_size:
print 'stream stored as normal stream'
else:
print 'stream stored as short-stream'
def __build_short_sectors_data(self):
(did, sz, name, t, c,
did_left, did_right, did_root,
dentry_start_sid, stream_size) = self.dir_entry_list[0]
assert t == 0x05 # Short-Stream Container Stream (SSCS) resides in Root Storage
if stream_size == 0:
self.short_sectors_data = ''
else:
self.short_sectors_data = self.get_stream_data(self.data, self.SAT, dentry_start_sid, self.sect_size)
def get_stream_data(self, data, SAT, start_sid, sect_size):
sid = start_sid
chunks = [(sid, sid)]
stream_data = ''
while SAT[sid] >= 0:
next_in_chain = SAT[sid]
last_chunk_start, last_chunk_finish = chunks[-1]
if next_in_chain == last_chunk_finish + 1:
chunks[-1] = last_chunk_start, next_in_chain
else:
chunks.extend([(next_in_chain, next_in_chain)])
sid = next_in_chain
for s, f in chunks:
stream_data += data[s * sect_size:(f + 1) * sect_size]
# print chunks
return stream_data
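# Illustrative usage (Python 2, matching the module; not part of the original):
#
#   reader = Reader('book.xls')
#   workbook = reader.STREAMS.get('Workbook', '')
#   print len(workbook), 'bytes in the Workbook stream'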
def print_bin_data(data):
i = 0
while i < len(data):
j = 0
while (i < len(data)) and (j < 16):
c = '0x%02X' % ord(data[i])
sys.stdout.write(c)
sys.stdout.write(' ')
i += 1
j += 1
print
if i == 0:
print '<NO DATA>'
# This implementation writes only 'Root Entry', 'Workbook' streams
# and 2 empty streams for aligning directory stream on sector boundary
#
# LAYOUT:
# 0 header
# 76 MSAT (1st part: 109 SID)
# 512 workbook stream
# ... additional MSAT sectors if the streams' total size exceeds about 7 MB (109 * 128 * 512 bytes)
# ... SAT
# ... directory stream
#
# NOTE: this layout is "ad hoc". It can be more general. RTFM
class XlsDoc:
SECTOR_SIZE = 0x0200
MIN_LIMIT = 0x1000
SID_FREE_SECTOR = -1
SID_END_OF_CHAIN = -2
SID_USED_BY_SAT = -3
SID_USED_BY_MSAT = -4
def __init__(self):
# self.book_stream = '' # padded
self.book_stream_sect = []
self.dir_stream = ''
self.dir_stream_sect = []
self.packed_SAT = ''
self.SAT_sect = []
self.packed_MSAT_1st = ''
self.packed_MSAT_2nd = ''
self.MSAT_sect_2nd = []
self.header = ''
def __build_directory(self): # align on sector boundary
self.dir_stream = ''
dentry_name = '\x00'.join('Root Entry\x00') + '\x00'
dentry_name_sz = len(dentry_name)
dentry_name_pad = '\x00' * (64 - dentry_name_sz)
dentry_type = 0x05 # root storage
dentry_colour = 0x01 # black
dentry_did_left = -1
dentry_did_right = -1
dentry_did_root = 1
dentry_start_sid = -2
dentry_stream_sz = 0
self.dir_stream += struct.pack('<64s H 2B 3l 9L l L L',
dentry_name + dentry_name_pad,
dentry_name_sz,
dentry_type,
dentry_colour,
dentry_did_left,
dentry_did_right,
dentry_did_root,
0, 0, 0, 0, 0, 0, 0, 0, 0,
dentry_start_sid,
dentry_stream_sz,
0
)
dentry_name = '\x00'.join('Workbook\x00') + '\x00'
dentry_name_sz = len(dentry_name)
dentry_name_pad = '\x00' * (64 - dentry_name_sz)
dentry_type = 0x02 # user stream
dentry_colour = 0x01 # black
dentry_did_left = -1
dentry_did_right = -1
dentry_did_root = -1
dentry_start_sid = 0
dentry_stream_sz = self.book_stream_len
self.dir_stream += struct.pack('<64s H 2B 3l 9L l L L',
dentry_name + dentry_name_pad,
dentry_name_sz,
dentry_type,
dentry_colour,
dentry_did_left,
dentry_did_right,
dentry_did_root,
0, 0, 0, 0, 0, 0, 0, 0, 0,
dentry_start_sid,
dentry_stream_sz,
0
)
# padding
dentry_name = ''
dentry_name_sz = len(dentry_name)
dentry_name_pad = '\x00' * (64 - dentry_name_sz)
dentry_type = 0x00 # empty
dentry_colour = 0x01 # black
dentry_did_left = -1
dentry_did_right = -1
dentry_did_root = -1
dentry_start_sid = -2
dentry_stream_sz = 0
self.dir_stream += struct.pack('<64s H 2B 3l 9L l L L',
dentry_name + dentry_name_pad,
dentry_name_sz,
dentry_type,
dentry_colour,
dentry_did_left,
dentry_did_right,
dentry_did_root,
0, 0, 0, 0, 0, 0, 0, 0, 0,
dentry_start_sid,
dentry_stream_sz,
0
) * 2
def __build_sat(self):
# Build SAT
book_sect_count = self.book_stream_len >> 9
dir_sect_count = len(self.dir_stream) >> 9
total_sect_count = book_sect_count + dir_sect_count
SAT_sect_count = 0
MSAT_sect_count = 0
SAT_sect_count_limit = 109
while total_sect_count > 128 * SAT_sect_count or SAT_sect_count > SAT_sect_count_limit:
SAT_sect_count += 1
total_sect_count += 1
if SAT_sect_count > SAT_sect_count_limit:
MSAT_sect_count += 1
total_sect_count += 1
SAT_sect_count_limit += 127
SAT = [self.SID_FREE_SECTOR] * 128 * SAT_sect_count
sect = 0
while sect < book_sect_count - 1:
self.book_stream_sect.append(sect)
SAT[sect] = sect + 1
sect += 1
self.book_stream_sect.append(sect)
SAT[sect] = self.SID_END_OF_CHAIN
sect += 1
while sect < book_sect_count + MSAT_sect_count:
self.MSAT_sect_2nd.append(sect)
SAT[sect] = self.SID_USED_BY_MSAT
sect += 1
while sect < book_sect_count + MSAT_sect_count + SAT_sect_count:
self.SAT_sect.append(sect)
SAT[sect] = self.SID_USED_BY_SAT
sect += 1
while sect < book_sect_count + MSAT_sect_count + SAT_sect_count + dir_sect_count - 1:
self.dir_stream_sect.append(sect)
SAT[sect] = sect + 1
sect += 1
self.dir_stream_sect.append(sect)
SAT[sect] = self.SID_END_OF_CHAIN
sect += 1
self.packed_SAT = struct.pack('<%dl' % (SAT_sect_count * 128), *SAT)
MSAT_1st = [self.SID_FREE_SECTOR] * 109
for i, SAT_sect_num in zip(range(0, 109), self.SAT_sect):
MSAT_1st[i] = SAT_sect_num
self.packed_MSAT_1st = struct.pack('<109l', *MSAT_1st)
MSAT_2nd = [self.SID_FREE_SECTOR] * 128 * MSAT_sect_count
if MSAT_sect_count > 0:
MSAT_2nd[- 1] = self.SID_END_OF_CHAIN
i = 109
msat_sect = 0
sid_num = 0
while i < SAT_sect_count:
if (sid_num + 1) % 128 == 0:
# print 'link: ',
msat_sect += 1
if msat_sect < len(self.MSAT_sect_2nd):
MSAT_2nd[sid_num] = self.MSAT_sect_2nd[msat_sect]
else:
# print 'sid: ',
MSAT_2nd[sid_num] = self.SAT_sect[i]
i += 1
# print sid_num, MSAT_2nd[sid_num]
sid_num += 1
self.packed_MSAT_2nd = struct.pack('<%dl' % (MSAT_sect_count * 128), *MSAT_2nd)
# print vars()
# print zip(range(0, sect), SAT)
# print self.book_stream_sect
# print self.MSAT_sect_2nd
# print MSAT_2nd
# print self.SAT_sect
# print self.dir_stream_sect
def __build_header(self):
doc_magic = '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'
file_uid = '\x00' * 16
rev_num = '\x3E\x00'
ver_num = '\x03\x00'
byte_order = '\xFE\xFF'
log_sect_size = struct.pack('<H', 9)
log_short_sect_size = struct.pack('<H', 6)
not_used0 = '\x00' * 10
total_sat_sectors = struct.pack('<L', len(self.SAT_sect))
dir_start_sid = struct.pack('<l', self.dir_stream_sect[0])
not_used1 = '\x00' * 4
min_stream_size = struct.pack('<L', 0x1000)
ssat_start_sid = struct.pack('<l', -2)
total_ssat_sectors = struct.pack('<L', 0)
if len(self.MSAT_sect_2nd) == 0:
msat_start_sid = struct.pack('<l', -2)
else:
msat_start_sid = struct.pack('<l', self.MSAT_sect_2nd[0])
total_msat_sectors = struct.pack('<L', len(self.MSAT_sect_2nd))
self.header = ''.join([doc_magic,
file_uid,
rev_num,
ver_num,
byte_order,
log_sect_size,
log_short_sect_size,
not_used0,
total_sat_sectors,
dir_start_sid,
not_used1,
min_stream_size,
ssat_start_sid,
total_ssat_sectors,
msat_start_sid,
total_msat_sectors
])
def save(self, file_name_or_filelike_obj, stream):
# 1. Align stream on 0x1000 boundary (and therefore on sector boundary)
padding = '\x00' * (0x1000 - (len(stream) % 0x1000))
self.book_stream_len = len(stream) + len(padding)
self.__build_directory()
self.__build_sat()
self.__build_header()
f = file_name_or_filelike_obj
we_own_it = not hasattr(f, 'write')
if we_own_it:
f = open(file_name_or_filelike_obj, 'wb')
f.write(self.header)
f.write(self.packed_MSAT_1st)
f.write(stream)
f.write(padding)
f.write(self.packed_MSAT_2nd)
f.write(self.packed_SAT)
f.write(self.dir_stream)
if we_own_it:
f.close()
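# Illustrative usage (not part of the original module): wrap a raw BIFF
# 'Workbook' stream into a minimal OLE2 compound file.
#
#   doc = XlsDoc()
#   doc.save('out.xls', workbook_stream)   # also accepts any object with .write()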
|
|
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import oslo_db.exception as exc
import six
import testtools
import webob.exc
from neutron.api.v2 import attributes as attr
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_securitygroup.'
'SecurityGroupTestPlugin')
class SecurityGroupTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attr.RESOURCE_ATTRIBUTE_MAP.update(
ext_sg.RESOURCE_ATTRIBUTE_MAP)
return ext_sg.Securitygroup.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _create_security_group(self, fmt, name, description, **kwargs):
data = {'security_group': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test-tenant'),
'description': description}}
security_group_req = self.new_create_request('security-groups', data,
fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_req.get_response(self.ext_api)
def _build_security_group_rule(self, security_group_id, direction, proto,
port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
tenant_id='test-tenant',
ethertype=const.IPv4):
data = {'security_group_rule': {'security_group_id': security_group_id,
'direction': direction,
'protocol': proto,
'ethertype': ethertype,
'tenant_id': tenant_id}}
if port_range_min:
data['security_group_rule']['port_range_min'] = port_range_min
if port_range_max:
data['security_group_rule']['port_range_max'] = port_range_max
if remote_ip_prefix:
data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
if remote_group_id:
data['security_group_rule']['remote_group_id'] = remote_group_id
return data
def _create_security_group_rule(self, fmt, rules, **kwargs):
security_group_rule_req = self.new_create_request(
'security-group-rules', rules, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_rule_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, **kwargs):
res = self._create_security_group(fmt, name, description, **kwargs)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_security_group_rule(self, fmt, rules, **kwargs):
res = self._create_security_group_rule(self.fmt, rules)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def security_group(self, name='webservers', description='webservers',
fmt=None):
if not fmt:
fmt = self.fmt
security_group = self._make_security_group(fmt, name, description)
yield security_group
@contextlib.contextmanager
def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
'd1db38eb087',
direction='ingress', protocol=const.PROTO_NAME_TCP,
port_range_min='22', port_range_max='22',
remote_ip_prefix=None, remote_group_id=None,
fmt=None, ethertype=const.IPv4):
if not fmt:
fmt = self.fmt
rule = self._build_security_group_rule(security_group_id,
direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype=ethertype)
security_group_rule = self._make_security_group_rule(self.fmt, rule)
yield security_group_rule
def _delete_default_security_group_egress_rules(self, security_group_id):
"""Deletes default egress rules given a security group ID."""
res = self._list(
'security-group-rules',
query_params='security_group_id=%s' % security_group_id)
for r in res['security_group_rules']:
if (r['direction'] == 'egress' and not r['port_range_max'] and
not r['port_range_min'] and not r['protocol']
and not r['remote_ip_prefix']):
self._delete('security-group-rules', r['id'])
def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
"""Asserts that the sg rule has expected key/value pairs passed
in as expected_kvs dictionary
"""
for k, v in six.iteritems(expected_kvs):
self.assertEqual(security_group_rule[k], v)
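# Illustrative sketch (not part of the original tests): the helpers above are
# typically combined inside a test method as
#
#   with self.security_group(name='sg1', description='demo') as sg:
#       sg_id = sg['security_group']['id']
#       with self.security_group_rule(sg_id, 'ingress',
#                                     const.PROTO_NAME_TCP, '80', '80'):
#           ...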
class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
securitygroups_db.SecurityGroupDbMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with security groups.
"""
__native_pagination_support = True
__native_sorting_support = True
supported_extension_aliases = ["security-group"]
def create_port(self, context, port):
tenant_id = self._get_tenant_id_for_create(context, port['port'])
default_sg = self._ensure_default_security_group(context, tenant_id)
if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
session = context.session
with session.begin(subtransactions=True):
sgids = self._get_security_groups_on_port(context, port)
port = super(SecurityGroupTestPlugin, self).create_port(context,
port)
self._process_port_create_security_group(context, port,
sgids)
return port
def update_port(self, context, id, port):
session = context.session
with session.begin(subtransactions=True):
if ext_sg.SECURITYGROUPS in port['port']:
port['port'][ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
                # delete the existing port bindings and recreate them with
                # the new security groups
self._delete_port_security_group_bindings(context, id)
port['port']['id'] = id
self._process_port_create_security_group(
context, port['port'],
port['port'].get(ext_sg.SECURITYGROUPS))
port = super(SecurityGroupTestPlugin, self).update_port(
context, id, port)
return port
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(context, network['network'])
self._ensure_default_security_group(context, tenant_id)
return super(SecurityGroupTestPlugin, self).create_network(context,
network)
def get_ports(self, context, filters=None, fields=None,
sorts=[], limit=None, marker=None,
page_reverse=False):
neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
context, filters, sorts=sorts, limit=limit, marker=marker,
page_reverse=page_reverse)
return neutron_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
def setUp(self, plugin=None, ext_mgr=None):
plugin = plugin or DB_PLUGIN_KLASS
ext_mgr = ext_mgr or SecurityGroupTestExtensionManager()
super(SecurityGroupDBTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
# Verify that default egress rules have been created
sg_rules = security_group['security_group']['security_group_rules']
self.assertEqual(len(sg_rules), 2)
v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
self.assertEqual(len(v4_rules), 1)
v4_rule = v4_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_rule, expected)
v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
self.assertEqual(len(v6_rules), 1)
v6_rule = v6_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_rule, expected)
def test_skip_duplicate_default_sg_error(self):
num_called = [0]
original_func = self.plugin.create_security_group
def side_effect(context, security_group, default_sg):
# can't always raise, or create_security_group will hang
self.assertTrue(default_sg)
self.assertTrue(num_called[0] < 2)
num_called[0] += 1
ret = original_func(context, security_group, default_sg)
if num_called[0] == 1:
return ret
# make another call to cause an exception.
# NOTE(yamamoto): raising the exception by ourselves
# doesn't update the session state appropriately.
self.assertRaises(exc.DBDuplicateEntry,
original_func, context, security_group,
default_sg)
with mock.patch.object(SecurityGroupTestPlugin,
'create_security_group',
side_effect=side_effect):
self.plugin.create_network(
context.get_admin_context(),
{'network': {'name': 'foo',
'admin_state_up': True,
'shared': False,
'tenant_id': 'bar'}})
def test_update_security_group(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['name'],
data['security_group']['name'])
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_update_security_group_name_to_default_fail(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'default',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_update_default_security_group_name_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_default_security_group_with_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_check_default_security_group_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual('Default security group',
sg['security_groups'][0]['description'])
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_default_security_group_check_case_insensitive(self):
name = 'DEFAULT'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_list_security_groups(self):
with self.security_group(name='sg1', description='sg') as v1,\
self.security_group(name='sg2', description='sg') as v2,\
self.security_group(name='sg3', description='sg') as v3:
security_groups = (v1, v2, v3)
self._test_list_resources('security-group',
security_groups,
query_params='description=sg')
def test_list_security_groups_with_sort(self):
with self.security_group(name='sg1', description='sg') as sg1,\
self.security_group(name='sg2', description='sg') as sg2,\
self.security_group(name='sg3', description='sg') as sg3:
self._test_list_with_sort('security-group',
(sg3, sg2, sg1),
[('name', 'desc')],
query_params='description=sg')
def test_list_security_groups_with_pagination(self):
with self.security_group(name='sg1', description='sg') as sg1,\
self.security_group(name='sg2', description='sg') as sg2,\
self.security_group(name='sg3', description='sg') as sg3:
self._test_list_with_pagination('security-group',
(sg1, sg2, sg3),
('name', 'asc'), 2, 2,
query_params='description=sg')
def test_list_security_groups_with_pagination_reverse(self):
with self.security_group(name='sg1', description='sg') as sg1,\
self.security_group(name='sg2', description='sg') as sg2,\
self.security_group(name='sg3', description='sg') as sg3:
self._test_list_with_pagination_reverse(
'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
query_params='description=sg')
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
'22', None, None, ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_invalid_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
remote_ip_prefix = bad_prefix
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
name = 'webservers'
description = 'my webservers'
test_addr = {'192.168.1.1/24': 'IPv6',
'2001:db8:1234::/48': 'IPv4',
'192.168.2.1/24': 'BadEthertype'}
for remote_ip_prefix, ethertype in six.iteritems(test_addr):
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
None,
ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_unmasked_prefix(self):
name = 'webservers'
description = 'my webservers'
addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}}
for ip in addr:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
ethertype = addr[ip]['ethertype']
remote_ip_prefix = ip
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
None,
ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, 201)
res_sg = self.deserialize(self.fmt, res)
prefix = res_sg['security_group_rule']['remote_ip_prefix']
self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))
def test_create_security_group_rule_tcp_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = const.PROTO_NUM_TCP # TCP
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_case_insensitive(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'TCP'
port_range_min = 22
port_range_max = 22
ethertype = 'ipV4'
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
ethertype=ethertype) as rule:
                # the lower case value will be returned
self.assertEqual(rule['security_group_rule']['protocol'],
protocol.lower())
self.assertEqual(rule['security_group_rule']['ethertype'],
const.IPv4)
def test_get_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
remote_group_id = sg['security_group']['id']
res = self.new_show_request('security-groups', remote_group_id)
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix):
group = self.deserialize(
self.fmt, res.get_response(self.ext_api))
sg_rule = group['security_group']['security_group_rules']
self.assertEqual(group['security_group']['id'],
remote_group_id)
self.assertEqual(len(sg_rule), 3)
sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
for k, v, in keys:
self.assertEqual(sg_rule[0][k], v)
def test_get_security_group_on_port_from_wrong_tenant(self):
plugin = manager.NeutronManager.get_plugin()
if not hasattr(plugin, '_get_security_groups_on_port'):
self.skipTest("plugin doesn't use the mixin with this method")
neutron_context = context.get_admin_context()
res = self._create_security_group(self.fmt, 'webservers', 'webservers',
tenant_id='bad_tenant')
sg1 = self.deserialize(self.fmt, res)
with testtools.ExpectedException(ext_sg.SecurityGroupNotFound):
plugin._get_security_groups_on_port(
neutron_context,
{'port': {'security_groups': [sg1['security_group']['id']],
'tenant_id': 'tenant'}}
)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
remote_group_id = sg['security_group']['id']
self._delete('security-groups', remote_group_id,
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_admin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
neutron_context = context.Context('', 'test-tenant')
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
neutron_context=neutron_context)
def test_security_group_list_creates_default_security_group(self):
neutron_context = context.Context('', 'test-tenant')
sg = self._list('security-groups',
neutron_context=neutron_context).get('security_groups')
self.assertEqual(len(sg), 1)
def test_security_group_port_create_creates_default_security_group(self):
res = self._create_network(self.fmt, 'net1', True,
tenant_id='not_admin',
set_context=True)
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
tenant_id='not_admin', set_context=True)
sg = self._list('security-groups').get('security_groups')
self.assertEqual(len(sg), 1)
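    # A tenant's default security group is created on first use and ships
    # with four rules: unrestricted egress for IPv4 and IPv6, plus ingress
    # from the group itself for both ethertypes. The test below checks each
    # default rule field by field.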
def test_default_security_group_rules(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
security_group_id = groups['security_groups'][0]['id']
res = self.new_list_request('security-group-rules')
rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(rules['security_group_rules']), 4)
# Verify default rule for v4 egress
sg_rules = rules['security_group_rules']
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_egress, expected)
# Verify default rule for v6 egress
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_egress, expected)
# Verify default rule for v4 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv4,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_ingress, expected)
# Verify default rule for v6 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv6,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_ingress, expected)
def test_create_security_group_rule_remote_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description) as sg2:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_group_id = sg2['security_group']['id']
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_group_id', remote_group_id),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id
) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_and_code(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
            # port_range_min (ICMP type) is greater than port_range_max
            # (ICMP code) to confirm that the min <= max port check is
            # not applied to ICMP.
port_range_min = 8
port_range_max = 5
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# ICMP type
port_range_min = 8
# ICMP code
port_range_max = None
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmpv6_with_type_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
ethertype = const.IPv6
remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128"
protocol = const.PROTO_NAME_ICMP_V6
# ICMPV6 type
port_range_min = const.ICMPV6_TYPE_RA
# ICMPV6 code
port_range_max = None
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('ethertype', ethertype),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
None, None,
ethertype) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_source_group_ip_and_ip_prefix(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_bad_security_group_id(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant(self):
with self.security_group() as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': "bad_tenant"}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant_remote_group_id(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
sg2 = self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg2['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant',
'remote_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant_security_group_rule(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant'}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_remote_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_duplicate_rules(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22')
self._create_security_group_rule(self.fmt, rule)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_min_port_greater_max(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]:
rule = self._build_security_group_rule(
sg['security_group']['id'],
'ingress', protocol, '50', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_ports_but_no_protocol(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', None, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_port_range_min_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', None)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_port_range_max_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, None, '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_icmp_type_too_big(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, '256', None)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_icmp_code_too_big(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, '8', '256')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_icmp_with_code_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, None, '2')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_list_ports_security_group(self):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'])
req = self.new_list_request('ports')
res = req.get_response(self.api)
ports = self.deserialize(self.fmt, res)
port = ports['ports'][0]
self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['id'])
def test_list_security_group_rules(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
# Delete default rules as they would fail the following
# assertion at the end.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_resources('security-group-rule',
[sgr1, sgr2, sgr3],
query_params=q)
def test_list_security_group_rules_with_sort(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
# Delete default rules as they would fail the following
# assertion at the end.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_with_sort('security-group-rule',
(sgr3, sgr2, sgr1),
[('port_range_max', 'desc')],
query_params=q)
def test_list_security_group_rules_with_pagination(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
# Delete default rules as they would fail the following
# assertion at the end.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_with_pagination(
'security-group-rule', (sgr3, sgr2, sgr1),
('port_range_max', 'desc'), 2, 2,
query_params=q)
def test_list_security_group_rules_with_pagination_reverse(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
self._test_list_with_pagination_reverse(
'security-group-rule', (sgr3, sgr2, sgr1),
('port_range_max', 'desc'), 2, 2,
query_params='direction=egress')
def test_update_port_with_security_group(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[sg['security_group']['id']]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# Test update port without security group
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name']}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
self._delete('ports', port['port']['id'])
def test_update_port_with_multiple_security_groups(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg1:
with self.security_group() as sg2:
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1['security_group']['id'],
sg2['security_group']['id']])
port = self.deserialize(self.fmt, res)
self.assertEqual(len(
port['port'][ext_sg.SECURITYGROUPS]), 2)
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_empty_list(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': []}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
[])
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_none(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': None}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
[])
self._delete('ports', port['port']['id'])
def test_create_port_with_bad_security_group(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['bad_id'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_delete_security_group_port_in_use(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# try to delete security group that's in use
res = self._delete('security-groups',
sg['security_group']['id'],
webob.exc.HTTPConflict.code)
# delete the blocking port
self._delete('ports', port['port']['id'])
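    # Bulk rule creation is exercised two ways below: the native path, which
    # is skipped unless the plugin advertises native bulk support, and the
    # emulated path, which is forced by patching hasattr() so the API never
    # sees __native_bulk_support (see the *_emulated variants).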
def test_create_security_group_rule_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule1 = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '23',
'23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
ret = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
self.assertEqual(2, len(ret['security_group_rules']))
def test_create_security_group_rule_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule1 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_allow_all_ipv4(self):
with self.security_group() as sg:
rule = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': 'IPv4',
'tenant_id': 'test-tenant'}
res = self._create_security_group_rule(
self.fmt, {'security_group_rule': rule})
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule_v4 = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': 'IPv4',
'tenant_id': 'test-tenant'}
rule_v6 = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': 'IPv6',
'tenant_id': 'test-tenant'}
rules = {'security_group_rules': [rule_v4, rule_v6]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_duplicate_rule_in_post(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_db(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_different_security_group_ids(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg1:
with self.security_group() as sg2:
rule1 = self._build_security_group_rule(
sg1['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg2['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_invalid_ethertype(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype='IPv5')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_invalid_protocol(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp/ip'
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_port_with_non_uuid(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['not_valid'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_specific_id(self):
neutron_context = context.Context('', 'test-tenant')
specified_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
rule['security_group_rule'].update({'id': specified_id,
'port_range_min': None,
'port_range_max': None,
'remote_ip_prefix': None,
'remote_group_id': None})
result = self.plugin.create_security_group_rule(
neutron_context, rule)
self.assertEqual(specified_id, result['id'])
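# convert_ip_prefix_to_cidr() is expected to pass through None and any prefix
# that already carries a netmask, to append the host-length mask to bare
# addresses ('10.1.2.3' -> '10.1.2.3/32', an IPv6 address -> '/128'), and to
# raise n_exc.InvalidCIDR for anything that is not a valid prefix.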
class TestConvertIPPrefixToCIDR(base.BaseTestCase):
def test_convert_bad_ip_prefix_to_cidr(self):
for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
self.assertRaises(n_exc.InvalidCIDR,
ext_sg.convert_ip_prefix_to_cidr, val)
self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))
def test_convert_ip_prefix_no_netmask_to_cidr(self):
addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
for k, v in six.iteritems(addr):
self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
'%s/%s' % (k, v))
def test_convert_ip_prefix_with_netmask_to_cidr(self):
addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
for addr in addresses:
self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(addr), addr)
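# convert_protocol() should normalize numeric protocols (given as int or str)
# to strings, and reject unknown names and out-of-range numbers such as '256'
# and '-1' with SecurityGroupRuleInvalidProtocol.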
class TestConvertProtocol(base.BaseTestCase):
def test_convert_numeric_protocol(self):
self.assertIsInstance(ext_sg.convert_protocol('2'), str)
def test_convert_bad_protocol(self):
for val in ['bad', '256', '-1']:
self.assertRaises(ext_sg.SecurityGroupRuleInvalidProtocol,
ext_sg.convert_protocol, val)
def test_convert_numeric_protocol_to_string(self):
self.assertIsInstance(ext_sg.convert_protocol(2), str)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-beyond-top-level,arguments-differ
from os.path import join, basename
import numpy as np
import tensorflow as tf
from third_party.xiuminglib import xiuminglib as xm
from brdf.renderer import SphereRenderer
from brdf.merl.merl import MERL
from nerfactor.networks import mlp
from nerfactor.networks.embedder import Embedder
from nerfactor.networks.layers import LatentCode
from nerfactor.util import logging as logutil, io as ioutil
from nerfactor.models.base import Model as BaseModel
logger = logutil.Logger(loggee="models/brdf")
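# BRDF prior model: an MLP conditioned on a per-material latent code and on
# (optionally positionally encoded) Rusinkiewicz angles regresses a single
# non-negative, achromatic BRDF value. One latent code per training BRDF is
# optimized jointly with the network weights.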
class Model(BaseModel):
def __init__(self, config, debug=False):
super().__init__(config, debug=debug)
self.mlp_chunk = self.config.getint('DEFAULT', 'mlp_chunk')
# Embedders
self.embedder = self._init_embedder()
# Network components
self.net = self._init_net()
# Get BRDF names
data_dir = self.config.get('DEFAULT', 'data_root')
train_npz = xm.os.sortglob(data_dir, 'train_*.npz')
self.brdf_names = [
basename(x)[len('train_'):-len('.npz')] for x in train_npz]
# Add latent codes to optimize so that they get registered as trainable
z_dim = self.config.getint('DEFAULT', 'z_dim')
z_gauss_mean = self.config.getfloat('DEFAULT', 'z_gauss_mean')
z_gauss_std = self.config.getfloat('DEFAULT', 'z_gauss_std')
normalize_z = self.config.getboolean('DEFAULT', 'normalize_z')
n_brdfs = len(self.brdf_names)
self.latent_code = LatentCode(
n_brdfs, z_dim, mean=z_gauss_mean, std=z_gauss_std,
normalize=normalize_z)
def _init_net(self):
mlp_width = self.config.getint('DEFAULT', 'mlp_width')
mlp_depth = self.config.getint('DEFAULT', 'mlp_depth')
mlp_skip_at = self.config.getint('DEFAULT', 'mlp_skip_at')
net = {}
net['brdf_mlp'] = mlp.Network(
[mlp_width] * mlp_depth, act=['relu'] * mlp_depth,
skip_at=[mlp_skip_at])
net['brdf_out'] = mlp.Network([1], act=['softplus']) # > 0
return net
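    # When pos_enc is enabled, the 3-D Rusinkiewicz coordinates are lifted
    # with sin/cos at n_freqs log-spaced frequencies (the raw input is kept
    # too); otherwise tf.identity is used. Assuming the usual NeRF-style
    # scheme, the encoding is roughly:
    #     gamma(x) = (x, sin(2^0 x), cos(2^0 x), ...,
    #                 sin(2^(n_freqs - 1) x), cos(2^(n_freqs - 1) x))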
def _init_embedder(self):
pos_enc = self.config.getboolean('DEFAULT', 'pos_enc')
n_freqs = self.config.getint('DEFAULT', 'n_freqs')
        # Short-circuit if not using embedders
if not pos_enc:
embedder = {'rusink': tf.identity}
return embedder
# Rusink. coordinate embedder
kwargs = {
'incl_input': True,
'in_dims': 3,
'log2_max_freq': n_freqs - 1,
'n_freqs': n_freqs,
'log_sampling': True,
'periodic_func': [tf.math.sin, tf.math.cos]}
embedder_rusink = Embedder(**kwargs)
embedder = {'rusink': embedder_rusink}
return embedder
def call(self, batch, mode='train'):
self._validate_mode(mode)
id_, i, envmap_h, ims, spp, rusink, refl = batch
if mode == 'test' and i[0] == -1:
# Novel identities -- need interpolation
i_w1_mat1_w2_mat2 = id_[0].numpy().decode()
_, w1, mat1, w2, mat2 = i_w1_mat1_w2_mat2.split('_')
w1, w2 = float(w1), float(w2)
i1, i2 = self.brdf_names.index(mat1), self.brdf_names.index(mat2)
z = self.latent_code.interp(w1, i1, w2, i2)
z = tf.tile(z, (id_.shape[0], 1))
else:
z = self.latent_code(i)
brdf, brdf_reci = self._eval_brdf_at(z, rusink)
# For loss computation
pred = {'brdf': brdf, 'brdf_reci': brdf_reci}
gt = {'brdf': refl}
loss_kwargs = {}
# To visualize
to_vis = {
'id': id_, 'i': i, 'z': z, 'gt_brdf': refl,
'envmap_h': envmap_h, 'ims': ims, 'spp': spp}
for k, v in pred.items():
to_vis[k] = v
return pred, gt, loss_kwargs, to_vis
def _eval_brdf_at(self, z, rusink):
mlp_layers = self.net['brdf_mlp']
out_layer = self.net['brdf_out']
# Chunk by chunk to avoid OOM
chunks, chunks_reci = [], []
for i in range(0, rusink.shape[0], self.mlp_chunk):
end_i = min(rusink.shape[0], i + self.mlp_chunk)
z_chunk = z[i:end_i]
rusink_chunk = rusink[i:end_i, :]
rusink_embed = self.embedder['rusink'](rusink_chunk)
z_rusink = tf.concat((z_chunk, rusink_embed), axis=1)
chunk = out_layer(mlp_layers(z_rusink))
chunks.append(chunk)
# Reciprocity
phid = rusink[i:end_i, :1]
thetah_thetad = rusink[i:end_i, 1:]
rusink_chunk = tf.concat((phid + np.pi, thetah_thetad), axis=1)
rusink_embed = self.embedder['rusink'](rusink_chunk)
z_rusink = tf.concat((z_chunk, rusink_embed), axis=1)
chunk = out_layer(mlp_layers(z_rusink))
chunks_reci.append(chunk)
brdf = tf.concat(chunks, axis=0)
brdf_reci = tf.concat(chunks_reci, axis=0)
return brdf, brdf_reci # (n_rusink, 1)
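    # Helmholtz reciprocity is encouraged softly: _eval_brdf_at() also
    # evaluates every sample at phi_d + pi, and compute_loss() penalizes both
    # the direct and the reciprocal prediction against the same ground truth.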
def compute_loss(self, pred, gt, **kwargs):
loss_transform = self.config.get('DEFAULT', 'loss_transform')
if loss_transform.lower() == 'none':
f = tf.identity
elif loss_transform == 'log':
f = tf.math.log
elif loss_transform == 'divide':
f = lambda x: x / (x + 1.) # noqa
else:
raise NotImplementedError(loss_transform)
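        # 'log' and 'divide' both compress the unbounded, non-negative BRDF
        # range before the error is measured; e.g. x / (x + 1) maps [0, inf)
        # into [0, 1), so very bright specular samples carry less weight.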
# Accumulate loss
loss = 0
for weight, loss_func in self.wloss:
loss += weight * loss_func(f(gt['brdf']), f(pred['brdf']), **kwargs)
# Same ground truth for the reciprocal Rusink.
loss += weight * loss_func(
f(gt['brdf']), f(pred['brdf_reci']), **kwargs)
return loss
def vis_batch(
self, data_dict, outdir, mode='train', dump_raw_to=None, n_vis=64):
self._validate_mode(mode)
        # Short-circuit if training
if mode == 'train':
return
# Optionally dump raw to disk
if dump_raw_to is not None:
# ioutil.dump_dict_tensors(data_dict, dump_raw_to)
pass
# "Visualize" metadata
id_ = data_dict['id'][0]
id_ = id_.numpy().decode()
metadata_out = join(outdir, 'metadata.json')
metadata = {'id': id_}
ioutil.write_json(metadata, metadata_out)
# Visualize the latent codes
z = data_dict['z'][0, :].numpy()
z_png = join(outdir, 'z.png')
plot = xm.vis.plot.Plot(outpath=z_png)
plot.bar(z)
# Visualize the BRDF values
pred = data_dict['brdf'].numpy()
pred_reci = data_dict['brdf_reci'].numpy()
brdf_val = np.hstack((pred_reci, pred))
labels = ['Pred. (reci.)', 'Pred.']
if mode == 'vali':
gt = data_dict['gt_brdf'].numpy()
brdf_val = np.hstack((brdf_val, gt))
labels.append('GT')
brdf_val = brdf_val[::int(brdf_val.shape[0] / n_vis), :] # just a subset
brdf_val = np.log10(brdf_val) # log scale
brdf_png = join(outdir, 'log10_brdf.png')
plot = xm.vis.plot.Plot(labels=labels, outpath=brdf_png)
plot.bar(brdf_val)
if mode == 'vali':
return
# If testing, continue to visualize characteristic slice
merl = MERL()
envmap_h = data_dict['envmap_h'][0].numpy()
ims = data_dict['ims'][0].numpy()
spp = data_dict['spp'][0].numpy()
renderer = SphereRenderer(
'point', outdir, envmap_h=envmap_h, envmap_inten=40, ims=ims,
spp=spp)
cslice_out = join(outdir, 'cslice.png')
cslice_shape = merl.cube_rusink.shape[1:]
cslice_end_i = np.prod(cslice_shape[:2])
pred_cslice = pred[:cslice_end_i, :] # first 90x90 are for char. slices
cslice = pred_cslice.reshape(cslice_shape[:2])
cslice_img = merl.characteristic_slice_as_img(cslice)
xm.io.img.write_img(cslice_img, cslice_out)
# ... and render the predicted BRDF
render_out = join(outdir, 'render.png')
pred_render = pred[cslice_end_i:, :] # remaining are for rendering
brdf = np.zeros_like(renderer.lcontrib)
brdf[renderer.lvis.astype(bool)] = pred_render
render = renderer.render(brdf)
xm.io.img.write_arr(render, render_out, clip=True)
def compile_batch_vis(
self, batch_vis_dirs, outpref, mode='train', marker_size=16, fps=2):
"""If in 'test' mode, compiles visualzied results into:
(1) An HTML of reconstructions of seen identities; and
(2) A video of interpolating between seen identities.
"""
viewer_http = self.config.get('DEFAULT', 'viewer_prefix')
vis_dir = join(self.config.get('DEFAULT', 'data_root'), 'vis')
self._validate_mode(mode)
        # Short-circuit if training
if mode == 'train':
return None
# Put optimized latent code and BRDF value visualizations into HTML
rows, caps, types = [], [], []
# For each batch (which has just one sample)
for batch_dir in batch_vis_dirs:
metadata_path = join(batch_dir, 'metadata.json')
metadata = ioutil.read_json(metadata_path)
id_ = metadata['id']
metadata = str(metadata)
row = [
metadata,
join(batch_dir, 'z.png'),
join(batch_dir, 'log10_brdf.png')]
rowcaps = ["Metadata", "Latent Code", "BRDF (log-scale)"]
rowtypes = ['text', 'image', 'image']
            # If testing, add extra columns for char. slices and renders
if mode == 'test':
pred_cslice_path = join(batch_dir, 'cslice.png')
pred_render_path = join(batch_dir, 'render.png')
if '_' in id_:
# Interpolated identities
row_extra = [
pred_cslice_path, pred_render_path, "N/A", "N/A"]
rowtypes_extra = ['image', 'image', 'text', 'text']
else:
# Seen identities
gt_cslice_path = join(
vis_dir, 'cslice_achromatic', id_ + '.png')
gt_render_path = join(
vis_dir, 'render_achromatic', id_ + '.png')
row_extra = [
pred_cslice_path, pred_render_path, gt_cslice_path,
gt_render_path]
rowtypes_extra = ['image', 'image', 'image', 'image']
row += row_extra
rowcaps += [
"Pred. (char. slice)", "Pred. (render)", "GT (char. slice)",
"GT (render)"]
rowtypes += rowtypes_extra
rows.append(row)
caps.append(rowcaps)
types.append(rowtypes)
n_rows = len(rows)
assert n_rows > 0, "No row"
# Make HTML
html = xm.vis.html.HTML(bgcolor='white', text_color='black')
html.add_header("BRDF-MLP")
img_table = html.add_table()
for r, rcaps, rtypes in zip(rows, caps, types):
img_table.add_row(r, rtypes, captions=rcaps)
# Write HTML
out_html = outpref + '.html'
html_save = xm.decor.colossus_interface(html.save)
html_save(out_html)
view_at = viewer_http + out_html
# Done if validation
if mode == 'vali':
return view_at
# Testing, so continue to make a video for interpolation
frame_ind, frames = [], []
for batch_dir in batch_vis_dirs:
metadata_path = join(batch_dir, 'metadata.json')
metadata = ioutil.read_json(metadata_path)
id_ = metadata['id']
# Skip if this is a seen identity
if '_' not in id_:
continue
i, w1, mat1_id, w2, mat2_id = id_.split('_')
i = int(i)
w1, w2 = float(w1), float(w2)
mat1_path = join(vis_dir, 'render_achromatic', mat1_id + '.png')
mat2_path = join(vis_dir, 'render_achromatic', mat2_id + '.png')
pred_path = join(batch_dir, 'render.png')
mat1 = xm.io.img.load(mat1_path)
mat2 = xm.io.img.load(mat2_path)
pred = xm.io.img.load(pred_path)
# Resize according to width because we will vertically concat.
mat1 = xm.img.resize(mat1, new_w=pred.shape[1])
mat2 = xm.img.resize(mat2, new_w=pred.shape[1])
# Label the maps
font_size = int(0.06 * pred.shape[1])
label_kwargs = {
'font_color': (0, 0, 0), 'font_size': font_size,
'font_ttf': xm.const.Path.open_sans_regular}
mat1_labeled = xm.vis.text.put_text(mat1, "Mat. 1", **label_kwargs)
mat2_labeled = xm.vis.text.put_text(mat2, "Mat. 2", **label_kwargs)
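            # Red square marker: its vertical offset is w2 * height, so it
            # presumably slides from the Mat. 1 end (top) toward the Mat. 2
            # end (bottom) as the interpolation weight changes.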
marker_i = int(w2 * pred.shape[0])
marker_vstart = max(0, marker_i - marker_size // 2)
marker_vend = min(marker_i + marker_size // 2, pred.shape[0] - 1)
maxv = np.iinfo(pred.dtype).max
red = np.array((maxv, 0, 0)).reshape((1, 1, 3))
pred[marker_vstart:marker_vend, :marker_size, :] = red
frame = np.vstack((mat1_labeled, pred, mat2_labeled))
frames.append(frame)
frame_ind.append(i)
outvid = outpref + '.mp4'
frames_sort = [
y for (x, y) in sorted(
zip(frame_ind, frames), key=lambda pair: pair[0])]
xm.vis.video.make_video(frames_sort, outpath=outvid, fps=fps)
view_at += '\n\t%s' % (viewer_http + outvid)
return view_at # to be logged into TensorBoard
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import time
import unittest
from mock import call
from mock import patch
from mock import MagicMock as Mock
import pyrax
from pyrax.manager import BaseManager
from pyrax.clouddns import assure_domain
from pyrax.clouddns import CloudDNSClient
from pyrax.clouddns import CloudDNSDomain
from pyrax.clouddns import CloudDNSManager
from pyrax.clouddns import CloudDNSRecord
from pyrax.clouddns import ResultsIterator
from pyrax.clouddns import DomainResultsIterator
from pyrax.clouddns import SubdomainResultsIterator
from pyrax.clouddns import RecordResultsIterator
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
example_uri = "http://example.com"
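# These tests run entirely against the fake DNS client/manager from
# pyrax.fakes: the HTTP verbs are replaced with mocks, so no network calls
# are made, and the async polling delay is shrunk to near zero in setUp().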
class CloudDNSTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CloudDNSTest, self).__init__(*args, **kwargs)
def setUp(self):
super(CloudDNSTest, self).setUp()
self.client = fakes.FakeDNSClient()
self.client._manager = fakes.FakeDNSManager(self.client)
self.client._manager._set_delay(0.000001)
self.domain = fakes.FakeDNSDomain()
self.domain.manager = self.client._manager
def tearDown(self):
super(CloudDNSTest, self).tearDown()
self.client = None
self.domain = None
def test_assure_domain(self):
@assure_domain
def test(self, domain):
return domain
clt = self.client
dom = self.domain
d1 = test(clt, dom)
self.assertEqual(d1, dom)
self.assertTrue(isinstance(d1, CloudDNSDomain))
def test_assure_domain_id(self):
@assure_domain
def test(self, domain):
return domain
clt = self.client
dom = self.domain
clt._manager._get = Mock(return_value=dom)
d2 = test(clt, dom.id)
self.assertEqual(d2, dom)
self.assertTrue(isinstance(d2, CloudDNSDomain))
def test_assure_domain_name(self):
@assure_domain
def test(self, domain):
return domain
clt = self.client
dom = self.domain
clt._manager._get = Mock(side_effect=exc.NotFound(""))
clt._manager._list = Mock(return_value=[dom])
d3 = test(clt, dom.name)
self.assertEqual(d3, dom)
self.assertTrue(isinstance(d3, CloudDNSDomain))
def test_set_timeout(self):
clt = self.client
mgr = clt._manager
new_timeout = random.randint(0, 99)
clt.set_timeout(new_timeout)
self.assertEqual(mgr._timeout, new_timeout)
def test_set_delay(self):
clt = self.client
mgr = clt._manager
new_delay = random.randint(0, 99)
clt.set_delay(new_delay)
self.assertEqual(mgr._delay, new_delay)
def test_reset_paging_all(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["total_entries"] = 99
mgr._paging["record"]["next_uri"] = example_uri
mgr._reset_paging("all")
self.assertIsNone(mgr._paging["domain"]["total_entries"])
self.assertIsNone(mgr._paging["record"]["next_uri"])
def test_reset_paging_body(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["total_entries"] = 99
mgr._paging["domain"]["next_uri"] = "FAKE"
exp_entries = random.randint(100, 200)
uri_string_next = utils.random_unicode()
next_uri = "%s/domains/%s" % (example_uri, uri_string_next)
uri_string_prev = utils.random_unicode()
prev_uri = "%s/domains/%s" % (example_uri, uri_string_prev)
body = {"totalEntries": exp_entries,
"links": [
{"href": next_uri,
"rel": "next"},
{"href": prev_uri,
"rel": "previous"}]}
mgr._reset_paging("domain", body=body)
self.assertEqual(mgr._paging["domain"]["total_entries"], exp_entries)
self.assertEqual(mgr._paging["domain"]["next_uri"], "/domains/%s" %
uri_string_next)
self.assertEqual(mgr._paging["domain"]["prev_uri"], "/domains/%s" %
uri_string_prev)
def test_get_pagination_qs(self):
clt = self.client
mgr = clt._manager
test_limit = random.randint(1, 100)
test_offset = random.randint(1, 100)
qs = mgr._get_pagination_qs(test_limit, test_offset)
self.assertEqual(qs, "?limit=%s&offset=%s" % (test_limit, test_offset))
def test_manager_list(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
clt.method_get = Mock(return_value=({}, ret_body))
ret = clt.list()
self.assertEqual(len(ret), 1)
def test_manager_list_all(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
uri_string_next = utils.random_unicode()
next_uri = "%s/domains/%s" % (example_uri, uri_string_next)
mgr.count = 0
def mock_get(uri):
if mgr.count:
return ({}, ret_body)
mgr.count += 1
ret = {"totalEntries": 2,
"links": [
{"href": next_uri,
"rel": "next"}]}
ret.update(ret_body)
return ({}, ret)
clt.method_get = Mock(wraps=mock_get)
ret = mgr._list(example_uri, list_all=True)
self.assertEqual(len(ret), 2)
def test_list_previous_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["prev_uri"] = example_uri
mgr._list = Mock()
clt.list_previous_page()
mgr._list.assert_called_once_with(example_uri)
def test_list_previous_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_previous_page)
def test_list_next_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["next_uri"] = example_uri
mgr._list = Mock()
clt.list_next_page()
mgr._list.assert_called_once_with(example_uri)
def test_list_next_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_next_page)
def test_list_subdomains_previous_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["subdomain"]["prev_uri"] = example_uri
mgr._list_subdomains = Mock()
clt.list_subdomains_previous_page()
mgr._list_subdomains.assert_called_once_with(example_uri)
def test_list_subdomains_previous_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_subdomains_previous_page)
def test_list_subdomains_next_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["subdomain"]["next_uri"] = example_uri
mgr._list_subdomains = Mock()
clt.list_subdomains_next_page()
mgr._list_subdomains.assert_called_once_with(example_uri)
def test_list_subdomains_next_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_subdomains_next_page)
def test_list_records_previous_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["record"]["prev_uri"] = example_uri
mgr._list_records = Mock()
clt.list_records_previous_page()
mgr._list_records.assert_called_once_with(example_uri)
def test_list_records_previous_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_records_previous_page)
def test_list_records_next_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["record"]["next_uri"] = example_uri
mgr._list_records = Mock()
clt.list_records_next_page()
mgr._list_records.assert_called_once_with(example_uri)
def test_list_records_next_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_records_next_page)
def test_manager_get(self):
ret_body = {"recordsList": {
"records": [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "me@example.com",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}}
mgr = self.client._manager
mgr.api.method_get = Mock(return_value=(None, ret_body))
dom = mgr._get("fake")
self.assertTrue(isinstance(dom, CloudDNSDomain))
def test_manager_create(self):
clt = self.client
mgr = clt._manager
ret_body = {"callbackUrl": example_uri,
"status": "RUNNING"}
mgr.api.method_post = Mock(return_value=(None, ret_body))
stat_body = {"status": "complete",
"response": {mgr.response_key: [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "me@example.com",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}}
mgr.api.method_get = Mock(return_value=(None, stat_body))
dom = mgr._create("fake", {})
self.assertTrue(isinstance(dom, CloudDNSDomain))
def test_manager_create_error(self):
clt = self.client
mgr = clt._manager
ret_body = {"callbackUrl": example_uri,
"status": "RUNNING"}
mgr.api.method_post = Mock(return_value=(None, ret_body))
stat_body = {"status": "ERROR",
"error": {
"details": "fail",
"code": 666}}
mgr.api.method_get = Mock(return_value=(None, stat_body))
self.assertRaises(exc.DomainCreationFailed, mgr._create, "fake", {})
def test_manager_findall(self):
clt = self.client
mgr = clt._manager
mgr._list = Mock()
mgr.findall(name="fake")
mgr._list.assert_called_once_with("/domains?name=fake", list_all=True)
def test_manager_findall_default(self):
clt = self.client
mgr = clt._manager
sav = BaseManager.findall
BaseManager.findall = Mock()
mgr.findall(foo="bar")
BaseManager.findall.assert_called_once_with(foo="bar")
BaseManager.findall = sav
def test_manager_empty_get_body_error(self):
clt = self.client
mgr = clt._manager
mgr.api.method_get = Mock(return_value=(None, None))
self.assertRaises(exc.ServiceResponseFailure, mgr.list)
def test_create_body(self):
mgr = self.client._manager
fake_name = utils.random_unicode()
body = mgr._create_body(fake_name, "fake@fake.com")
self.assertEqual(body["domains"][0]["name"], fake_name)
def test_async_call_body(self):
clt = self.client
mgr = clt._manager
body = {"fake": "fake"}
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"response": {"result": "fake"},
"status": "COMPLETE"}
method = "PUT"
clt.method_put = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
ret = mgr._async_call(uri, body=body, method=method)
clt.method_put.assert_called_once_with(uri, body=body)
clt.method_get.assert_called_once_with(massaged_uri)
self.assertEqual(ret, ({}, get_resp["response"]))
def test_async_call_no_body(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"response": {"result": "fake"},
"status": "COMPLETE"}
method = "DELETE"
clt.method_delete = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
ret = mgr._async_call(uri, method=method)
clt.method_delete.assert_called_once_with(uri)
clt.method_get.assert_called_once_with(massaged_uri)
self.assertEqual(ret, ({}, get_resp["response"]))
def test_async_call_no_response(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"status": "COMPLETE"}
method = "DELETE"
clt.method_delete = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
ret = mgr._async_call(uri, method=method, has_response=False)
clt.method_delete.assert_called_once_with(uri)
clt.method_get.assert_called_once_with(massaged_uri)
self.assertEqual(ret, ({}, get_resp))
def test_async_call_timeout(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
clt.set_timeout(0.000001)
clt.method_get = Mock(return_value=({}, {"callbackUrl": callback_uri,
"status": "RUNNING"}))
self.assertRaises(exc.DNSCallTimedOut, mgr._async_call, uri,
method="GET")
def test_async_call_error(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"response": {"result": "fake"},
"status": "ERROR"}
method = "DELETE"
clt.method_delete = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
err_class = exc.DomainRecordDeletionFailed
err = err_class("oops")
mgr._process_async_error = Mock(side_effect=err)
self.assertRaises(err_class,
mgr._async_call, uri, method=method, error_class=err_class)
clt.method_delete.assert_called_once_with(uri)
clt.method_get.assert_called_once_with(massaged_uri)
mgr._process_async_error.assert_called_once_with(get_resp, err_class)
def test_process_async_error(self):
clt = self.client
mgr = clt._manager
err = {"error": {"message": "fake", "details": "", "code": 400}}
err_class = exc.DomainRecordDeletionFailed
self.assertRaises(err_class, mgr._process_async_error, err, err_class)
def test_process_async_error_nested(self):
clt = self.client
mgr = clt._manager
err = {"error": {
"failedItems": {"faults": [
{"message": "fake1", "details": "", "code": 400},
{"message": "fake2", "details": "", "code": 400},
]}}}
err_class = exc.DomainRecordDeletionFailed
self.assertRaises(err_class, mgr._process_async_error, err, err_class)
def test_changes_since(self):
clt = self.client
dom = self.domain
clt.method_get = Mock(return_value=({}, {"changes": ["fake"]}))
dt = "2012-01-01"
ret = clt.changes_since(dom, dt)
uri = "/domains/%s/changes?since=2012-01-01T00:00:00+0000" % dom.id
clt.method_get.assert_called_once_with(uri)
self.assertEqual(ret, ["fake"])
def test_export_domain(self):
clt = self.client
dom = self.domain
export = utils.random_unicode()
clt._manager._async_call = Mock(return_value=({}, {"contents": export}))
ret = clt.export_domain(dom)
uri = "/domains/%s/export" % dom.id
clt._manager._async_call.assert_called_once_with(uri,
error_class=exc.NotFound, method="GET")
self.assertEqual(ret, export)
def test_import_domain(self):
clt = self.client
mgr = clt._manager
data = utils.random_unicode()
mgr._async_call = Mock(return_value=({}, "fake"))
req_body = {"domains": [{
"contentType": "BIND_9",
"contents": data,
}]}
ret = clt.import_domain(data)
mgr._async_call.assert_called_once_with("/domains/import",
method="POST", body=req_body,
error_class=exc.DomainCreationFailed)
def test_update_domain_empty(self):
self.assertRaises(exc.MissingDNSSettings, self.client.update_domain,
self.domain)
def test_update_domain(self):
clt = self.client
dom = self.domain
mgr = clt._manager
emailAddress = None
comment = utils.random_unicode()
ttl = 666
mgr._async_call = Mock(return_value=({}, "fake"))
uri = "/domains/%s" % utils.get_id(dom)
req_body = {"comment": comment,
"ttl": ttl,
}
ret = clt.update_domain(dom, emailAddress, ttl, comment)
mgr._async_call.assert_called_once_with(uri, method="PUT",
body=req_body, error_class=exc.DomainUpdateFailed,
has_response=False)
def test_delete(self):
clt = self.client
mgr = clt._manager
dom = self.domain
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s" % utils.get_id(dom)
clt.delete(dom)
mgr._async_call.assert_called_once_with(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False)
def test_delete_subdomains(self):
clt = self.client
mgr = clt._manager
dom = self.domain
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s?deleteSubdomains=true" % utils.get_id(dom)
clt.delete(dom, delete_subdomains=True)
mgr._async_call.assert_called_once_with(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False)
def test_list_subdomains(self):
clt = self.client
mgr = clt._manager
dom = self.domain
resp_body = {'Something': 'here'}
clt.method_get = Mock(return_value=({}, resp_body))
uri = "/domains?name=%s&limit=5" % dom.name
clt.list_subdomains(dom, limit=5)
clt.method_get.assert_called_once_with(uri)
def test_list_records(self):
clt = self.client
mgr = clt._manager
dom = self.domain
resp_body = {'Something': 'here'}
clt.method_get = Mock(return_value=({}, resp_body))
uri = "/domains/%s/records" % utils.get_id(dom)
clt.list_records(dom)
clt.method_get.assert_called_once_with(uri)
def test_search_records(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
uri = "/domains/%s/records?type=%s" % (utils.get_id(dom), typ)
ret_body = {"records": [{"type": typ}]}
mgr.count = 0
def mock_get(uri):
if mgr.count:
return ({}, ret_body)
mgr.count += 1
ret = {"totalEntries": 2,
"links": [
{"href": uri,
"rel": "next"}]}
ret.update(ret_body)
return ({}, ret)
clt.method_get = Mock(wraps=mock_get)
clt.search_records(dom, typ)
calls = [call(uri), call(uri)]
clt.method_get.assert_has_calls(calls)
def test_search_records_params(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
resp_body = {"Something": "here"}
clt.method_get = Mock(return_value=({}, resp_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
clt.search_records(dom, typ, name=nm, data=data)
clt.method_get.assert_called_once_with(uri)
def test_find_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
ret_body = {"records": [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "me@example.com",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}
clt.method_get = Mock(return_value=({}, ret_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
clt.find_record(dom, typ, name=nm, data=data)
clt.method_get.assert_called_once_with(uri)
def test_find_record_not_found(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
ret_body = {"records": []}
clt.method_get = Mock(return_value=({}, ret_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
self.assertRaises(exc.DomainRecordNotFound, clt.find_record, dom, typ,
name=nm, data=data)
def test_find_record_not_unique(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
ret_body = {"records": [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "me@example.com",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}, {"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "me@example.com",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}
clt.method_get = Mock(return_value=({}, ret_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
self.assertRaises(exc.DomainRecordNotUnique, clt.find_record, dom, typ,
name=nm, data=data)
def test_add_records(self):
clt = self.client
mgr = clt._manager
dom = self.domain
rec = {"type": "A", "name": "example.com", "data": "0.0.0.0"}
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s/records" % utils.get_id(dom)
clt.add_records(dom, rec)
mgr._async_call.assert_called_once_with(uri, method="POST",
body={"records": [rec]},
error_class=exc.DomainRecordAdditionFailed,
has_response=False)
def test_get_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
nm = utils.random_unicode()
rec_id = utils.random_unicode()
rec_dict = {"id": rec_id, "name": nm}
mgr.api.method_get = Mock(return_value=(None, rec_dict))
ret = clt.get_record(dom, rec_id)
mgr.api.method_get.assert_called_once_with("/%s/%s/records/%s" %
(mgr.uri_base, dom.id, rec_id))
def test_update_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
nm = utils.random_unicode()
rec_id = utils.random_unicode()
rec = fakes.FakeDNSRecord(mgr, {"id": rec_id, "name": nm})
ttl = 9999
data = "0.0.0.0"
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s/records" % utils.get_id(dom)
req_body = {"id": rec_id, "name": nm, "data": data, "ttl": ttl}
clt.update_record(dom, rec, data=data, ttl=ttl)
mgr._async_call.assert_called_once_with(uri, method="PUT",
body={"records": [req_body]},
error_class=exc.DomainRecordUpdateFailed,
has_response=False)
def test_delete_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
rec = CloudDNSRecord(mgr, {"id": utils.random_unicode()})
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s/records/%s" % (utils.get_id(dom), utils.get_id(rec))
clt.delete_record(dom, rec)
mgr._async_call.assert_called_once_with(uri, method="DELETE",
error_class=exc.DomainRecordDeletionFailed,
has_response=False)
def test_resolve_device_type(self):
clt = self.client
mgr = clt._manager
device = fakes.FakeDNSDevice()
typ = mgr._resolve_device_type(device)
self.assertEqual(typ, "loadbalancer")
device = fakes.FakeLoadBalancer()
typ = mgr._resolve_device_type(device)
self.assertEqual(typ, "loadbalancer")
def test_resolve_device_type_invalid(self):
clt = self.client
mgr = clt._manager
device = object()
self.assertRaises(exc.InvalidDeviceType, mgr._resolve_device_type,
device)
def test_get_ptr_details_lb(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
dvc_type = "loadbalancer"
sav = pyrax._get_service_endpoint
pyrax._get_service_endpoint = Mock(return_value=example_uri)
expected_href = "%s/loadbalancers/%s" % (example_uri, dvc.id)
href, svc_name = mgr._get_ptr_details(dvc, dvc_type)
self.assertEqual(svc_name, "cloudLoadBalancers")
self.assertEqual(href, expected_href)
pyrax._get_service_endpoint = sav
def test_list_ptr_records(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
uri = "/rdns/%s?href=%s" % (svc_name, href)
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
clt.method_get = Mock(return_value=({}, {"records": []}))
ret = clt.list_ptr_records(dvc)
clt.method_get.assert_called_once_with(uri)
self.assertEqual(ret, [])
def test_list_ptr_records_not_found(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
uri = "/rdns/%s?href=%s" % (svc_name, href)
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
clt.method_get = Mock(side_effect=exc.NotFound(""))
ret = clt.list_ptr_records(dvc)
clt.method_get.assert_called_once_with(uri)
self.assertEqual(ret, [])
def test_add_ptr_records(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
rec = {"foo": "bar"}
body = {"recordsList": {"records": [rec]},
"link": {"content": "", "href": href, "rel": svc_name}}
uri = "/rdns"
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
mgr._async_call = Mock(return_value=({}, {"records": []}))
clt.add_ptr_records(dvc, rec)
mgr._async_call.assert_called_once_with(uri, body=body,
error_class=exc.PTRRecordCreationFailed, method="POST")
def test_update_ptr_record(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
ptr_record = fakes.FakeDNSPTRRecord({"id": utils.random_unicode()})
ttl = 9999
data = "0.0.0.0"
long_comment = "x" * 200
trim_comment = long_comment[:160]
nm = "example.com"
rec = {"name": nm, "id": ptr_record.id, "type": "PTR", "data": data,
"ttl": ttl, "comment": trim_comment}
uri = "/rdns"
body = {"recordsList": {"records": [rec]}, "link": {"content": "",
"href": href, "rel": svc_name}}
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
mgr._async_call = Mock(return_value=({}, {"records": []}))
clt.update_ptr_record(dvc, ptr_record, domain_name=nm, data=data,
ttl=ttl, comment=long_comment)
mgr._async_call.assert_called_once_with(uri, body=body,
error_class=exc.PTRRecordUpdateFailed, method="PUT",
has_response=False)
def test_delete_ptr_records(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
ip_address = "0.0.0.0"
uri = "/rdns/%s?href=%s&ip=%s" % (svc_name, href, ip_address)
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
mgr._async_call = Mock(return_value=({}, {"records": []}))
ret = clt.delete_ptr_records(dvc, ip_address=ip_address)
mgr._async_call.assert_called_once_with(uri,
error_class=exc.PTRRecordDeletionFailed,
method="DELETE", has_response=False)
def test_get_absolute_limits(self):
clt = self.client
rand_limit = utils.random_unicode()
resp = {"limits": {"absolute": rand_limit}}
clt.method_get = Mock(return_value=({}, resp))
ret = clt.get_absolute_limits()
self.assertEqual(ret, rand_limit)
def test_get_rate_limits(self):
clt = self.client
limits = [{"uri": "fake1", "limit": 1},
{"uri": "fake2", "limit": 2}]
resp = {"limits": {"rate": limits}}
resp_limits = [{"uri": "fake1", "limits": 1},
{"uri": "fake2", "limits": 2}]
clt.method_get = Mock(return_value=({}, resp))
ret = clt.get_rate_limits()
self.assertEqual(ret, resp_limits)
def test_results_iterator(self):
clt = self.client
mgr = clt._manager
self.assertRaises(NotImplementedError, ResultsIterator, mgr)
def test_iter(self):
clt = self.client
mgr = clt._manager
res_iter = DomainResultsIterator(mgr)
ret = res_iter.__iter__()
self.assertTrue(ret is res_iter)
def test_iter_next(self):
clt = self.client
mgr = clt._manager
res_iter = DomainResultsIterator(mgr)
clt.method_get = Mock(return_value=({}, {"domains": []}))
self.assertRaises(StopIteration, res_iter.next)
def test_iter_items_first_fetch(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
clt.method_get = Mock(return_value=({}, ret_body))
res_iter = DomainResultsIterator(mgr)
ret = res_iter.next()
self.assertTrue(isinstance(ret, CloudDNSDomain))
clt.method_get.assert_called_once_with("/domains")
def test_iter_items_next_fetch(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
clt.method_get = Mock(return_value=({}, ret_body))
res_iter = DomainResultsIterator(mgr)
res_iter.next_uri = example_uri
ret = res_iter.next()
self.assertTrue(isinstance(ret, CloudDNSDomain))
def test_iter_items_next_stop(self):
clt = self.client
mgr = clt._manager
res_iter = DomainResultsIterator(mgr)
res_iter.next_uri = None
self.assertRaises(StopIteration, res_iter.next)
def test_subdomain_iter(self):
clt = self.client
mgr = clt._manager
res_iter = SubdomainResultsIterator(mgr)
self.assertEqual(res_iter.paging_service, "subdomain")
def test_record_iter(self):
clt = self.client
mgr = clt._manager
res_iter = RecordResultsIterator(mgr)
self.assertEqual(res_iter.paging_service, "record")
# Patch BaseClient's method_get to make it always return an empty
# body. The client's method_get uses super() to reach BaseClient's
# method_get.
@patch.object(pyrax.client.BaseClient, "method_get",
new=lambda x, y: (None, None))
def test_client_empty_get_body_error(self):
clt = self.client
self.assertRaises(exc.ServiceResponseFailure, clt.get_absolute_limits)
if __name__ == "__main__":
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
from oslo.config import cfg
from nova.compute import power_state
from nova import exception
from nova import test
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.fake.FakeDriver',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.TestCase):
def setUp(self):
super(BareMetalDriverNoDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = bm_driver.BareMetalDriver(None)
def test_validate_driver_loading(self):
self.assertTrue(isinstance(self.driver.driver,
fake.FakeDriver))
self.assertTrue(isinstance(self.driver.vif_driver,
fake.FakeVifDriver))
self.assertTrue(isinstance(self.driver.volume_driver,
fake.FakeVolumeDriver))
self.assertTrue(isinstance(self.driver.firewall_driver,
fake.FakeFirewallDriver))
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalDriverWithDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
fake_image.stub_out_image_service(self.stubs)
self.context = utils.get_test_admin_context()
self.driver = bm_driver.BareMetalDriver(None)
self.addCleanup(fake_image.FakeImageService_reset)
def _create_node(self, node_info=None, nic_info=None):
result = {}
if node_info is None:
node_info = bm_db_utils.new_bm_node(
id=123,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
if nic_info is None:
nic_info = [
{'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
'port_no': 1},
{'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
'port_no': 2},
]
result['node_info'] = node_info
result['nic_info'] = nic_info
result['node'] = db.bm_node_create(self.context, node_info)
for nic in nic_info:
db.bm_interface_create(
self.context,
result['node']['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
result['instance'] = utils.get_test_instance()
result['instance']['node'] = result['node']['uuid']
result['spawn_params'] = dict(
admin_password='test_pass',
block_device_info=None,
context=self.context,
image_meta=utils.get_test_image_info(
None, result['instance']),
injected_files=[('/fake/path', 'hello world')],
instance=result['instance'],
network_info=utils.get_test_network_info(),
)
result['destroy_params'] = dict(
instance=result['instance'],
network_info=result['spawn_params']['network_info'],
block_device_info=result['spawn_params']['block_device_info'],
)
return result
def test_get_host_stats(self):
node = self._create_node()
stats = self.driver.get_host_stats()
self.assertTrue(isinstance(stats, list))
self.assertEqual(len(stats), 1)
stats = stats[0]
self.assertEqual(stats['cpu_arch'], 'test')
self.assertEqual(stats['test_spec'], 'test_value')
self.assertEqual(stats['hypervisor_type'], 'baremetal')
self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
self.assertEqual(stats['host'], 'test_host')
self.assertEqual(stats['vcpus'], 2)
self.assertEqual(stats['host_memory_total'], 2048)
def test_spawn_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
def test_macs_from_nic_for_instance(self):
node = self._create_node()
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_after_spawn(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance(self):
node = self._create_node()
expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_no_interfaces(self):
# Nodes cannot boot with no MACs, so we raise an error if that happens.
node = self._create_node(nic_info=[])
self.assertRaises(exception.NovaException,
self.driver.macs_for_instance, node['instance'])
def test_spawn_node_already_associated(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'instance_uuid': '1234-5678'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
def test_spawn_node_in_use(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
def test_spawn_node_not_found(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'uuid': 'hide-this-node'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
def test_spawn_fails(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.DELETED)
def test_spawn_fails_to_cleanup(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ERROR)
def test_destroy_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.driver.destroy(**node['destroy_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.DELETED)
self.assertEqual(row['instance_uuid'], None)
self.assertEqual(row['instance_name'], None)
def test_destroy_fails(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.driver.spawn(**node['spawn_params'])
self.assertRaises(test.TestingException,
self.driver.destroy, **node['destroy_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ERROR)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
def test_get_available_resources(self):
node = self._create_node()
resources = self.driver.get_available_resource(node['node']['uuid'])
self.assertEqual(resources['memory_mb'],
node['node_info']['memory_mb'])
self.assertEqual(resources['memory_mb_used'], 0)
self.driver.spawn(**node['spawn_params'])
resources = self.driver.get_available_resource(node['node']['uuid'])
self.assertEqual(resources['memory_mb_used'],
node['node_info']['memory_mb'])
self.driver.destroy(**node['destroy_params'])
resources = self.driver.get_available_resource(node['node']['uuid'])
self.assertEqual(resources['memory_mb_used'], 0)
def test_get_available_nodes(self):
self.assertEqual(0, len(self.driver.get_available_nodes()))
node1 = self._create_node()
self.assertEqual(1, len(self.driver.get_available_nodes()))
node1['instance']['hostname'] = 'test-host-1'
self.driver.spawn(**node1['spawn_params'])
self.assertEqual(1, len(self.driver.get_available_nodes()))
self.assertEqual([node1['node']['uuid']],
self.driver.get_available_nodes())
def test_list_instances(self):
self.assertEqual([], self.driver.list_instances())
node1 = self._create_node()
self.assertEqual([], self.driver.list_instances())
node_info = bm_db_utils.new_bm_node(
id=456,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
nic_info = [
{'address': 'cc:cc:cc', 'datapath_id': '0x1',
'port_no': 1},
{'address': 'dd:dd:dd', 'datapath_id': '0x2',
'port_no': 2},
]
node2 = self._create_node(node_info=node_info, nic_info=nic_info)
self.assertEqual([], self.driver.list_instances())
node1['instance']['hostname'] = 'test-host-1'
node2['instance']['hostname'] = 'test-host-2'
self.driver.spawn(**node1['spawn_params'])
self.assertEqual(['test-host-1'],
self.driver.list_instances())
self.driver.spawn(**node2['spawn_params'])
self.assertEqual(['test-host-1', 'test-host-2'],
self.driver.list_instances())
self.driver.destroy(**node1['destroy_params'])
self.assertEqual(['test-host-2'],
self.driver.list_instances())
self.driver.destroy(**node2['destroy_params'])
self.assertEqual([], self.driver.list_instances())
def test_get_info_no_such_node(self):
node = self._create_node()
self.assertRaises(exception.InstanceNotFound,
self.driver.get_info,
node['instance'])
def test_get_info_ok(self):
node = self._create_node()
db.bm_node_associate_and_update(self.context, node['node']['uuid'],
{'instance_uuid': node['instance']['uuid'],
'instance_name': node['instance']['hostname'],
'task_state': baremetal_states.ACTIVE})
res = self.driver.get_info(node['instance'])
self.assertEqual(res['state'], power_state.RUNNING)
def test_get_info_with_defunct_pm(self):
# test fix for bug 1178378
node = self._create_node()
db.bm_node_associate_and_update(self.context, node['node']['uuid'],
{'instance_uuid': node['instance']['uuid'],
'instance_name': node['instance']['hostname'],
'task_state': baremetal_states.ACTIVE})
# fake the power manager and don't get a power state
self.mox.StubOutWithMock(fake.FakePowerManager, 'is_power_on')
fake.FakePowerManager.is_power_on().AndReturn(None)
self.mox.ReplayAll()
res = self.driver.get_info(node['instance'])
# prior to the fix, returned power_state was SHUTDOWN
self.assertEqual(res['state'], power_state.NOSTATE)
self.mox.VerifyAll()
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import itertools
from pymatgen.core.lattice import Lattice
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.operations import SymmOp
class LatticeTestCase(PymatgenTest):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.cubic = self.lattice
self.tetragonal = Lattice.tetragonal(10, 20)
self.orthorhombic = Lattice.orthorhombic(10, 20, 30)
self.monoclinic = Lattice.monoclinic(10, 20, 30, 66)
self.hexagonal = Lattice.hexagonal(10, 20)
self.rhombohedral = Lattice.rhombohedral(10, 77)
family_names = ["cubic", "tetragonal", "orthorhombic", "monoclinic",
"hexagonal", "rhombohedral"]
self.families = {}
for name in family_names:
self.families[name] = getattr(self, name)
def test_init(self):
a = 9.026
lattice = Lattice.cubic(a)
self.assertIsNotNone(lattice, "Initialization from Lattice.cubic failed")
lattice2 = Lattice([[a, 0, 0], [0, a, 0], [0, 0, a]])
for i in range(0, 3):
for j in range(0, 3):
self.assertAlmostEqual(lattice.matrix[i][j],
lattice2.matrix[i][j], 5,
"Inconsistent matrix from two inits!")
def test_copy(self):
cubic_copy = self.cubic.copy()
self.assertTrue(cubic_copy == self.cubic)
self.assertFalse(cubic_copy._matrix is self.cubic._matrix)
def test_get_cartesian_or_frac_coord(self):
coord = self.lattice.get_cartesian_coords([0.15, 0.3, 0.4])
self.assertArrayAlmostEqual(coord, [1.5, 3., 4.])
self.assertArrayAlmostEqual(
self.tetragonal.get_fractional_coords([12.12312, 45.2134,
1.3434]),
[1.212312, 4.52134, 0.06717])
# Random test that get_cartesian_coords and get_fractional_coords reverse each other.
rand_coord = np.random.random_sample(3)
coord = self.tetragonal.get_cartesian_coords(rand_coord)
fcoord = self.tetragonal.get_fractional_coords(coord)
self.assertArrayAlmostEqual(fcoord, rand_coord)
def test_reciprocal_lattice(self):
recip_latt = self.lattice.reciprocal_lattice
self.assertArrayAlmostEqual(recip_latt.matrix,
0.628319 * np.eye(3), 5)
self.assertArrayAlmostEqual(self.tetragonal.reciprocal_lattice.matrix,
[[0.628319, 0., 0.], [0., 0.628319, 0],
[0., 0., 0.3141590]], 5)
#Test the crystallographic version.
recip_latt_xtal = self.lattice.reciprocal_lattice_crystallographic
self.assertArrayAlmostEqual(recip_latt.matrix,
recip_latt_xtal.matrix * 2 * np.pi,
5)
def test_static_methods(self):
lengths_c = [3.840198, 3.84019885, 3.8401976]
angles_c = [119.99998575, 90, 60.00000728]
mat_c = [[3.840198, 0.000000, 0.0000], [1.920099, 3.325710, 0.000000],
[0.000000, -2.217138, 3.135509]]
#should give the lengths and angles above
newlatt = Lattice(mat_c)
(lengths, angles) = newlatt.lengths_and_angles
for i in range(0, 3):
self.assertAlmostEqual(lengths[i], lengths_c[i], 5,
"Lengths incorrect!")
self.assertAlmostEqual(angles[i], angles_c[i], 5,
"Angles incorrect!")
(lengths, angles) = \
Lattice.from_lengths_and_angles(lengths, angles).lengths_and_angles
for i in range(0, 3):
self.assertAlmostEqual(lengths[i], lengths_c[i], 5,
"Lengths incorrect!")
self.assertAlmostEqual(angles[i], angles_c[i], 5,
"Angles incorrect!")
def test_attributes(self):
"""docstring for test_attributes"""
lattice = Lattice.cubic(10.0)
self.assertEqual(lattice.a, 10.0)
self.assertEqual(lattice.b, 10.0)
self.assertEqual(lattice.c, 10.0)
self.assertAlmostEqual(lattice.volume, 1000.0)
xyz = lattice.get_cartesian_coords([0.25, 0.35, 0.45])
self.assertEqual(xyz[0], 2.5)
self.assertEqual(xyz[1], 3.5)
self.assertEqual(xyz[2], 4.5)
def test_consistency(self):
"""
when only lengths and angles are given for constructors, the
internal matrix representation is ambiguous since the lattice rotation
is not specified.
This test makes sure that a consistent definition is specified for the
lattice rotation when using different constructors from lengths angles
"""
l = [3.840198, 3.84019885, 3.8401976]
a = [119.99998575, 90, 60.00000728]
mat1 = Lattice.from_lengths_and_angles(l, a).matrix
mat2 = Lattice.from_parameters(l[0], l[1], l[2],
a[0], a[1], a[2]).matrix
for i in range(0, 3):
for j in range(0, 3):
self.assertAlmostEqual(mat1[i][j], mat2[i][j], 5)
def test_get_lll_reduced_lattice(self):
lattice = Lattice([1.0, 1, 1, -1.0, 0, 2, 3.0, 5, 6])
reduced_latt = lattice.get_lll_reduced_lattice()
expected_ans = Lattice(np.array(
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, -2.0, 0.0, 1.0]).reshape((3, 3)))
self.assertAlmostEqual(
np.linalg.det(np.linalg.solve(expected_ans.matrix,
reduced_latt.matrix)),
1)
self.assertArrayAlmostEqual(
sorted(reduced_latt.abc), sorted(expected_ans.abc))
self.assertAlmostEqual(reduced_latt.volume, lattice.volume)
latt = [7.164750, 2.481942, 0.000000,
- 4.298850, 2.481942, 0.000000,
0.000000, 0.000000, 14.253000]
expected_ans = Lattice(np.array(
[-4.298850, 2.481942, 0.000000, 2.865900, 4.963884, 0.000000,
0.000000, 0.000000, 14.253000]))
reduced_latt = Lattice(latt).get_lll_reduced_lattice()
self.assertAlmostEqual(
np.linalg.det(np.linalg.solve(expected_ans.matrix,
reduced_latt.matrix)),
1)
self.assertArrayAlmostEqual(
sorted(reduced_latt.abc), sorted(expected_ans.abc))
expected_ans = Lattice([0.0, 10.0, 10.0,
10.0, 10.0, 0.0,
30.0, -30.0, 40.0])
lattice = np.array([100., 0., 10., 10., 10., 20., 10., 10., 10.])
lattice = lattice.reshape(3, 3)
lattice = Lattice(lattice.T)
reduced_latt = lattice.get_lll_reduced_lattice()
self.assertAlmostEqual(
np.linalg.det(np.linalg.solve(expected_ans.matrix,
reduced_latt.matrix)),
1)
self.assertArrayAlmostEqual(
sorted(reduced_latt.abc), sorted(expected_ans.abc))
random_latt = Lattice(np.random.random((3, 3)))
if np.linalg.det(random_latt.matrix) > 1e-8:
reduced_random_latt = random_latt.get_lll_reduced_lattice()
self.assertAlmostEqual(reduced_random_latt.volume,
random_latt.volume)
def test_get_niggli_reduced_lattice(self):
latt = Lattice.from_parameters(3, 5.196, 2, 103 + 55 / 60,
109 + 28 / 60,
134 + 53 / 60)
reduced_cell = latt.get_niggli_reduced_lattice()
abc, angles = reduced_cell.lengths_and_angles
self.assertAlmostEqual(abc[0], 2, 3)
self.assertAlmostEqual(abc[1], 3, 3)
self.assertAlmostEqual(abc[2], 3, 3)
self.assertAlmostEqual(angles[0], 116.382855225, 3)
self.assertAlmostEqual(angles[1], 94.769790287999996, 3)
self.assertAlmostEqual(angles[2], 109.466666667, 3)
mat = [[5.0, 0, 0], [0, 5.0, 0], [5.0, 0, 5.0]]
latt = Lattice(np.dot([[1, 1, 1], [1, 1, 0], [0, 1, 1]], mat))
reduced_cell = latt.get_niggli_reduced_lattice()
abc, angles = reduced_cell.lengths_and_angles
for l in abc:
self.assertAlmostEqual(l, 5, 3)
for a in angles:
self.assertAlmostEqual(a, 90, 3)
latt = Lattice([1.432950, 0.827314, 4.751000, -1.432950, 0.827314,
4.751000, 0.0, -1.654628, 4.751000])
ans = [[-1.432950, -2.481942, 0.0],
[-2.8659, 0.0, 0.0],
[-1.432950, -0.827314, -4.751000]]
self.assertArrayAlmostEqual(latt.get_niggli_reduced_lattice().matrix,
ans)
latt = Lattice.from_parameters(7.365450, 6.199506, 5.353878,
75.542191, 81.181757, 156.396627)
ans = [[2.578932, 0.826965, 0.000000],
[-0.831059, 2.067413, 1.547813],
[-0.458407, -2.480895, 1.129126]]
self.assertArrayAlmostEqual(latt.get_niggli_reduced_lattice().matrix,
np.array(ans), 5)
def test_find_mapping(self):
m = np.array([[0.1, 0.2, 0.3], [-0.1, 0.2, 0.7], [0.6, 0.9, 0.2]])
latt = Lattice(m)
op = SymmOp.from_origin_axis_angle([0, 0, 0], [2, 3, 3], 35)
rot = op.rotation_matrix
scale = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
latt2 = Lattice(np.dot(rot, np.dot(scale, m).T).T)
(aligned_out, rot_out, scale_out) = latt2.find_mapping(latt)
self.assertAlmostEqual(abs(np.linalg.det(rot)), 1)
rotated = SymmOp.from_rotation_and_translation(rot_out).operate_multi(latt.matrix)
self.assertArrayAlmostEqual(rotated, aligned_out.matrix)
self.assertArrayAlmostEqual(np.dot(scale_out, latt2.matrix), aligned_out.matrix)
self.assertArrayAlmostEqual(aligned_out.lengths_and_angles, latt.lengths_and_angles)
self.assertFalse(np.allclose(aligned_out.lengths_and_angles,
latt2.lengths_and_angles))
def test_find_all_mappings(self):
m = np.array([[0.1, 0.2, 0.3], [-0.1, 0.2, 0.7], [0.6, 0.9, 0.2]])
latt = Lattice(m)
op = SymmOp.from_origin_axis_angle([0, 0, 0], [2, -1, 3], 40)
rot = op.rotation_matrix
scale = np.array([[0, 2, 0], [1, 1, 0], [0,0,1]])
latt2 = Lattice(np.dot(rot, np.dot(scale, m).T).T)
for (aligned_out, rot_out, scale_out) in latt.find_all_mappings(latt2):
self.assertArrayAlmostEqual(np.inner(latt2.matrix, rot_out),
aligned_out.matrix, 5)
self.assertArrayAlmostEqual(np.dot(scale_out, latt.matrix),
aligned_out.matrix)
self.assertArrayAlmostEqual(aligned_out.lengths_and_angles, latt2.lengths_and_angles)
self.assertFalse(np.allclose(aligned_out.lengths_and_angles,
latt.lengths_and_angles))
latt = Lattice.orthorhombic(9, 9, 5)
self.assertEqual(len(list(latt.find_all_mappings(latt))), 16)
#catch the singular matrix error
latt = Lattice.from_lengths_and_angles([1,1,1], [10,10,10])
for l, _, _ in latt.find_all_mappings(latt, ltol=0.05, atol=11):
self.assertTrue(isinstance(l, Lattice))
def test_to_from_dict(self):
d = self.tetragonal.as_dict()
t = Lattice.from_dict(d)
for i in range(3):
self.assertEqual(t.abc[i], self.tetragonal.abc[i])
self.assertEqual(t.angles[i], self.tetragonal.angles[i])
#Make sure old style dicts work.
del d["matrix"]
t = Lattice.from_dict(d)
for i in range(3):
self.assertEqual(t.abc[i], self.tetragonal.abc[i])
self.assertEqual(t.angles[i], self.tetragonal.angles[i])
def test_scale(self):
new_volume = 10
for (family_name, lattice) in self.families.items():
new_lattice = lattice.scale(new_volume)
self.assertAlmostEqual(new_lattice.volume, new_volume)
self.assertEqual(new_lattice.angles, lattice.angles)
def test_get_wigner_seitz_cell(self):
ws_cell = Lattice([[10, 0, 0], [0, 5, 0], [0, 0, 1]])\
.get_wigner_seitz_cell()
self.assertEqual(6, len(ws_cell))
for l in ws_cell[3]:
self.assertEqual([abs(i) for i in l], [5.0, 2.5, 0.5])
def test_dot_and_norm(self):
frac_basis = [[1,0,0], [0,1,0], [0,0,1]]
for family_name, lattice in self.families.items():
#print(family_name)
self.assert_equal(lattice.norm(lattice.matrix, frac_coords=False), lattice.abc)
self.assert_equal(lattice.norm(frac_basis), lattice.abc)
for (i, vec) in enumerate(frac_basis):
length = lattice.norm(vec)
self.assert_equal(length[0], lattice.abc[i])
# We always get an ndarray.
self.assertTrue(hasattr(length, "shape"))
# Passing complex arrays should raise TypeError
with self.assertRaises(TypeError):
lattice.norm(np.zeros(3, dtype=np.complex))
# Cannot reshape the second argument.
with self.assertRaises(ValueError):
lattice.dot(np.zeros(6), np.zeros(8))
# Passing vectors of different length is invalid.
with self.assertRaises(ValueError):
lattice.dot(np.zeros(3), np.zeros(6))
def test_get_points_in_sphere(self):
latt = Lattice.cubic(1)
pts = []
for a, b, c in itertools.product(range(10), range(10), range(10)):
pts.append([a / 10, b / 10, c / 10])
self.assertEqual(len(latt.get_points_in_sphere(
pts, [0, 0, 0], 0.1)), 7)
self.assertEqual(len(latt.get_points_in_sphere(
pts, [0.5, 0.5, 0.5], 0.5)), 515)
def test_get_all_distances(self):
fcoords = np.array([[0.3, 0.3, 0.5],
[0.1, 0.1, 0.3],
[0.9, 0.9, 0.8],
[0.1, 0.0, 0.5],
[0.9, 0.7, 0.0]])
lattice = Lattice.from_lengths_and_angles([8, 8, 4],
[90, 76, 58])
expected = np.array([[0.000, 3.015, 4.072, 3.519, 3.245],
[3.015, 0.000, 3.207, 1.131, 4.453],
[4.072, 3.207, 0.000, 2.251, 1.788],
[3.519, 1.131, 2.251, 0.000, 3.852],
[3.245, 4.453, 1.788, 3.852, 0.000]])
output = lattice.get_all_distances(fcoords, fcoords)
self.assertArrayAlmostEqual(output, expected, 3)
#test just one input point
output2 = lattice.get_all_distances(fcoords[0], fcoords)
self.assertArrayAlmostEqual(output2, [expected[0]], 2)
#test distance when initial points are not in unit cell
f1 = [0, 0, 17]
f2 = [0, 0, 10]
self.assertEqual(lattice.get_all_distances(f1, f2)[0, 0], 0)
def test_monoclinic(self):
lengths, angles = self.monoclinic.lengths_and_angles
self.assertNotAlmostEqual(angles[1], 90)
self.assertAlmostEqual(angles[0], 90)
self.assertAlmostEqual(angles[2], 90)
def test_is_hexagonal(self):
self.assertFalse(self.cubic.is_hexagonal())
self.assertFalse(self.tetragonal.is_hexagonal())
self.assertFalse(self.orthorhombic.is_hexagonal())
self.assertFalse(self.monoclinic.is_hexagonal())
self.assertFalse(self.rhombohedral.is_hexagonal())
self.assertTrue(self.hexagonal.is_hexagonal())
def test_get_distance_and_image(self):
dist, image = self.cubic.get_distance_and_image([0, 0, 0.1], [0, 0.,
0.9])
self.assertAlmostEqual(dist, 2)
self.assertArrayAlmostEqual(image, [0, 0, -1])
def test_get_all_distance_and_image(self):
r = self.cubic.get_all_distance_and_image([0, 0, 0.1],
[0, 0., 0.9])
self.assertEqual(len(r), 8)
dist, image = min(r, key=lambda x: x[0])
self.assertAlmostEqual(dist, 2)
self.assertArrayAlmostEqual(image, [0, 0, -1])
dist, image = max(r, key=lambda x: x[0])
self.assertAlmostEqual(dist, 16.24807680927192)
self.assertArrayAlmostEqual(image, [1, 1, 0])
if __name__ == '__main__':
import unittest
unittest.main()
|
|
## Create and compare lists of files/objects
## Author: Michal Ludvig <michal@logix.cz>
## http://www.logix.cz/michal
## License: GPL Version 2
from S3 import S3
from Config import Config
from S3Uri import S3Uri
from FileDict import FileDict
from Utils import *
from Exceptions import ParameterError
from HashCache import HashCache
from logging import debug, info, warning, error
import os
import sys
import glob
import copy
import re
__all__ = ["fetch_local_list", "fetch_remote_list", "compare_filelists", "filter_exclude_include"]
def _fswalk_follow_symlinks(path):
'''
Walk the filesystem, following symbolic links (without recursing into
symlink loops), on Python 2.4 and later.
If a symlinked directory loop is detected, emit a warning and skip it.
E.g.: dir1/dir2/sym-dir -> ../dir2
'''
assert os.path.isdir(path) # only designed for directory argument
walkdirs = set([path])
for dirpath, dirnames, filenames in os.walk(path):
handle_exclude_include_walk(dirpath, dirnames, [])
real_dirpath = os.path.realpath(dirpath)
for dirname in dirnames:
current = os.path.join(dirpath, dirname)
real_current = os.path.realpath(current)
if os.path.islink(current):
if (real_dirpath == real_current or
real_dirpath.startswith(real_current + os.path.sep)):
warning("Skipping recursively symlinked directory %s" % dirname)
else:
walkdirs.add(current)
for walkdir in walkdirs:
for dirpath, dirnames, filenames in os.walk(walkdir):
handle_exclude_include_walk(dirpath, dirnames, [])
yield (dirpath, dirnames, filenames)
def _fswalk_no_symlinks(path):
'''
Directory tree generator
path (str) is the root of the directory tree to walk
'''
for dirpath, dirnames, filenames in os.walk(path):
handle_exclude_include_walk(dirpath, dirnames, filenames)
yield (dirpath, dirnames, filenames)
def filter_exclude_include(src_list):
info(u"Applying --exclude/--include")
cfg = Config()
exclude_list = FileDict(ignore_case = False)
for file in src_list.keys():
debug(u"CHECK: %s" % file)
excluded = False
for r in cfg.exclude:
if r.search(file):
excluded = True
debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
break
if excluded:
## No need to check for --include if not excluded
for r in cfg.include:
if r.search(file):
excluded = False
debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
break
if excluded:
## Still excluded - ok, action it
debug(u"EXCLUDE: %s" % file)
exclude_list[file] = src_list[file]
del(src_list[file])
continue
else:
debug(u"PASS: %r" % (file))
return src_list, exclude_list
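## Illustrative usage sketch (not part of s3cmd; the local path below is an
## assumption). filter_exclude_include() expects a FileDict keyed by relative
## file name, such as the one built by fetch_local_list():
##
##   local_list, single_file = fetch_local_list(u"/tmp/photos", is_src=True,
##                                               recursive=True)
##   kept, dropped = filter_exclude_include(local_list)
##   for name in dropped:
##       debug(u"excluded by --exclude/--include: %s" % name)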
def handle_exclude_include_walk(root, dirs, files):
cfg = Config()
copydirs = copy.copy(dirs)
copyfiles = copy.copy(files)
# exclude dir matches in the current directory
# this prevents us from recursing down trees we know we want to ignore
for x in copydirs:
d = os.path.join(root, x, '')
debug(u"CHECK: %r" % d)
excluded = False
for r in cfg.exclude:
if r.search(d):
excluded = True
debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
break
if excluded:
## No need to check for --include if not excluded
for r in cfg.include:
if r.search(d):
excluded = False
debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
break
if excluded:
## Still excluded - ok, action it
debug(u"EXCLUDE: %r" % d)
dirs.remove(x)
continue
else:
debug(u"PASS: %r" % (d))
# exclude file matches in the current directory
for x in copyfiles:
file = os.path.join(root, x)
debug(u"CHECK: %r" % file)
excluded = False
for r in cfg.exclude:
if r.search(file):
excluded = True
debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
break
if excluded:
## No need to check for --include if not excluded
for r in cfg.include:
if r.search(file):
excluded = False
debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
break
if excluded:
## Still excluded - ok, action it
debug(u"EXCLUDE: %s" % file)
files.remove(x)
continue
else:
debug(u"PASS: %r" % (file))
def _get_filelist_from_file(cfg, local_path):
def _append(d, key, value):
if key not in d:
d[key] = [value]
else:
d[key].append(value)
filelist = {}
for fname in cfg.files_from:
if fname == u'-':
f = sys.stdin
else:
try:
f = open(fname, 'r')
except IOError, e:
warning(u"--files-from input file %s could not be opened for reading (%s), skipping." % (fname, e.strerror))
continue
for line in f:
line = line.strip()
line = os.path.normpath(os.path.join(local_path, line))
dirname = os.path.dirname(line)
basename = os.path.basename(line)
_append(filelist, dirname, basename)
if f != sys.stdin:
f.close()
# reformat to match os.walk()
result = []
keys = filelist.keys()
keys.sort()
for key in keys:
values = filelist[key]
values.sort()
result.append((key, [], values))
return result
def fetch_local_list(args, is_src = False, recursive = None):
def _get_filelist_local(loc_list, local_uri, cache):
info(u"Compiling list of local files...")
if deunicodise(local_uri.basename()) == "-":
loc_list["-"] = {
'full_name_unicode' : '-',
'full_name' : '-',
'size' : -1,
'mtime' : -1,
}
return loc_list, True
if local_uri.isdir():
local_base = deunicodise(local_uri.basename())
local_path = deunicodise(local_uri.path())
if is_src and len(cfg.files_from):
filelist = _get_filelist_from_file(cfg, local_path)
single_file = False
else:
if cfg.follow_symlinks:
filelist = _fswalk_follow_symlinks(local_path)
else:
filelist = _fswalk_no_symlinks(local_path)
single_file = False
else:
local_base = ""
local_path = deunicodise(local_uri.dirname())
filelist = [( local_path, [], [deunicodise(local_uri.basename())] )]
single_file = True
for root, dirs, files in filelist:
rel_root = root.replace(local_path, local_base, 1)
for f in files:
full_name = os.path.join(root, f)
if not os.path.isfile(full_name):
continue
if os.path.islink(full_name):
if not cfg.follow_symlinks:
continue
relative_file = unicodise(os.path.join(rel_root, f))
if os.path.sep != "/":
# Convert non-unix dir separators to '/'
relative_file = "/".join(relative_file.split(os.path.sep))
if cfg.urlencoding_mode == "normal":
relative_file = replace_nonprintables(relative_file)
if relative_file.startswith('./'):
relative_file = relative_file[2:]
sr = os.stat_result(os.lstat(full_name))
loc_list[relative_file] = {
'full_name_unicode' : unicodise(full_name),
'full_name' : full_name,
'size' : sr.st_size,
'mtime' : sr.st_mtime,
'dev' : sr.st_dev,
'inode' : sr.st_ino,
'uid' : sr.st_uid,
'gid' : sr.st_gid,
'sr': sr # save it all, may need it in preserve_attrs_list
## TODO: Possibly more to save here...
}
if 'md5' in cfg.sync_checks:
md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
if md5 is None:
try:
md5 = loc_list.get_md5(relative_file) # this does the file I/O
except IOError:
continue
cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5)
return loc_list, single_file
def _maintain_cache(cache, local_list):
# if getting the file list from files_from, it is going to be
# a subset of the actual tree. We should not purge content
# outside of that subset as we don't know if it's valid or
# not. Leave it to a non-files_from run to purge.
if cfg.cache_file and len(cfg.files_from) == 0:
cache.mark_all_for_purge()
for i in local_list.keys():
cache.unmark_for_purge(local_list[i]['dev'], local_list[i]['inode'], local_list[i]['mtime'], local_list[i]['size'])
cache.purge()
cache.save(cfg.cache_file)
cfg = Config()
cache = HashCache()
if cfg.cache_file:
try:
cache.load(cfg.cache_file)
except IOError:
info(u"No cache file found, creating it.")
local_uris = []
local_list = FileDict(ignore_case = False)
single_file = False
if type(args) not in (list, tuple):
args = [args]
if recursive is None:
recursive = cfg.recursive
for arg in args:
uri = S3Uri(arg)
if not uri.type == 'file':
raise ParameterError("Expecting filename or directory instead of: %s" % arg)
if uri.isdir() and not recursive:
raise ParameterError("Use --recursive to upload a directory: %s" % arg)
local_uris.append(uri)
for uri in local_uris:
list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)
## single_file is True if and only if the user specified exactly one
## local URI and that URI refers to a FILE. It is False if the URI
## referred to a DIR, even if that dir contained only one FILE; that
## case does not count as single_file == True.
if len(local_list) > 1:
single_file = False
_maintain_cache(cache, local_list)
return local_list, single_file
def fetch_remote_list(args, require_attribs = False, recursive = None):
def _get_remote_attribs(uri, remote_item):
response = S3(cfg).object_info(uri)
remote_item.update({
'size': int(response['headers']['content-length']),
'md5': response['headers']['etag'].strip('"\''),
'timestamp' : dateRFC822toUnix(response['headers']['date'])
})
try:
md5 = response['s3cmd-attrs']['md5']
remote_item.update({'md5': md5})
debug(u"retreived md5=%s from headers" % md5)
except KeyError:
pass
def _get_filelist_remote(remote_uri, recursive = True):
## If remote_uri ends with '/' then all remote files will have
## the remote_uri prefix removed in the relative path.
## If, on the other hand, the remote_uri ends with something else
## (probably alphanumeric symbol) we'll use the last path part
## in the relative path.
##
## Complicated, eh? See an example:
## _get_filelist_remote("s3://bckt/abc/def") may yield:
## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
## _get_filelist_remote("s3://bckt/abc/def/") will yield:
## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
## Furthermore a prefix-magic can restrict the return list:
## _get_filelist_remote("s3://bckt/abc/def/x") yields:
## { 'xyz/blah.txt' : {} }
info(u"Retrieving list of remote files for %s ..." % remote_uri)
s3 = S3(Config())
response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)
rem_base_original = rem_base = remote_uri.object()
remote_uri_original = remote_uri
if rem_base != '' and rem_base[-1] != '/':
rem_base = rem_base[:rem_base.rfind('/')+1]
remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
rem_base_len = len(rem_base)
rem_list = FileDict(ignore_case = False)
break_now = False
for object in response['list']:
if object['Key'] == rem_base_original and object['Key'][-1] != "/":
## We asked for one file and we got that file :-)
key = os.path.basename(object['Key'])
object_uri_str = remote_uri_original.uri()
break_now = True
rem_list = FileDict(ignore_case = False) ## Remove whatever has already been put to rem_list
else:
key = object['Key'][rem_base_len:] ## Beware - this may be '' if object['Key']==rem_base !!
object_uri_str = remote_uri.uri() + key
rem_list[key] = {
'size' : int(object['Size']),
'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
'md5' : object['ETag'][1:-1],
'object_key' : object['Key'],
'object_uri_str' : object_uri_str,
'base_uri' : remote_uri,
'dev' : None,
'inode' : None,
}
if rem_list[key]['md5'].find("-") > 0: # always get it for multipart uploads
_get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
md5 = rem_list[key]['md5']
rem_list.record_md5(key, md5)
if break_now:
break
return rem_list
cfg = Config()
remote_uris = []
remote_list = FileDict(ignore_case = False)
if type(args) not in (list, tuple):
args = [args]
if recursive is None:
recursive = cfg.recursive
for arg in args:
uri = S3Uri(arg)
if not uri.type == 's3':
raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
remote_uris.append(uri)
if recursive:
for uri in remote_uris:
objectlist = _get_filelist_remote(uri)
for key in objectlist:
remote_list[key] = objectlist[key]
remote_list.record_md5(key, objectlist.get_md5(key))
else:
for uri in remote_uris:
uri_str = str(uri)
## Wildcards used in remote URI?
## If yes we'll need a bucket listing...
wildcard_split_result = re.split("\*|\?", uri_str, maxsplit=1)
if len(wildcard_split_result) == 2: # wildcards found
prefix, rest = wildcard_split_result
## Only request recursive listing if the 'rest' of the URI,
## i.e. the part after first wildcard, contains '/'
need_recursion = '/' in rest
objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
for key in objectlist:
## Check whether the 'key' matches the requested wildcards
if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
remote_list[key] = objectlist[key]
else:
## No wildcards - simply append the given URI to the list
key = os.path.basename(uri.object())
if not key:
raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
remote_item = {
'base_uri': uri,
'object_uri_str': unicode(uri),
'object_key': uri.object()
}
if require_attribs:
_get_remote_attribs(uri, remote_item)
remote_list[key] = remote_item
md5 = remote_item.get('md5')
if md5:
remote_list.record_md5(key, md5)
return remote_list
def compare_filelists(src_list, dst_list, src_remote, dst_remote, delay_updates = False):
def __direction_str(is_remote):
return "remote" if is_remote else "local"
def _compare(src_list, dst_list, src_remote, dst_remote, file):
"""Return True if src_list[file] matches dst_list[file], else False"""
attribs_match = True
if not (src_list.has_key(file) and dst_list.has_key(file)):
info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s" % (file, src_list.has_key(file), dst_list.has_key(file)))
return False
## check size first
if 'size' in cfg.sync_checks and dst_list[file]['size'] != src_list[file]['size']:
debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
attribs_match = False
## check md5
compare_md5 = 'md5' in cfg.sync_checks
# Multipart-uploaded files don't have a valid md5 sum - it ends with "...-nn"
if compare_md5:
if (src_remote == True and src_list[file]['md5'].find("-") >= 0) or (dst_remote == True and dst_list[file]['md5'].find("-") >= 0):
compare_md5 = False
info(u"disabled md5 check for %s" % file)
if attribs_match and compare_md5:
try:
src_md5 = src_list.get_md5(file)
dst_md5 = dst_list.get_md5(file)
except (IOError,OSError), e:
# md5 sum verification failed - ignore that file altogether
debug(u"IGNR: %s (disappeared)" % (file))
warning(u"%s: file disappeared, ignoring." % (file))
raise
if src_md5 != dst_md5:
## checksums are different.
attribs_match = False
debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" % (file, src_md5, dst_md5))
return attribs_match
# we don't support local->local sync, use 'rsync' or something like that instead ;-)
assert(not(src_remote == False and dst_remote == False))
info(u"Verifying attributes...")
cfg = Config()
## Items left on src_list will be transferred
## Items left on update_list will be transferred after src_list
## Items left on copy_pairs will be copied from dst1 to dst2
update_list = FileDict(ignore_case = False)
## Items left on dst_list will be deleted
copy_pairs = []
debug("Comparing filelists (direction: %s -> %s)" % (__direction_str(src_remote), __direction_str(dst_remote)))
for relative_file in src_list.keys():
debug(u"CHECK: %s" % (relative_file))
if dst_list.has_key(relative_file):
## Was --skip-existing requested?
if cfg.skip_existing:
debug(u"IGNR: %s (used --skip-existing)" % (relative_file))
del(src_list[relative_file])
del(dst_list[relative_file])
continue
try:
same_file = _compare(src_list, dst_list, src_remote, dst_remote, relative_file)
except (IOError,OSError), e:
debug(u"IGNR: %s (disappeared)" % (relative_file))
warning(u"%s: file disappeared, ignoring." % (relative_file))
del(src_list[relative_file])
del(dst_list[relative_file])
continue
if same_file:
debug(u"IGNR: %s (transfer not needed)" % relative_file)
del(src_list[relative_file])
del(dst_list[relative_file])
else:
# look for matching file in src
try:
md5 = src_list.get_md5(relative_file)
except IOError:
md5 = None
if md5 is not None and dst_list.by_md5.has_key(md5):
# Found one, we want to copy
dst1 = list(dst_list.by_md5[md5])[0]
debug(u"DST COPY src: %s -> %s" % (dst1, relative_file))
copy_pairs.append((src_list[relative_file], dst1, relative_file))
del(src_list[relative_file])
del(dst_list[relative_file])
else:
# record that we will get this file transferred to us (before all the copies), so if we come across it later again,
# we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
dst_list.record_md5(relative_file, md5)
update_list[relative_file] = src_list[relative_file]
del src_list[relative_file]
del dst_list[relative_file]
else:
# dst doesn't have this file
# look for matching file elsewhere in dst
try:
md5 = src_list.get_md5(relative_file)
except IOError:
md5 = None
dst1 = dst_list.find_md5_one(md5)
if dst1 is not None:
# Found one, we want to copy
debug(u"DST COPY dst: %s -> %s" % (dst1, relative_file))
copy_pairs.append((src_list[relative_file], dst1, relative_file))
del(src_list[relative_file])
else:
# we don't have this file, and we don't have a copy of this file elsewhere. Get it.
# record that we will get this file transferred to us (before all the copies), so if we come across it later again,
# we can copy from _this_ copy (i.e. we only upload it once, and copy thereafter).
dst_list.record_md5(relative_file, md5)
for f in dst_list.keys():
if src_list.has_key(f) or update_list.has_key(f):
# leave only those not on src_list + update_list
del dst_list[f]
return src_list, dst_list, update_list, copy_pairs
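# Hedged usage sketch (the enclosing function's name and the caller below are
# assumed here for illustration only):
#   src, dst, update, copies = compare_filelists(src_list, dst_list,
#                                                src_remote=False,
#                                                dst_remote=True)
# Items left in src (and then update) get transferred, copy_pairs entries are
# copied remotely from an already-present duplicate, and anything still left
# in dst is a candidate for deletion.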
# vim:et:ts=4:sts=4:ai
|
|
"""
Analysis tools for Topographica, other than plotting tools.
Configures the interface to the featuremapper and holoviews projects
and sets the appropriate Topographica-specific hooks.
"""
import numpy as np
from holoviews.interface.collector import Reference
from holoviews import HSV, Image
from holoviews.core.options import Compositor
from holoviews.ipython import IPTestCase
from holoviews.operation import chain, operation, factory, image_overlay
import imagen.colorspaces
from featuremapper.command import Collector, measure_response
import topo
from topo.analysis.featureresponses import FeatureResponses, FeatureCurves,\
FeatureMaps, ReverseCorrelation, MeasureResponseCommand, pattern_response,\
topo_metadata_fn, StorageHook, get_feature_preference
from topo.base.projection import Projection
from topo.base.sheet import Sheet
from topo.base.sheetview import CFView
from topo.misc.ipython import RunProgress
from topo.misc import color
from command import measure_cog
CoG_spec = "Image.X CoG * Image.Y CoG * Image.BlueChannel"
XYCoG = chain.instance(group='XYCoG', name='XYCoG',
operations = [image_overlay.instance(spec=CoG_spec), factory.instance()])
Compositor.register(Compositor("Image.X CoG * Image.Y CoG", XYCoG, 'XYCoG', 'display'))
import param
from holoviews import RGB, ElementOperation
from holoviews.operation.normalization import raster_normalization
class colorizeHSV(ElementOperation):
"""
Given an Overlay consisting of two Image elements, colorize the
data in the bottom Image with the data in the top Image using
the HSV color space.
"""
group = param.String(default='ColorizedHSV', doc="""
The group string for the colorized output (an RGB element)""")
output_type = RGB
def _process(self, overlay, key=None):
if len(overlay) != 2:
raise Exception("colorizeHSV required an overlay of two Image elements as input.")
if (len(overlay.get(0).vdims), len(overlay.get(1).vdims)) != (1,1):
raise Exception("Each Image element must have single value dimension.")
if overlay.get(0).shape != overlay.get(1).shape:
raise Exception("Mismatch in the shapes of the data in the Image elements.")
hue = overlay.get(1)
Hdim = hue.vdims[0]
H = hue.clone(hue.data.copy(),
vdims=[Hdim(cyclic=True, range=hue.range(Hdim.name))])
normfn = raster_normalization.instance()
if self.p.input_ranges:
S = normfn.process_element(overlay.get(0), key, *self.p.input_ranges)
else:
S = normfn.process_element(overlay.get(0), key)
C = Image(np.ones(hue.data.shape),
bounds=self.get_overlay_bounds(overlay), group='F', label='G')
C.vdims[0].range = (0,1)
S.vdims[0].range = (0,1)
return HSV(H * C * S).relabel(group=self.p.group)
Compositor.register(
Compositor('CFView.CF Weight * Image.Orientation_Preference',
colorizeHSV, 'ColorizedWeights', mode='display'))
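# Hedged example (commented out; element names are hypothetical): the
# colorizeHSV operation can also be applied directly to an overlay of two
# single-vdim Image elements, outside the Compositor registrations above:
#   rgb = colorizeHSV(bottom_img * top_img)
# The top element supplies the hue channel, the bottom element is normalized
# and supplies the remaining channel, and the result is an RGB element in the
# 'ColorizedHSV' group (or whatever the 'group' parameter is set to).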
class TopoIPTestCase(IPTestCase):
def __init__(self, *args, **kwargs):
super(TopoIPTestCase, self).__init__(*args, **kwargs)
@classmethod
def register(cls):
super(TopoIPTestCase, cls).register()
cls.equality_type_funcs[CFView] = cls.compare_cfview
return cls.equality_type_funcs
@classmethod
def compare_cfview(cls, el1, el2, msg='CFView data'):
cls.compare_image(el1, el2, msg=msg)
class SimRef(Reference):
"""
A SimRef instance is installed on Collector to allow Topographica
model elements to be referenced for collection.
This is important to allow pickling and unpickling of Collectors
that work correctly with Topographica in different execution
environments (e.g. nodes of a cluster) and across different models
without directly pickling the components (e.g. Sheets and
Projections) themselves.
More information about references can be found in the docstring of
holoviews.collector.Reference.
"""
@property
def resolved_type(self):
if self.array_ref:
return np.ndarray
elif isinstance(self.obj, tuple):
return Projection
else:
return Sheet
def __init__(self, obj=None, array_ref=None):
if topo.sim.model is not None:
print "DEPRECATION WARNING: use topo.submodel.specifications instead of SimRef."
if [obj, array_ref] == [None,None]:
raise Exception("Please specify an object, a path string or an array_ref.")
self.array_ref = None
if obj is None:
self.obj = None
self.array_ref = array_ref
elif isinstance(obj, str):
self.obj = tuple(obj.split('.')) if '.' in obj else obj
elif isinstance(obj, Projection):
self.obj = (obj.dest.name, obj.name)
else:
self.obj = obj.name
def resolve(self):
from topo import sim
if isinstance(self.obj, tuple):
(sheet, proj) = self.obj
return sim[sheet].projections()[proj]
elif self.obj:
return sim[self.obj]
else:
return eval('topo.sim.'+self.array_ref)
def __repr__(self):
if isinstance(self.obj, tuple):
return "SimRef(%r)" % '.'.join(el for el in self.obj)
elif self.obj is None:
return "SimRef(array_ref=%r)" % self.array_ref
else:
return "SimRef(%r)" % self.obj
def __str__(self):
if isinstance(self.obj, tuple):
return "topo.sim."+'.'.join(el for el in self.obj)
elif self.obj is None:
return "topo.sim." + self.array_ref
else:
return "topo.sim."+ self.obj
### Collection hooks
Collector.time_fn = topo.sim.time
Collector.interval_hook = RunProgress
def sheet_hook(obj, *args, **kwargs):
"""
Return an Image of the Sheet activity.
"""
return obj[:]
def projection_hook(obj, *args, **kwargs):
"""
Return an Image of the projection activity or, if grid=True,
a Grid of the CFs.
"""
if kwargs.pop('grid', False):
return obj.grid(**kwargs)
else:
return obj.projection_view()
def measurement_hook(obj, *args, **kwargs):
return obj(*args, **kwargs)
# Configure Collector with appropriate hooks
Collector.sim = SimRef
Collector.for_type(Sheet, sheet_hook, referencer=SimRef)
Collector.for_type(Projection, projection_hook, referencer=SimRef)
Collector.for_type(measure_cog, measurement_hook, mode='merge')
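# Hedged sketch (commented out; the Collector call pattern below is only an
# approximation of the featuremapper API, and a sheet named 'V1' is assumed):
#   c = Collector()
#   c.collect(topo.sim['V1'])   # Sheet activity gathered via sheet_hook
#   c.collect(measure_cog)      # CoG measurement gathered via measurement_hook
# Because referencer=SimRef is registered above, the collected specifications
# refer to simulation objects by name rather than holding them directly.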
# Setting default channel operation for ON-OFF visualization
op_subtract = operation.instance(output_type=CFView, op=lambda x, k: x.collapse(np.subtract))
Compositor.register(Compositor('CFView.CF_Weight * CFView.CF_Weight',
op_subtract, 'OnOff CFs', mode='data'))
# Featuremapper hooks
def empty_storage_hook(arg):
"""Use this to unset storage hook because lambda will not work
with snapshots.
This function is used in notebook_setup.py of the topographica
IPython profile.
"""
pass
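# For example (assumption, mirroring the assignments below), a notebook can
# disable storage for a measurement class before snapshotting:
#   FeatureMaps.measurement_storage_hook = empty_storage_hook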
FeatureResponses.metadata_fns = [topo_metadata_fn]
FeatureResponses.pattern_response_fn = pattern_response.instance()
FeatureMaps.measurement_storage_hook = StorageHook.instance(sublabel='Maps')
FeatureCurves.measurement_storage_hook = StorageHook.instance(sublabel='Curves')
ReverseCorrelation.measurement_storage_hook = StorageHook.instance(sublabel='RFs')
measure_response.measurement_storage_hook = StorageHook.instance(sublabel=None)
measure_cog.measurement_storage_hook = StorageHook.instance(sublabel='CoG')
MeasureResponseCommand.preference_lookup_fn = get_feature_preference
MeasureResponseCommand.pattern_response_fn = pattern_response.instance()
## Set optimized versions of color conversion functions
imagen.colorspaces.rgb_to_hsv = color._rgb_to_hsv_array_opt
imagen.colorspaces.hsv_to_rgb = color._hsv_to_rgb_array_opt
# Automatically discover all .py files in this directory.
import os,fnmatch
__all__ = [f.split('.py')[0] for f in os.listdir(__path__[0]) if fnmatch.fnmatch(f,'[!._]*.py')]
del f,os,fnmatch
|
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
from six.moves import map
class PlatformInfo(object):
"""This class provides a consistent (and mockable) interpretation of
system-specific values (like sys.platform and platform.mac_ver())
to be used by the rest of the blinkpy code base.
Public (static) properties:
-- os_name
-- os_version
Note that 'future' is returned for os_version if the operating system is
newer than any version known to the code.
"""
def __init__(self, sys_module, platform_module, filesystem_module,
executive):
self._executive = executive
self._filesystem = filesystem_module
self._platform_module = platform_module
self.os_name = self._determine_os_name(sys_module.platform)
if self.os_name == 'linux':
self.os_version = self._determine_linux_version(platform_module)
if self.os_name == 'freebsd':
self.os_version = platform_module.release()
if self.os_name.startswith('mac'):
self.os_version = self._determine_mac_version(
platform_module.mac_ver()[0])
if self.os_name.startswith('win'):
self.os_version = self._determine_win_version(
self._win_version_tuple())
assert sys.platform != 'cygwin', 'Cygwin is not supported.'
def is_mac(self):
return self.os_name == 'mac'
def is_mac_monterey(self):
if not self.is_mac():
return False
command = ['sw_vers', '-productVersion']
version = self._executive.run_command(command).strip()
return version.startswith('12.')
def is_win(self):
return self.os_name == 'win'
def is_linux(self):
return self.os_name == 'linux'
def is_freebsd(self):
return self.os_name == 'freebsd'
def is_highdpi(self):
if self.is_mac():
output = self._executive.run_command(
['system_profiler', 'SPDisplaysDataType'],
error_handler=self._executive.ignore_error)
if output and re.search(r'Resolution:.*Retina$', output,
re.MULTILINE):
return True
return False
def is_running_rosetta(self):
if self.is_mac():
# If we are running under Rosetta, platform.machine() is
# 'x86_64'; we need to use a sysctl to see if we're being
# translated.
import ctypes
libSystem = ctypes.CDLL("libSystem.dylib")
ret = ctypes.c_int(0)
size = ctypes.c_size_t(4)
e = libSystem.sysctlbyname(ctypes.c_char_p(b'sysctl.proc_translated'),
ctypes.byref(ret), ctypes.byref(size), None, 0)
return e == 0 and ret.value == 1
return False
def display_name(self):
# platform.platform() returns Darwin information for Mac, which is just confusing.
if self.is_mac():
return 'Mac OS X %s' % self._platform_module.mac_ver()[0]
# Returns strings like:
# Linux-2.6.18-194.3.1.el5-i686-with-redhat-5.5-Final
# Windows-2008ServerR2-6.1.7600
return self._platform_module.platform()
def total_bytes_memory(self):
if self.is_mac():
return int(
self._executive.run_command(['sysctl', '-n', 'hw.memsize']))
return None
def terminal_width(self):
"""Returns sys.maxint if the width cannot be determined."""
try:
if self.is_win():
# From http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
handle = windll.kernel32.GetStdHandle(-12) # -12 == stderr
# 22 == sizeof(console_screen_buffer_info)
console_screen_buffer_info = create_string_buffer(22)
if windll.kernel32.GetConsoleScreenBufferInfo(
handle, console_screen_buffer_info):
import struct
_, _, _, _, _, left, _, right, _, _, _ = struct.unpack(
'hhhhHhhhhhh', console_screen_buffer_info.raw)
# Note that we return 1 less than the width since writing into the rightmost column
# automatically performs a line feed.
return right - left
return sys.maxsize
else:
import fcntl
import struct
import termios
packed = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ,
'\0' * 8)
_, columns, _, _ = struct.unpack('HHHH', packed)
return columns
except Exception: # pylint: disable=broad-except
return sys.maxsize
def get_machine(self):
return self._platform_module.machine()
def linux_distribution(self):
if not self.is_linux():
return None
# Fedora also has /etc/redhat-release, this check must go first.
if self._filesystem.exists('/etc/fedora-release'):
return 'fedora'
if self._filesystem.exists('/etc/redhat-release'):
return 'redhat'
if self._filesystem.exists('/etc/debian_version'):
return 'debian'
if self._filesystem.exists('/etc/arch-release'):
return 'arch'
return 'unknown'
def _determine_os_name(self, sys_platform):
if sys_platform == 'darwin':
return 'mac'
if sys_platform.startswith('linux'):
return 'linux'
if sys_platform == 'win32':
return 'win'
if sys_platform.startswith('freebsd'):
return 'freebsd'
raise AssertionError(
'unrecognized platform string "%s"' % sys_platform)
def _determine_mac_version(self, mac_version_string):
major_release = int(mac_version_string.split('.')[0])
minor_release = int(mac_version_string.split('.')[1])
if major_release == 10:
assert 10 <= minor_release <= 16, 'Unsupported mac OS version: %s' % mac_version_string
return 'mac{major_release}.{minor_release}'.format(
major_release=major_release,
minor_release=minor_release,
)
else:
assert 11 <= major_release <= 12, 'Unsupported mac OS version: %s' % mac_version_string
return 'mac{major_release}'.format(major_release=major_release, )
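# Hedged examples of the mapping above (illustrative only):
#   _determine_mac_version('10.15.7') -> 'mac10.15'
#   _determine_mac_version('12.3')    -> 'mac12'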
def _determine_linux_version(self, _):
return 'trusty'
def _determine_win_version(self, win_version_tuple):
if win_version_tuple[:2] == (10, 0):
# For win11 platform.win32_ver() returns (10, 0, 22000)
if win_version_tuple[2] >= 22000:
return '11'
else:
return '10.20h2'
if win_version_tuple[:2] == (6, 3):
return '8.1'
if win_version_tuple[:2] == (6, 2):
return '8'
if win_version_tuple[:3] == (6, 1, 7601):
return '7sp1'
if win_version_tuple[:3] == (6, 1, 7600):
return '7sp0'
if win_version_tuple[:2] == (6, 0):
return 'vista'
if win_version_tuple[:2] == (5, 1):
return 'xp'
assert (win_version_tuple[0] > 10
or win_version_tuple[0] == 10 and win_version_tuple[1] > 0), (
'Unrecognized Windows version tuple: "%s"' %
(win_version_tuple, ))
return 'future'
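# Hedged examples of the tuple-to-name mapping above (illustrative only):
#   _determine_win_version((10, 0, 22621)) -> '11'
#   _determine_win_version((10, 0, 19042)) -> '10.20h2'
#   _determine_win_version((6, 3, 9600))   -> '8.1'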
def _win_version_tuple(self):
version_str = self._platform_module.win32_ver()[1]
if version_str:
return tuple(map(int, version_str.split('.')))
return self._win_version_tuple_from_cmd()
def _win_version_tuple_from_cmd(self):
# Note that this should only ever be called on Windows, so this should always work.
ver_output = self._executive.run_command(['cmd', '/c', 'ver'],
decode_output=False)
match_object = re.search(
r'(?P<major>\d+)\.(?P<minor>\d)\.(?P<build>\d+)', ver_output)
assert match_object, 'cmd returned an unexpected version string: ' + ver_output
return tuple(map(int, match_object.groups()))
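# Hedged construction sketch (commented out; the executive and filesystem
# arguments are placeholders -- blinkpy normally injects its own objects):
#   import platform, sys
#   info = PlatformInfo(sys, platform, some_filesystem, some_executive)
#   info.os_name         # e.g. 'linux', 'mac', 'win'
#   info.display_name()  # human-readable platform string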
|
|
"""Constants for the opentherm_gw integration."""
import pyotgw.vars as gw_vars
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS
ATTR_GW_ID = "gateway_id"
ATTR_MODE = "mode"
ATTR_LEVEL = "level"
CONF_CLIMATE = "climate"
CONF_FLOOR_TEMP = "floor_temperature"
CONF_PRECISION = "precision"
DATA_GATEWAYS = "gateways"
DATA_OPENTHERM_GW = "opentherm_gw"
DEVICE_CLASS_COLD = "cold"
DEVICE_CLASS_HEAT = "heat"
DEVICE_CLASS_PROBLEM = "problem"
SERVICE_RESET_GATEWAY = "reset_gateway"
SERVICE_SET_CLOCK = "set_clock"
SERVICE_SET_CONTROL_SETPOINT = "set_control_setpoint"
SERVICE_SET_GPIO_MODE = "set_gpio_mode"
SERVICE_SET_LED_MODE = "set_led_mode"
SERVICE_SET_MAX_MOD = "set_max_modulation"
SERVICE_SET_OAT = "set_outside_temperature"
SERVICE_SET_SB_TEMP = "set_setback_temperature"
UNIT_BAR = "bar"
UNIT_HOUR = "h"
UNIT_KW = "kW"
UNIT_L_MIN = "L/min"
UNIT_PERCENT = "%"
BINARY_SENSOR_INFO = {
# [device_class, friendly_name format]
gw_vars.DATA_MASTER_CH_ENABLED: [None, "Thermostat Central Heating Enabled {}"],
gw_vars.DATA_MASTER_DHW_ENABLED: [None, "Thermostat Hot Water Enabled {}"],
gw_vars.DATA_MASTER_COOLING_ENABLED: [None, "Thermostat Cooling Enabled {}"],
gw_vars.DATA_MASTER_OTC_ENABLED: [
None,
"Thermostat Outside Temperature Correction Enabled {}",
],
gw_vars.DATA_MASTER_CH2_ENABLED: [None, "Thermostat Central Heating 2 Enabled {}"],
gw_vars.DATA_SLAVE_FAULT_IND: [DEVICE_CLASS_PROBLEM, "Boiler Fault Indication {}"],
gw_vars.DATA_SLAVE_CH_ACTIVE: [
DEVICE_CLASS_HEAT,
"Boiler Central Heating Status {}",
],
gw_vars.DATA_SLAVE_DHW_ACTIVE: [DEVICE_CLASS_HEAT, "Boiler Hot Water Status {}"],
gw_vars.DATA_SLAVE_FLAME_ON: [DEVICE_CLASS_HEAT, "Boiler Flame Status {}"],
gw_vars.DATA_SLAVE_COOLING_ACTIVE: [DEVICE_CLASS_COLD, "Boiler Cooling Status {}"],
gw_vars.DATA_SLAVE_CH2_ACTIVE: [
DEVICE_CLASS_HEAT,
"Boiler Central Heating 2 Status {}",
],
gw_vars.DATA_SLAVE_DIAG_IND: [
DEVICE_CLASS_PROBLEM,
"Boiler Diagnostics Indication {}",
],
gw_vars.DATA_SLAVE_DHW_PRESENT: [None, "Boiler Hot Water Present {}"],
gw_vars.DATA_SLAVE_CONTROL_TYPE: [None, "Boiler Control Type {}"],
gw_vars.DATA_SLAVE_COOLING_SUPPORTED: [None, "Boiler Cooling Support {}"],
gw_vars.DATA_SLAVE_DHW_CONFIG: [None, "Boiler Hot Water Configuration {}"],
gw_vars.DATA_SLAVE_MASTER_LOW_OFF_PUMP: [None, "Boiler Pump Commands Support {}"],
gw_vars.DATA_SLAVE_CH2_PRESENT: [None, "Boiler Central Heating 2 Present {}"],
gw_vars.DATA_SLAVE_SERVICE_REQ: [
DEVICE_CLASS_PROBLEM,
"Boiler Service Required {}",
],
gw_vars.DATA_SLAVE_REMOTE_RESET: [None, "Boiler Remote Reset Support {}"],
gw_vars.DATA_SLAVE_LOW_WATER_PRESS: [
DEVICE_CLASS_PROBLEM,
"Boiler Low Water Pressure {}",
],
gw_vars.DATA_SLAVE_GAS_FAULT: [DEVICE_CLASS_PROBLEM, "Boiler Gas Fault {}"],
gw_vars.DATA_SLAVE_AIR_PRESS_FAULT: [
DEVICE_CLASS_PROBLEM,
"Boiler Air Pressure Fault {}",
],
gw_vars.DATA_SLAVE_WATER_OVERTEMP: [
DEVICE_CLASS_PROBLEM,
"Boiler Water Overtemperature {}",
],
gw_vars.DATA_REMOTE_TRANSFER_DHW: [
None,
"Remote Hot Water Setpoint Transfer Support {}",
],
gw_vars.DATA_REMOTE_TRANSFER_MAX_CH: [
None,
"Remote Maximum Central Heating Setpoint Write Support {}",
],
gw_vars.DATA_REMOTE_RW_DHW: [None, "Remote Hot Water Setpoint Write Support {}"],
gw_vars.DATA_REMOTE_RW_MAX_CH: [
None,
"Remote Central Heating Setpoint Write Support {}",
],
gw_vars.DATA_ROVRD_MAN_PRIO: [None, "Remote Override Manual Change Priority {}"],
gw_vars.DATA_ROVRD_AUTO_PRIO: [None, "Remote Override Program Change Priority {}"],
gw_vars.OTGW_GPIO_A_STATE: [None, "Gateway GPIO A State {}"],
gw_vars.OTGW_GPIO_B_STATE: [None, "Gateway GPIO B State {}"],
gw_vars.OTGW_IGNORE_TRANSITIONS: [None, "Gateway Ignore Transitions {}"],
gw_vars.OTGW_OVRD_HB: [None, "Gateway Override High Byte {}"],
}
SENSOR_INFO = {
# [device_class, unit, friendly_name]
gw_vars.DATA_CONTROL_SETPOINT: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Control Setpoint {}",
],
gw_vars.DATA_MASTER_MEMBERID: [None, None, "Thermostat Member ID {}"],
gw_vars.DATA_SLAVE_MEMBERID: [None, None, "Boiler Member ID {}"],
gw_vars.DATA_SLAVE_OEM_FAULT: [None, None, "Boiler OEM Fault Code {}"],
gw_vars.DATA_COOLING_CONTROL: [None, UNIT_PERCENT, "Cooling Control Signal {}"],
gw_vars.DATA_CONTROL_SETPOINT_2: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Control Setpoint 2 {}",
],
gw_vars.DATA_ROOM_SETPOINT_OVRD: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Room Setpoint Override {}",
],
gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD: [
None,
UNIT_PERCENT,
"Boiler Maximum Relative Modulation {}",
],
gw_vars.DATA_SLAVE_MAX_CAPACITY: [None, UNIT_KW, "Boiler Maximum Capacity {}"],
gw_vars.DATA_SLAVE_MIN_MOD_LEVEL: [
None,
UNIT_PERCENT,
"Boiler Minimum Modulation Level {}",
],
gw_vars.DATA_ROOM_SETPOINT: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Room Setpoint {}",
],
gw_vars.DATA_REL_MOD_LEVEL: [None, UNIT_PERCENT, "Relative Modulation Level {}"],
gw_vars.DATA_CH_WATER_PRESS: [None, UNIT_BAR, "Central Heating Water Pressure {}"],
gw_vars.DATA_DHW_FLOW_RATE: [None, UNIT_L_MIN, "Hot Water Flow Rate {}"],
gw_vars.DATA_ROOM_SETPOINT_2: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Room Setpoint 2 {}",
],
gw_vars.DATA_ROOM_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Room Temperature {}",
],
gw_vars.DATA_CH_WATER_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Central Heating Water Temperature {}",
],
gw_vars.DATA_DHW_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Hot Water Temperature {}",
],
gw_vars.DATA_OUTSIDE_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Outside Temperature {}",
],
gw_vars.DATA_RETURN_WATER_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Return Water Temperature {}",
],
gw_vars.DATA_SOLAR_STORAGE_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Solar Storage Temperature {}",
],
gw_vars.DATA_SOLAR_COLL_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Solar Collector Temperature {}",
],
gw_vars.DATA_CH_WATER_TEMP_2: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Central Heating 2 Water Temperature {}",
],
gw_vars.DATA_DHW_TEMP_2: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Hot Water 2 Temperature {}",
],
gw_vars.DATA_EXHAUST_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Exhaust Temperature {}",
],
gw_vars.DATA_SLAVE_DHW_MAX_SETP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Hot Water Maximum Setpoint {}",
],
gw_vars.DATA_SLAVE_DHW_MIN_SETP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Hot Water Minimum Setpoint {}",
],
gw_vars.DATA_SLAVE_CH_MAX_SETP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Boiler Maximum Central Heating Setpoint {}",
],
gw_vars.DATA_SLAVE_CH_MIN_SETP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Boiler Minimum Central Heating Setpoint {}",
],
gw_vars.DATA_DHW_SETPOINT: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Hot Water Setpoint {}",
],
gw_vars.DATA_MAX_CH_SETPOINT: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Maximum Central Heating Setpoint {}",
],
gw_vars.DATA_OEM_DIAG: [None, None, "OEM Diagnostic Code {}"],
gw_vars.DATA_TOTAL_BURNER_STARTS: [None, None, "Total Burner Starts {}"],
gw_vars.DATA_CH_PUMP_STARTS: [None, None, "Central Heating Pump Starts {}"],
gw_vars.DATA_DHW_PUMP_STARTS: [None, None, "Hot Water Pump Starts {}"],
gw_vars.DATA_DHW_BURNER_STARTS: [None, None, "Hot Water Burner Starts {}"],
gw_vars.DATA_TOTAL_BURNER_HOURS: [None, UNIT_HOUR, "Total Burner Hours {}"],
gw_vars.DATA_CH_PUMP_HOURS: [None, UNIT_HOUR, "Central Heating Pump Hours {}"],
gw_vars.DATA_DHW_PUMP_HOURS: [None, UNIT_HOUR, "Hot Water Pump Hours {}"],
gw_vars.DATA_DHW_BURNER_HOURS: [None, UNIT_HOUR, "Hot Water Burner Hours {}"],
gw_vars.DATA_MASTER_OT_VERSION: [None, None, "Thermostat OpenTherm Version {}"],
gw_vars.DATA_SLAVE_OT_VERSION: [None, None, "Boiler OpenTherm Version {}"],
gw_vars.DATA_MASTER_PRODUCT_TYPE: [None, None, "Thermostat Product Type {}"],
gw_vars.DATA_MASTER_PRODUCT_VERSION: [None, None, "Thermostat Product Version {}"],
gw_vars.DATA_SLAVE_PRODUCT_TYPE: [None, None, "Boiler Product Type {}"],
gw_vars.DATA_SLAVE_PRODUCT_VERSION: [None, None, "Boiler Product Version {}"],
gw_vars.OTGW_MODE: [None, None, "Gateway/Monitor Mode {}"],
gw_vars.OTGW_DHW_OVRD: [None, None, "Gateway Hot Water Override Mode {}"],
gw_vars.OTGW_ABOUT: [None, None, "Gateway Firmware Version {}"],
gw_vars.OTGW_BUILD: [None, None, "Gateway Firmware Build {}"],
gw_vars.OTGW_CLOCKMHZ: [None, None, "Gateway Clock Speed {}"],
gw_vars.OTGW_LED_A: [None, None, "Gateway LED A Mode {}"],
gw_vars.OTGW_LED_B: [None, None, "Gateway LED B Mode {}"],
gw_vars.OTGW_LED_C: [None, None, "Gateway LED C Mode {}"],
gw_vars.OTGW_LED_D: [None, None, "Gateway LED D Mode {}"],
gw_vars.OTGW_LED_E: [None, None, "Gateway LED E Mode {}"],
gw_vars.OTGW_LED_F: [None, None, "Gateway LED F Mode {}"],
gw_vars.OTGW_GPIO_A: [None, None, "Gateway GPIO A Mode {}"],
gw_vars.OTGW_GPIO_B: [None, None, "Gateway GPIO B Mode {}"],
gw_vars.OTGW_SB_TEMP: [
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
"Gateway Setback Temperature {}",
],
gw_vars.OTGW_SETP_OVRD_MODE: [None, None, "Gateway Room Setpoint Override Mode {}"],
gw_vars.OTGW_SMART_PWR: [None, None, "Gateway Smart Power Mode {}"],
gw_vars.OTGW_THRM_DETECT: [None, None, "Gateway Thermostat Detection {}"],
gw_vars.OTGW_VREF: [None, None, "Gateway Reference Voltage Setting {}"],
}
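# Illustrative consumption sketch (hypothetical; the actual sensor platform
# lives elsewhere in this integration): each SENSOR_INFO entry unpacks as
#   device_class, unit, friendly_name_format = SENSOR_INFO[gw_vars.DATA_ROOM_TEMP]
#   friendly_name = friendly_name_format.format(gw_id)
# and BINARY_SENSOR_INFO entries unpack the same way, minus the unit.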
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.common import constants as n_const
from neutron.common import utils
from neutron import context
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_hamode_db
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2 import models
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit import testlib_api
HOST = helpers.HOST
HOST_2 = 'HOST_2'
HOST_3 = 'HOST_3'
HOST_2_TUNNELING_IP = '20.0.0.2'
HOST_3_TUNNELING_IP = '20.0.0.3'
TEST_ROUTER_ID = 'router_id'
TEST_NETWORK_ID = 'network_id'
TEST_HA_NETWORK_ID = 'ha_network_id'
class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(TestL2PopulationDBTestCase, self).setUp()
self.ctx = context.get_admin_context()
self._create_network()
def _create_network(self, network_id=TEST_NETWORK_ID):
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(models_v2.Network(id=network_id))
def _create_router(self, distributed=True, ha=False):
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(l3_db.Router(id=TEST_ROUTER_ID))
self.ctx.session.add(l3_attrs_db.RouterExtraAttributes(
router_id=TEST_ROUTER_ID, distributed=distributed, ha=ha))
def _create_ha_router(self, distributed=False):
helpers.register_l3_agent(HOST_2)
helpers.register_ovs_agent(HOST_2, tunneling_ip=HOST_2_TUNNELING_IP)
# Register an L3 agent on HOST_3, which doesn't host any HA router.
# Tests should verify that HOST_3 is not an HA agent host.
helpers.register_l3_agent(HOST_3)
helpers.register_ovs_agent(HOST_3, tunneling_ip=HOST_3_TUNNELING_IP)
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(models_v2.Network(id=TEST_HA_NETWORK_ID))
self._create_router(distributed=distributed, ha=True)
for state, host in [(n_const.HA_ROUTER_STATE_ACTIVE, HOST),
(n_const.HA_ROUTER_STATE_STANDBY, HOST_2)]:
self._setup_port_binding(
network_id=TEST_HA_NETWORK_ID,
device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
device_id=TEST_ROUTER_ID,
host_state=state,
host=host)
def get_l3_agent_by_host(self, agent_host):
plugin = helpers.FakePlugin()
return plugin._get_agent_by_type_and_host(
self.ctx, constants.AGENT_TYPE_L3, agent_host)
def test_get_agent_by_host(self):
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
agent = l2pop_db.get_agent_by_host(
self.ctx.session, helpers.HOST)
self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
def test_get_agent_by_host_no_candidate(self):
helpers.register_l3_agent()
helpers.register_dhcp_agent()
agent = l2pop_db.get_agent_by_host(
self.ctx.session, helpers.HOST)
self.assertIsNone(agent)
def _setup_port_binding(self, **kwargs):
with self.ctx.session.begin(subtransactions=True):
mac = utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
port_id = uuidutils.generate_uuid()
network_id = kwargs.get('network_id', TEST_NETWORK_ID)
device_owner = kwargs.get('device_owner', '')
device_id = kwargs.get('device_id', '')
host = kwargs.get('host', helpers.HOST)
self.ctx.session.add(models_v2.Port(
id=port_id, network_id=network_id, mac_address=mac,
admin_state_up=True, status=constants.PORT_STATUS_ACTIVE,
device_id=device_id, device_owner=device_owner))
port_binding_cls = models.PortBinding
binding_kwarg = {'port_id': port_id,
'host': host,
'vif_type': portbindings.VIF_TYPE_UNBOUND,
'vnic_type': portbindings.VNIC_NORMAL}
if device_owner == constants.DEVICE_OWNER_DVR_INTERFACE:
port_binding_cls = models.DistributedPortBinding
binding_kwarg['router_id'] = TEST_ROUTER_ID
binding_kwarg['status'] = constants.PORT_STATUS_DOWN
self.ctx.session.add(port_binding_cls(**binding_kwarg))
if network_id == TEST_HA_NETWORK_ID:
agent = self.get_l3_agent_by_host(host)
haport_bindings_cls = l3_hamode_db.L3HARouterAgentPortBinding
habinding_kwarg = {'port_id': port_id,
'router_id': device_id,
'l3_agent_id': agent['id'],
'state': kwargs.get('host_state',
n_const.HA_ROUTER_STATE_ACTIVE)}
self.ctx.session.add(haport_bindings_cls(**habinding_kwarg))
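# Hedged example (commented out; values are illustrative): a DVR interface
# port bound on the default test host would be created with
#   self._setup_port_binding(
#       device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
#       device_id=TEST_ROUTER_ID,
#       host=helpers.HOST)
# which adds both a Port row and a DistributedPortBinding row.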
def test_get_distributed_active_network_ports(self):
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
# Register an L2 agent plus a bunch of other agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
tunnel_network_ports = l2pop_db.get_distributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(tunnel_network_ports))
_, agent = tunnel_network_ports[0]
self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
def test_get_distributed_active_network_ports_no_candidate(self):
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
# Register a bunch of non-L2 agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
tunnel_network_ports = l2pop_db.get_distributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(tunnel_network_ports))
def test_get_nondistributed_active_network_ports(self):
self._setup_port_binding(dvr=False)
# Register an L2 agent plus a bunch of other agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(fdb_network_ports))
_, agent = fdb_network_ports[0]
self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
def test_get_nondistributed_active_network_ports_no_candidate(self):
self._setup_port_binding(dvr=False)
# Register a bunch of non-L2 agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(fdb_network_ports))
def test__get_ha_router_interface_ids_with_ha_dvr_snat_port(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(list(ha_iface_ids)))
def test__get_ha_router_interface_ids_with_ha_replicated_port(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_HA_REPLICATED_INT,
device_id=TEST_ROUTER_ID)
ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(list(ha_iface_ids)))
def test__get_ha_router_interface_ids_with_no_ha_port(self):
self._create_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(list(ha_iface_ids)))
def test_active_network_ports_with_dvr_snat_port(self):
# Test getting the agent hosting the DVR SNAT port
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
# create DVR router
self._create_router()
# setup DVR snat port
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
helpers.register_dhcp_agent()
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(fdb_network_ports))
def test_active_network_ports_with_ha_dvr_snat_port(self):
# Test getting the HA agents hosting the HA+DVR SNAT port
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
# create HA+DVR router
self._create_ha_router()
# setup HA snat port
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(fdb_network_ports))
ha_ports = l2pop_db.get_ha_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(2, len(ha_ports))
def test_active_port_count_with_dvr_snat_port(self):
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
self._create_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
helpers.register_dhcp_agent()
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST, TEST_NETWORK_ID)
self.assertEqual(1, port_count)
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST_2, TEST_NETWORK_ID)
self.assertEqual(0, port_count)
def test_active_port_count_with_ha_dvr_snat_port(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST, TEST_NETWORK_ID)
self.assertEqual(1, port_count)
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST_2, TEST_NETWORK_ID)
self.assertEqual(1, port_count)
def test_get_ha_agents_by_router_id(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
agents = l2pop_db.get_ha_agents_by_router_id(
self.ctx.session, TEST_ROUTER_ID)
ha_agents = [agent.host for agent in agents]
self.assertEqual(tools.UnorderedList([HOST, HOST_2]), ha_agents)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for the Hyper-V driver and related APIs.
"""
import io
import mox
import os
import platform
import shutil
import time
import uuid
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova.image import glance
from nova import test
from nova.tests import fake_network
from nova.tests.hyperv import db_fakes
from nova.tests.hyperv import fake
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
CONF = cfg.CONF
CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
class HyperVAPITestCase(test.TestCase):
"""Unit tests for Hyper-V driver calls."""
def __init__(self, test_case_name):
self._mox = mox.Mox()
super(HyperVAPITestCase, self).__init__(test_case_name)
def setUp(self):
super(HyperVAPITestCase, self).setUp()
self._user_id = 'fake'
self._project_id = 'fake'
self._instance_data = None
self._image_metadata = None
self._fetched_image = None
self._update_image_raise_exception = False
self._volume_target_portal = 'testtargetportal:3260'
self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
self._context = context.RequestContext(self._user_id, self._project_id)
self._instance_ide_disks = []
self._instance_ide_dvds = []
self._instance_volume_disks = []
self._test_vm_name = None
self._check_min_windows_version_satisfied = True
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.quantumv2.api.API')
self.flags(force_volumeutils_v1=True, group='hyperv')
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
self.stubs.Set(images, 'fetch', fake_fetch)
def fake_get_remote_image_service(context, name):
class FakeGlanceImageService(object):
def update(self_fake, context, image_id, image_metadata, f):
if self._update_image_raise_exception:
raise vmutils.HyperVException(
"Simulated update failure")
self._image_metadata = image_metadata
return (FakeGlanceImageService(), 1)
self.stubs.Set(glance, 'get_remote_image_service',
fake_get_remote_image_service)
def fake_check_min_windows_version(fake_self, major, minor):
return self._check_min_windows_version_satisfied
self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
fake_check_min_windows_version)
def fake_sleep(ms):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
def fake_vmutils__init__(self, host='.'):
pass
vmutils.VMUtils.__init__ = fake_vmutils__init__
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
self._mox.StubOutWithMock(fake.PathUtils, 'copy')
self._mox.StubOutWithMock(fake.PathUtils, 'remove')
self._mox.StubOutWithMock(fake.PathUtils, 'rename')
self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
self._mox.StubOutWithMock(fake.PathUtils,
'get_instance_migr_revert_dir')
self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count')
self._mox.StubOutWithMock(vmutils.VMUtils,
'attach_volume_to_controller')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_controller_volume_paths')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
self._mox.StubOutWithMock(hostutils.HostUtils,
'is_cpu_feature_present')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'get_external_vswitch')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'create_vswitch_port')
self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
'live_migrate_vm')
self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
'check_live_migration_config')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'volume_in_mapping')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_session_id_from_mounted_disk')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_device_number_for_target')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_target_from_disk_path')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'login_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'execute_log_out')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'login_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'execute_log_out')
self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
'metadata_for_config_drive')
# Can't use StubOutClassWithMocks due to __exit__ and __enter__
self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
self._mox.StubOutWithMock(utils, 'execute')
def tearDown(self):
self._mox.UnsetStubs()
super(HyperVAPITestCase, self).tearDown()
def test_get_available_resource(self):
cpu_info = {'Architecture': 'fake',
'Name': 'fake',
'Manufacturer': 'ACME, Inc.',
'NumberOfCores': 2,
'NumberOfLogicalProcessors': 4}
tot_mem_kb = 2000000L
free_mem_kb = 1000000L
tot_hdd_b = 4L * 1024 ** 3
free_hdd_b = 3L * 1024 ** 3
windows_version = '6.2.9200'
hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
free_mem_kb))
m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
m.AndReturn((tot_hdd_b, free_hdd_b))
hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
m.MultipleTimes()
m = hostutils.HostUtils.get_windows_version()
m.AndReturn(windows_version)
self._mox.ReplayAll()
dic = self._conn.get_available_resource(None)
self._mox.VerifyAll()
self.assertEquals(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
self.assertEquals(dic['hypervisor_hostname'], platform.node())
self.assertEquals(dic['memory_mb'], tot_mem_kb / 1024)
self.assertEquals(dic['memory_mb_used'],
tot_mem_kb / 1024 - free_mem_kb / 1024)
self.assertEquals(dic['local_gb'], tot_hdd_b / 1024 ** 3)
self.assertEquals(dic['local_gb_used'],
tot_hdd_b / 1024 ** 3 - free_hdd_b / 1024 ** 3)
self.assertEquals(dic['hypervisor_version'],
windows_version.replace('.', ''))
def test_get_host_stats(self):
tot_mem_kb = 2000000L
free_mem_kb = 1000000L
tot_hdd_b = 4L * 1024 ** 3
free_hdd_b = 3L * 1024 ** 3
hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
free_mem_kb))
m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
m.AndReturn((tot_hdd_b, free_hdd_b))
self._mox.ReplayAll()
dic = self._conn.get_host_stats(True)
self._mox.VerifyAll()
self.assertEquals(dic['disk_total'], tot_hdd_b / 1024 ** 3)
self.assertEquals(dic['disk_available'], free_hdd_b / 1024 ** 3)
self.assertEquals(dic['host_memory_total'], tot_mem_kb / 1024)
self.assertEquals(dic['host_memory_free'], free_mem_kb / 1024)
self.assertEquals(dic['disk_total'],
dic['disk_used'] + dic['disk_available'])
self.assertEquals(dic['host_memory_total'],
dic['host_memory_overhead'] +
dic['host_memory_free'])
def test_list_instances(self):
fake_instances = ['fake1', 'fake2']
vmutils.VMUtils.list_instances().AndReturn(fake_instances)
self._mox.ReplayAll()
instances = self._conn.list_instances()
self._mox.VerifyAll()
self.assertEquals(instances, fake_instances)
def test_get_info(self):
self._instance_data = self._get_instance_data()
summary_info = {'NumberOfProcessors': 2,
'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
'MemoryUsage': 1000,
'UpTime': 1}
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.get_vm_summary_info(func)
m.AndReturn(summary_info)
self._mox.ReplayAll()
info = self._conn.get_info(self._instance_data)
self._mox.VerifyAll()
self.assertEquals(info["state"], power_state.RUNNING)
def test_spawn_cow_image(self):
self._test_spawn_instance(True)
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
def _setup_spawn_config_drive_mocks(self, use_cdrom):
im = instance_metadata.InstanceMetadata(mox.IgnoreArg(),
content=mox.IsA(list),
extra_md=mox.IsA(dict))
cdb = self._mox.CreateMockAnything()
m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
m.AndReturn(cdb)
# __enter__ and __exit__ are required by "with"
cdb.__enter__().AndReturn(cdb)
cdb.make_drive(mox.IsA(str))
cdb.__exit__(None, None, None).AndReturn(None)
if not use_cdrom:
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
mox.IsA(str),
mox.IsA(str),
attempts=1)
fake.PathUtils.remove(mox.IsA(str))
m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk)
def _test_spawn_config_drive(self, use_cdrom):
self.flags(force_config_drive=True)
self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
self.flags(mkisofs_cmd='mkisofs.exe')
self._setup_spawn_config_drive_mocks(use_cdrom)
if use_cdrom:
expected_ide_disks = 1
expected_ide_dvds = 1
else:
expected_ide_disks = 2
expected_ide_dvds = 0
self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
expected_ide_dvds=expected_ide_dvds)
def test_spawn_config_drive(self):
self._test_spawn_config_drive(False)
def test_spawn_config_drive_cdrom(self):
self._test_spawn_config_drive(True)
def test_spawn_no_config_drive(self):
self.flags(force_config_drive=False)
expected_ide_disks = 1
expected_ide_dvds = 0
self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
expected_ide_dvds=expected_ide_dvds)
def test_spawn_nova_net_vif(self):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
def setup_vif_mocks():
fake_vswitch_path = 'fake vswitch path'
fake_vswitch_port = 'fake port'
m = networkutils.NetworkUtils.get_external_vswitch(
CONF.hyperv.vswitch_name)
m.AndReturn(fake_vswitch_path)
m = networkutils.NetworkUtils.create_vswitch_port(
fake_vswitch_path, mox.IsA(str))
m.AndReturn(fake_vswitch_port)
vmutils.VMUtils.set_nic_connection(mox.IsA(str), mox.IsA(str),
fake_vswitch_port)
self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
def test_spawn_nova_net_vif_no_vswitch_exception(self):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
def setup_vif_mocks():
m = networkutils.NetworkUtils.get_external_vswitch(
CONF.hyperv.vswitch_name)
m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
setup_vif_mocks_func=setup_vif_mocks,
with_exception=True)
def _check_instance_name(self, vm_name):
return vm_name == self._instance_data['name']
def _test_vm_state_change(self, action, from_state, to_state):
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
to_state)
self._mox.ReplayAll()
action(self._instance_data)
self._mox.VerifyAll()
def test_pause(self):
self._test_vm_state_change(self._conn.pause, None,
constants.HYPERV_VM_STATE_PAUSED)
def test_pause_already_paused(self):
self._test_vm_state_change(self._conn.pause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_PAUSED)
def test_unpause(self):
self._test_vm_state_change(self._conn.unpause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_ENABLED)
def test_unpause_already_running(self):
self._test_vm_state_change(self._conn.unpause, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_suspend(self):
self._test_vm_state_change(self._conn.suspend, None,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_suspend_already_suspended(self):
self._test_vm_state_change(self._conn.suspend,
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None),
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None), None,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
self._test_vm_state_change(self._conn.power_off, None,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_already_powered_off(self):
self._test_vm_state_change(self._conn.power_off,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_on(self):
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()
def test_power_on_already_running(self):
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()
def test_reboot(self):
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_REBOOT)
self._mox.ReplayAll()
self._conn.reboot(self._context, self._instance_data, network_info,
None)
self._mox.VerifyAll()
def _setup_destroy_mocks(self):
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([], []))
vmutils.VMUtils.destroy_vm(func)
def test_destroy(self):
self._instance_data = self._get_instance_data()
self._setup_destroy_mocks()
self._mox.ReplayAll()
self._conn.destroy(self._instance_data, None)
self._mox.VerifyAll()
def test_live_migration_unsupported_os(self):
self._check_min_windows_version_satisfied = False
self._conn = driver_hyperv.HyperVDriver(None)
self._test_live_migration(unsupported_os=True)
def test_live_migration_without_volumes(self):
self._test_live_migration()
def test_live_migration_with_volumes(self):
self._test_live_migration(with_volumes=True)
def test_live_migration_with_target_failure(self):
self._test_live_migration(test_failure=True)
def _test_live_migration(self, test_failure=False,
with_volumes=False,
unsupported_os=False):
dest_server = 'fake_server'
instance_data = self._get_instance_data()
instance_name = instance_data['name']
fake_post_method = self._mox.CreateMockAnything()
if not test_failure and not unsupported_os:
fake_post_method(self._context, instance_data, dest_server,
False)
fake_recover_method = self._mox.CreateMockAnything()
if test_failure:
fake_recover_method(self._context, instance_data, dest_server,
False)
fake_ide_controller_path = 'fakeide'
fake_scsi_controller_path = 'fakescsi'
if with_volumes:
fake_scsi_disk_path = 'fake_scsi_disk_path'
fake_target_iqn = 'fake_target_iqn'
fake_target_lun = 1
fake_scsi_paths = {0: fake_scsi_disk_path}
else:
fake_scsi_paths = {}
if not unsupported_os:
m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
instance_data['name'], dest_server)
if test_failure:
m.AndRaise(vmutils.HyperVException('Simulated failure'))
if with_volumes:
m.AndReturn([(fake_target_iqn, fake_target_lun)])
volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
else:
m.AndReturn([])
self._mox.ReplayAll()
try:
hyperv_exception_raised = False
unsupported_os_exception_raised = False
self._conn.live_migration(self._context, instance_data,
dest_server, fake_post_method,
fake_recover_method)
except vmutils.HyperVException:
hyperv_exception_raised = True
except NotImplementedError:
unsupported_os_exception_raised = True
self.assertTrue(not test_failure ^ hyperv_exception_raised)
self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
self._test_pre_live_migration(True, False)
def test_pre_live_migration_no_cow_image(self):
self._test_pre_live_migration(False, False)
def test_pre_live_migration_with_volumes(self):
self._test_pre_live_migration(False, True)
def _test_pre_live_migration(self, cow, with_volumes):
self.flags(use_cow_images=cow)
instance_data = self._get_instance_data()
instance = db.instance_create(self._context, instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
m.AndReturn(True)
if cow:
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
m.AndReturn(False)
m = vhdutils.VHDUtils.get_vhd_info(mox.Func(self._check_img_path))
m.AndReturn({'MaxInternalSize': 1024})
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
if with_volumes:
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
else:
block_device_info = None
self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance,
block_device_info, network_info)
self._mox.VerifyAll()
if cow:
self.assertTrue(self._fetched_image is not None)
else:
self.assertTrue(self._fetched_image is None)
def test_snapshot_with_update_failure(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._update_image_raise_exception = True
self._mox.ReplayAll()
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
self._mox.VerifyAll()
# Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
def _setup_snapshot_mocks(self):
expected_calls = [
{'args': (),
'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
fake_hv_snapshot_path = 'fake_snapshot_path'
fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
self._instance_data = self._get_instance_data()
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.take_vm_snapshot(func)
m.AndReturn(fake_hv_snapshot_path)
m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
m.AndReturn(fake_parent_vhd_path)
self._fake_dest_disk_path = None
def copy_dest_disk_path(src, dest):
self._fake_dest_disk_path = dest
m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m.WithSideEffects(copy_dest_disk_path)
self._fake_dest_base_disk_path = None
def copy_dest_base_disk_path(src, dest):
self._fake_dest_base_disk_path = dest
m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
m.WithSideEffects(copy_dest_base_disk_path)
def check_dest_disk_path(path):
return path == self._fake_dest_disk_path
def check_dest_base_disk_path(path):
return path == self._fake_dest_base_disk_path
func1 = mox.Func(check_dest_disk_path)
func2 = mox.Func(check_dest_base_disk_path)
# Make sure that the hyper-v base and differential VHDs are merged
vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
vhdutils.VHDUtils.merge_vhd(func1, func2)
def check_snapshot_path(snapshot_path):
return snapshot_path == fake_hv_snapshot_path
# Make sure that the Hyper-V snapshot is removed
func = mox.Func(check_snapshot_path)
vmutils.VMUtils.remove_vm_snapshot(func)
fake.PathUtils.rmtree(mox.IsA(str))
m = fake.PathUtils.open(func2, 'rb')
m.AndReturn(io.BytesIO(b'fake content'))
return (snapshot_name, func_call_matcher)
def test_snapshot(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._mox.ReplayAll()
self._conn.snapshot(self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
self._mox.VerifyAll()
self.assertTrue(self._image_metadata and
"disk_format" in self._image_metadata and
self._image_metadata["disk_format"] == "vhd")
# Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
def _get_instance_data(self):
instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
return db_fakes.get_fake_instance_data(instance_name,
self._project_id,
self._user_id)
def _spawn_instance(self, cow, block_device_info=None):
self.flags(use_cow_images=cow)
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self._conn.spawn(self._context, instance, image,
injected_files=[], admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
def _add_ide_disk(self, vm_name, path, ctrller_addr,
drive_addr, drive_type):
if drive_type == constants.IDE_DISK:
self._instance_ide_disks.append(path)
elif drive_type == constants.IDE_DVD:
self._instance_ide_dvds.append(path)
def _add_volume_disk(self, vm_name, controller_path, address,
mounted_disk_path):
self._instance_volume_disks.append(mounted_disk_path)
def _check_img_path(self, image_path):
return image_path == self._fetched_image
def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
boot_from_volume=False,
block_device_info=None):
vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool))
if not boot_from_volume:
m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk).InAnyOrder()
func = mox.Func(self._check_vm_name)
m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
if boot_from_volume:
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
target_lun, target_portal, True)
vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name), mox.IsA(str),
mox.IsA(str)).InAnyOrder()
if setup_vif_mocks_func:
setup_vif_mocks_func()
def _set_vm_name(self, vm_name):
self._test_vm_name = vm_name
def _check_vm_name(self, vm_name):
return vm_name == self._test_vm_name
def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
with_exception=False,
block_device_info=None,
boot_from_volume=False):
m = vmutils.VMUtils.vm_exists(mox.IsA(str))
m.WithSideEffects(self._set_vm_name).AndReturn(False)
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
mox.IsA(str), block_device_info)
m.AndReturn(boot_from_volume)
if not boot_from_volume:
m = vhdutils.VHDUtils.get_vhd_info(mox.Func(self._check_img_path))
m.AndReturn({'MaxInternalSize': 1024})
if cow:
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
mox.IsA(str))
else:
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
self._setup_create_instance_mocks(setup_vif_mocks_func,
boot_from_volume,
block_device_info)
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
if with_exception:
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_vm_name))
m.AndReturn(True)
vmutils.VMUtils.destroy_vm(mox.Func(self._check_vm_name))
else:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
constants.HYPERV_VM_STATE_ENABLED)
def _test_spawn_instance(self, cow=True,
expected_ide_disks=1,
expected_ide_dvds=0,
setup_vif_mocks_func=None,
with_exception=False):
self._setup_spawn_instance_mocks(cow, setup_vif_mocks_func,
with_exception)
self._mox.ReplayAll()
self._spawn_instance(cow)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_ide_disks), expected_ide_disks)
self.assertEquals(len(self._instance_ide_dvds), expected_ide_dvds)
vhd_path = pathutils.PathUtils().get_vhd_path(self._test_vm_name)
self.assertEquals(vhd_path, self._instance_ide_disks[0])
def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
fake_mounted_disk,
fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
fake_mounted_disk, fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
volumeutils.VolumeUtils.login_storage_target(target_lun,
target_iqn,
target_portal)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
target_portal=None, boot_from_volume=False):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
if boot_from_volume:
m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
m.AndReturn(fake_controller_path)
fake_free_slot = 0
else:
m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
m.AndReturn(fake_controller_path)
fake_free_slot = 1
m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
fake_controller_path,
fake_free_slot,
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
def _test_volumeutils_version(self, is_hyperv_2012=True,
force_volumeutils_v1=False):
self._check_min_windows_version_satisfied = is_hyperv_2012
self.flags(force_volumeutils_v1=force_volumeutils_v1, group='hyperv')
self._conn = driver_hyperv.HyperVDriver(None)
is_volutils_v2 = isinstance(self._conn._volumeops._volutils,
volumeutilsv2.VolumeUtilsV2)
self.assertTrue((is_hyperv_2012 and not force_volumeutils_v1) ^
(not is_volutils_v2))
def test_volumeutils_version_hyperv_2012(self):
self._test_volumeutils_version(True, False)
def test_volumeutils_version_hyperv_2012_force_v1(self):
self._test_volumeutils_version(True, True)
def test_volumeutils_version_hyperv_2008R2(self):
self._test_volumeutils_version(False, False)
def test_attach_volume(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
target_portal)
self._mox.ReplayAll()
self._conn.attach_volume(connection_info, instance_data, mount_point)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_volume_disks), 1)
def _mock_detach_volume(self, target_iqn, target_lun):
mount_point = '/dev/sdc'
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_free_slot = 1
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
volumeutils.VolumeUtils.logout_storage_target(mox.IsA(str))
def test_detach_volume(self):
instance_data = self._get_instance_data()
instance_name = instance_data['name']
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_detach_volume(target_iqn, target_lun)
self._mox.ReplayAll()
self._conn.detach_volume(connection_info, instance_data, mount_point)
self._mox.VerifyAll()
def test_boot_from_volume(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
self._setup_spawn_instance_mocks(cow=False,
block_device_info=block_device_info,
boot_from_volume=True)
self._mox.ReplayAll()
self._spawn_instance(False, block_device_info)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_volume_disks), 1)
def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
with_exception=False):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
fake_local_ip = '10.0.0.1'
if same_host:
fake_dest_ip = fake_local_ip
else:
fake_dest_ip = '10.0.0.2'
fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
instance['name'])
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([fake_root_vhd_path], []))
m = hostutils.HostUtils.get_local_ips()
m.AndReturn([fake_local_ip])
m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
m.AndReturn(fake_revert_path)
if same_host:
fake.PathUtils.makedirs(mox.IsA(str))
m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
if with_exception:
m.AndRaise(shutil.Error('Simulated copy error'))
else:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
if same_host:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
self._setup_destroy_mocks()
return (instance, fake_dest_ip, network_info)
def test_migrate_disk_and_power_off(self):
(instance,
fake_dest_ip,
network_info) = self._setup_test_migrate_disk_and_power_off_mocks()
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, None,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_same_host(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
same_host=True)
(instance, fake_dest_ip, network_info) = args
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, None,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_exception(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
with_exception=True)
(instance, fake_dest_ip, network_info) = args
self._mox.ReplayAll()
self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip, None,
network_info)
self._mox.VerifyAll()
def test_finish_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
m.AndReturn(False)
self._mox.StubOutWithMock(fake.PathUtils, 'exists')
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
instance["image_ref"]))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'ParentPath': fake_parent_vhd_path,
'MaxInternalSize': 1})
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'MaxInternalSize': 1024})
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.finish_migration(self._context, None, instance, "",
network_info, None, False, None)
self._mox.VerifyAll()
def test_confirm_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
self._mox.ReplayAll()
self._conn.confirm_migration(None, instance, network_info)
self._mox.VerifyAll()
def test_finish_revert_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
instance['name'])
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
m.AndReturn(False)
m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
m.AndReturn(fake_revert_path)
fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.finish_revert_migration(instance, network_info, None)
self._mox.VerifyAll()
|
|
from importlib import reload
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import pytest
import sys
import ray
import ray.experimental.array.remote as ra
import ray.experimental.array.distributed as da
import ray.cluster_utils
@pytest.fixture
def reload_modules():
    modules = [ra.core, ra.random, ra.linalg, da.core, da.random, da.linalg]
    # Reload the array modules so each test starts from a fresh module state.
    for module in modules:
        reload(module)
def test_remote_array_methods(ray_start_2_cpus, reload_modules):
# test eye
object_id = ra.eye.remote(3)
val = ray.get(object_id)
assert_almost_equal(val, np.eye(3))
# test zeros
object_id = ra.zeros.remote([3, 4, 5])
val = ray.get(object_id)
assert_equal(val, np.zeros([3, 4, 5]))
# test qr - pass by value
a_val = np.random.normal(size=[10, 11])
q_id, r_id = ra.linalg.qr.remote(a_val)
q_val = ray.get(q_id)
r_val = ray.get(r_id)
assert_almost_equal(np.dot(q_val, r_val), a_val)
# test qr - pass by objectid
a = ra.random.normal.remote([10, 13])
q_id, r_id = ra.linalg.qr.remote(a)
a_val = ray.get(a)
q_val = ray.get(q_id)
r_val = ray.get(r_id)
assert_almost_equal(np.dot(q_val, r_val), a_val)
def test_distributed_array_assemble(ray_start_2_cpus, reload_modules):
a = ra.ones.remote([da.BLOCK_SIZE, da.BLOCK_SIZE])
b = ra.zeros.remote([da.BLOCK_SIZE, da.BLOCK_SIZE])
x = da.DistArray([2 * da.BLOCK_SIZE, da.BLOCK_SIZE], np.array([[a], [b]]))
assert_equal(
x.assemble(),
np.vstack([
np.ones([da.BLOCK_SIZE, da.BLOCK_SIZE]),
np.zeros([da.BLOCK_SIZE, da.BLOCK_SIZE])
]))
def test_distributed_array_methods(ray_start_cluster_2_nodes, reload_modules):
x = da.zeros.remote([9, 25, 51], "float")
assert_equal(ray.get(da.assemble.remote(x)), np.zeros([9, 25, 51]))
x = da.ones.remote([11, 25, 49], dtype_name="float")
assert_equal(ray.get(da.assemble.remote(x)), np.ones([11, 25, 49]))
x = da.random.normal.remote([11, 25, 49])
y = da.copy.remote(x)
assert_equal(
ray.get(da.assemble.remote(x)), ray.get(da.assemble.remote(y)))
x = da.eye.remote(25, dtype_name="float")
assert_equal(ray.get(da.assemble.remote(x)), np.eye(25))
x = da.random.normal.remote([25, 49])
y = da.triu.remote(x)
assert_equal(
ray.get(da.assemble.remote(y)), np.triu(
ray.get(da.assemble.remote(x))))
x = da.random.normal.remote([25, 49])
y = da.tril.remote(x)
assert_equal(
ray.get(da.assemble.remote(y)), np.tril(
ray.get(da.assemble.remote(x))))
x = da.random.normal.remote([25, 49])
y = da.random.normal.remote([49, 18])
z = da.dot.remote(x, y)
w = da.assemble.remote(z)
u = da.assemble.remote(x)
v = da.assemble.remote(y)
assert_almost_equal(ray.get(w), np.dot(ray.get(u), ray.get(v)))
# test add
x = da.random.normal.remote([23, 42])
y = da.random.normal.remote([23, 42])
z = da.add.remote(x, y)
assert_almost_equal(
ray.get(da.assemble.remote(z)),
ray.get(da.assemble.remote(x)) + ray.get(da.assemble.remote(y)))
# test subtract
x = da.random.normal.remote([33, 40])
y = da.random.normal.remote([33, 40])
z = da.subtract.remote(x, y)
assert_almost_equal(
ray.get(da.assemble.remote(z)),
ray.get(da.assemble.remote(x)) - ray.get(da.assemble.remote(y)))
# test transpose
x = da.random.normal.remote([234, 432])
y = da.transpose.remote(x)
assert_equal(
ray.get(da.assemble.remote(x)).T, ray.get(da.assemble.remote(y)))
# test numpy_to_dist
x = da.random.normal.remote([23, 45])
y = da.assemble.remote(x)
z = da.numpy_to_dist.remote(y)
w = da.assemble.remote(z)
assert_equal(
ray.get(da.assemble.remote(x)), ray.get(da.assemble.remote(z)))
assert_equal(ray.get(y), ray.get(w))
# test da.tsqr
for shape in [[123, da.BLOCK_SIZE], [7, da.BLOCK_SIZE],
[da.BLOCK_SIZE, da.BLOCK_SIZE], [da.BLOCK_SIZE, 7],
[10 * da.BLOCK_SIZE, da.BLOCK_SIZE]]:
x = da.random.normal.remote(shape)
K = min(shape)
q, r = da.linalg.tsqr.remote(x)
x_val = ray.get(da.assemble.remote(x))
q_val = ray.get(da.assemble.remote(q))
r_val = ray.get(r)
assert r_val.shape == (K, shape[1])
assert_equal(r_val, np.triu(r_val))
assert_almost_equal(x_val, np.dot(q_val, r_val))
assert_almost_equal(np.dot(q_val.T, q_val), np.eye(K))
# test da.linalg.modified_lu
def test_modified_lu(d1, d2):
print("testing dist_modified_lu with d1 = " + str(d1) + ", d2 = " +
str(d2))
assert d1 >= d2
m = ra.random.normal.remote([d1, d2])
q, r = ra.linalg.qr.remote(m)
l, u, s = da.linalg.modified_lu.remote(da.numpy_to_dist.remote(q))
q_val = ray.get(q)
ray.get(r)
l_val = ray.get(da.assemble.remote(l))
u_val = ray.get(u)
s_val = ray.get(s)
s_mat = np.zeros((d1, d2))
for i in range(len(s_val)):
s_mat[i, i] = s_val[i]
# Check that q - s = l * u.
assert_almost_equal(q_val - s_mat, np.dot(l_val, u_val))
# Check that u is upper triangular.
assert_equal(np.triu(u_val), u_val)
# Check that l is lower triangular.
assert_equal(np.tril(l_val), l_val)
for d1, d2 in [(100, 100), (99, 98), (7, 5), (7, 7), (20, 7), (20, 10)]:
test_modified_lu(d1, d2)
# test dist_tsqr_hr
def test_dist_tsqr_hr(d1, d2):
print("testing dist_tsqr_hr with d1 = " + str(d1) + ", d2 = " +
str(d2))
a = da.random.normal.remote([d1, d2])
y, t, y_top, r = da.linalg.tsqr_hr.remote(a)
a_val = ray.get(da.assemble.remote(a))
y_val = ray.get(da.assemble.remote(y))
t_val = ray.get(t)
y_top_val = ray.get(y_top)
r_val = ray.get(r)
tall_eye = np.zeros((d1, min(d1, d2)))
np.fill_diagonal(tall_eye, 1)
q = tall_eye - np.dot(y_val, np.dot(t_val, y_top_val.T))
# Check that q.T * q = I.
assert_almost_equal(np.dot(q.T, q), np.eye(min(d1, d2)))
# Check that a = (I - y * t * y_top.T) * r.
assert_almost_equal(np.dot(q, r_val), a_val)
for d1, d2 in [(123, da.BLOCK_SIZE), (7, da.BLOCK_SIZE), (da.BLOCK_SIZE,
da.BLOCK_SIZE),
(da.BLOCK_SIZE, 7), (10 * da.BLOCK_SIZE, da.BLOCK_SIZE)]:
test_dist_tsqr_hr(d1, d2)
def test_dist_qr(d1, d2):
print("testing qr with d1 = {}, and d2 = {}.".format(d1, d2))
a = da.random.normal.remote([d1, d2])
K = min(d1, d2)
q, r = da.linalg.qr.remote(a)
a_val = ray.get(da.assemble.remote(a))
q_val = ray.get(da.assemble.remote(q))
r_val = ray.get(da.assemble.remote(r))
assert q_val.shape == (d1, K)
assert r_val.shape == (K, d2)
assert_almost_equal(np.dot(q_val.T, q_val), np.eye(K))
assert_equal(r_val, np.triu(r_val))
assert_almost_equal(a_val, np.dot(q_val, r_val))
for d1, d2 in [(123, da.BLOCK_SIZE), (7, da.BLOCK_SIZE), (da.BLOCK_SIZE,
da.BLOCK_SIZE),
(da.BLOCK_SIZE, 7), (13, 21), (34, 35), (8, 7)]:
test_dist_qr(d1, d2)
test_dist_qr(d2, d1)
for _ in range(20):
d1 = np.random.randint(1, 35)
d2 = np.random.randint(1, 35)
test_dist_qr(d1, d2)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for dataset preparation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import math
import os
from absl import logging
import numpy as np
from PIL import Image
from PIL import ImageFile
import tensorflow.compat.v2 as tf
import cv2
gfile = tf.io.gfile
feature = tf.train.Feature
bytes_feature = lambda v: feature(bytes_list=tf.train.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=tf.train.Int64List(value=v))
float_feature = lambda v: feature(float_list=tf.train.FloatList(value=v))
def get_example(name, seq, seq_label=None, label_string=None,
frame_labels_string=None):
"""Returns a single SequenceExample for provided frames and labels in a video.
  There is some replication of information in storing frame_labels_string, but
  it is useful to have the strings as metadata with each SequenceExample.
  For now, the indices of frame_labels_string and label_string are assumed to
  refer to the classes listed in frame_labels and label.
TODO (debidatta): Convert list of label strings to dict.
Args:
name: string, name of the sequence.
seq: dict, dict of list of frames and optionally per-frame labels in video.
seq_label: int, label of video as an integer.
label_string: string, label of video as a string.
frame_labels_string: list, frame level labels as string.
"""
# Add sequential or frame-level features.
seq_feats = {}
if 'video' in seq:
frames_bytes = [image_to_bytes(frame) for frame in seq['video']]
seq_feats['video'] = tf.train.FeatureList(feature=frames_bytes)
# Add per-frame labels.
if 'labels' in seq:
label_bytes = [int64_feature([label]) for label in seq['labels']]
seq_feats['frame_labels'] = tf.train.FeatureList(feature=label_bytes)
# Create FeatureLists.
feature_lists = tf.train.FeatureLists(feature_list=seq_feats)
# Add context or video-level features.
seq_len = len(seq['video'])
context_features_dict = {'name': bytes_feature([name.encode()]),
'len': int64_feature([seq_len])}
if seq_label is not None:
logging.info('Label for %s: %s', name, str(seq_label))
context_features_dict['label'] = int64_feature([seq_label])
if label_string:
    context_features_dict['label_string'] = bytes_feature(
        [label_string.encode()])
if frame_labels_string:
# Store as a single string as all context features should be Features or
# FeatureLists. Cannot combine types for now.
labels_string = ','.join(frame_labels_string)
    context_features_dict['framelabels_string'] = bytes_feature(
        [labels_string.encode()])
context_features = tf.train.Features(feature=context_features_dict)
# Create SequenceExample.
ex = tf.train.SequenceExample(context=context_features,
feature_lists=feature_lists)
return ex
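# Illustrative sketch, not part of the original module: one way a
# SequenceExample produced by get_example above might be parsed back. The
# context and sequence feature names mirror the ones used above; the helper
# name itself is hypothetical.
def _parse_sequence_example_sketch(serialized_example):
  """Parses one serialized SequenceExample and decodes its first frame."""
  context_spec = {
      'name': tf.io.FixedLenFeature([], dtype=tf.string),
      'len': tf.io.FixedLenFeature([], dtype=tf.int64),
  }
  sequence_spec = {
      'video': tf.io.FixedLenSequenceFeature([], dtype=tf.string),
  }
  context, sequences = tf.io.parse_single_sequence_example(
      serialized_example,
      context_features=context_spec,
      sequence_features=sequence_spec)
  # Each element of sequences['video'] is a JPEG-encoded frame
  # (see image_to_bytes above).
  first_frame = tf.io.decode_jpeg(sequences['video'][0])
  return context, first_frame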
def write_seqs_to_tfrecords(record_name, name_to_seqs, label,
frame_labels_string):
"""Write frames to a TFRecord file."""
writer = tf.io.TFRecordWriter(record_name)
for name in name_to_seqs:
ex = get_example(name, name_to_seqs[name],
seq_label=label,
frame_labels_string=frame_labels_string)
writer.write(ex.SerializeToString())
writer.close()
def image_to_jpegstring(image, jpeg_quality=95):
"""Convert image to a JPEG string."""
if not isinstance(image, Image.Image):
raise TypeError('Provided image is not a PIL Image object')
# This fix to PIL makes sure that we don't get an error when saving large
# jpeg files. This is a workaround for a bug in PIL. The value should be
# substantially larger than the size of the image being saved.
ImageFile.MAXBLOCK = 640 * 512 * 64
output_jpeg = io.BytesIO()
image.save(output_jpeg, 'jpeg', quality=jpeg_quality, optimize=True)
return output_jpeg.getvalue()
def image_to_bytes(image_array):
"""Get bytes formatted image arrays."""
image = Image.fromarray(image_array)
im_string = bytes_feature([image_to_jpegstring(image)])
return im_string
def video_to_frames(video_filename, rotate, fps=0, resize=False,
width=224, height=224):
"""Returns all frames from a video.
Args:
video_filename: string, filename of video.
rotate: Boolean: if True, rotates video by 90 degrees.
fps: Integer, frames per second of video. If 0, it will be inferred from
metadata of video.
resize: Boolean, if True resizes images to given size.
width: Integer, Width of image.
height: Integer, Height of image.
  Returns:
    frames: list of RGB frames (numpy arrays) sampled from the video.
    timestamps: list of timestamps (in seconds) corresponding to the frames.
    fps: the frame rate used when sampling frames.
  Raises:
    ValueError: if fps is greater than the rate of video.
  """
logging.info('Loading %s', video_filename)
cap = cv2.VideoCapture(video_filename)
if fps == 0:
fps = cap.get(cv2.CAP_PROP_FPS)
keep_frequency = 1
else:
if fps > cap.get(cv2.CAP_PROP_FPS):
raise ValueError('Cannot sample at a frequency higher than FPS of video')
keep_frequency = int(float(cap.get(cv2.CAP_PROP_FPS)) / fps)
frames = []
timestamps = []
counter = 0
if cap.isOpened():
while True:
success, frame_bgr = cap.read()
if not success:
break
if counter % keep_frequency == 0:
# Convert BGR to RGB
frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
if resize:
frame_rgb = cv2.resize(frame_rgb, (width, height))
if rotate:
frame_rgb = cv2.transpose(frame_rgb)
frame_rgb = cv2.flip(frame_rgb, 1)
frames.append(frame_rgb)
timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0)
counter += 1
return frames, timestamps, fps
def merge_annotations(label, expected_n):
"""Merge annotations from label based on voting."""
annotations = {}
for k in range(expected_n):
segments = np.vstack([label[person_id][str(k)] for person_id in label])
annotations[k] = np.mean(segments, axis=0)
# Convert from microseconds to seconds.
annotations[k] /= 1e6
sorted_keys = sorted(annotations.keys())
start_sorted_idxes = np.argsort([annotations[k][0] for k in sorted_keys])
start_sorted_keys = [sorted_keys[idx] for idx in start_sorted_idxes]
# Add gaps.
for i in range(1, expected_n):
avg_time = 0.5 * (annotations[start_sorted_keys[i-1]][1] +
annotations[start_sorted_keys[i]][0])
annotations[start_sorted_keys[i-1]][1] = avg_time
annotations[start_sorted_keys[i]][0] = avg_time
return annotations
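# For reference, `label` in merge_annotations above appears to expect a mapping
# of the form (times in microseconds):
#   {'annotator_a': {'0': [start, end], '1': [start, end], ...},
#    'annotator_b': {...}, ...}
# The result maps each segment index to its averaged [start, end] in seconds,
# with consecutive segments made contiguous at the midpoint between them.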
def label_timestamps(timestamps, annotations):
"""Each timestamp gets assigned a label according to annotation."""
labels = []
sorted_keys = sorted(annotations.keys())
first_segment = sorted_keys[np.argmin([annotations[k][0]
for k in sorted_keys])]
last_segment = sorted_keys[np.argmax([annotations[k][1]
for k in sorted_keys])]
for ts in timestamps:
assigned = 0
for k in sorted_keys:
min_t, max_t = annotations[k]
# If within the bounds provide the label in annotation.
if min_t <= ts < max_t:
labels.append(k)
assigned = 1
break
# If timestamp is higher than last recorded label's timestamp then just
# copy over the last label
elif ts >= annotations[last_segment][1]:
labels.append(last_segment)
assigned = 1
break
      # If timestamp is lower than the first recorded label's timestamp then
      # just copy over the first label
elif ts < annotations[first_segment][0]:
labels.append(first_segment)
assigned = 1
break
# If timestamp was not assigned log a warning.
if assigned == 0:
logging.warning('Not able to insert: %s', ts)
return labels
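# Worked example for label_timestamps (hypothetical values): with
#   annotations = {0: [0.0, 2.0], 1: [2.0, 5.0]}
# label_timestamps([0.5, 2.5, 6.0], annotations) returns [0, 1, 1]; 0.5 and 2.5
# fall inside their segments, while 6.0 lies past the last segment's end and so
# receives the last segment's label.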
def create_tfrecords(name, output_dir, input_dir, label_file, input_pattern,
files_per_shard, action_label, frame_labels,
expected_segments, orig_fps, rotate, resize, width,
height):
"""Create TFRecords from videos in a given path.
Args:
name: string, name of the dataset being created.
output_dir: string, path to output directory.
input_dir: string, path to input videos directory.
label_file: string, JSON file that contains annotations.
input_pattern: string, regex pattern to look up videos in directory.
files_per_shard: int, number of files to keep in each shard.
action_label: int, Label of actions in video.
frame_labels: list, list of string describing each class. Class label is
the index in list.
expected_segments: int, expected number of segments.
orig_fps: int, frame rate at which tfrecord will be created.
rotate: boolean, if True rotate videos by 90 degrees.
resize: boolean, if True resize to given height and width.
width: int, Width of frames.
height: int, Height of frames.
Raises:
ValueError: If invalid args are passed.
"""
if not gfile.exists(output_dir):
logging.info('Creating output directory: %s', output_dir)
gfile.makedirs(output_dir)
if label_file is not None:
    with open(label_file) as labels_file:
data = json.load(labels_file)
if not isinstance(input_pattern, list):
file_pattern = os.path.join(input_dir, input_pattern)
filenames = [os.path.basename(x) for x in gfile.glob(file_pattern)]
else:
filenames = []
for file_pattern in input_pattern:
file_pattern = os.path.join(input_dir, file_pattern)
filenames += [os.path.basename(x) for x in gfile.glob(file_pattern)]
filenames = sorted(filenames)
logging.info('Found %s files', len(filenames))
names_to_seqs = {}
num_shards = int(math.ceil(len(filenames)/files_per_shard))
len_num_shards = len(str(num_shards))
shard_id = 0
for i, filename in enumerate(filenames):
seqs = {}
frames, video_timestamps, _ = video_to_frames(
os.path.join(input_dir, filename),
rotate,
orig_fps,
resize=resize,
width=width,
height=height)
seqs['video'] = frames
if label_file is not None:
video_id = filename[:-4]
if video_id in data:
video_labels = data[video_id]
else:
raise ValueError('Video id %s not found in labels file.' % video_id)
merged_annotations = merge_annotations(video_labels,
expected_segments)
seqs['labels'] = label_timestamps(video_timestamps, merged_annotations)
names_to_seqs[os.path.splitext(filename)[0]] = seqs
if (i + 1) % files_per_shard == 0 or i == len(filenames) - 1:
output_filename = os.path.join(
output_dir,
'%s-%s-of-%s.tfrecord' % (name,
str(shard_id).zfill(len_num_shards),
str(num_shards).zfill(len_num_shards)))
write_seqs_to_tfrecords(output_filename, names_to_seqs,
action_label, frame_labels)
shard_id += 1
names_to_seqs = {}
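# Example invocation (all paths and label values below are hypothetical):
#   create_tfrecords(name='my_dataset', output_dir='/tmp/tfrecords',
#                    input_dir='/tmp/videos', label_file=None,
#                    input_pattern='*.mp4', files_per_shard=50,
#                    action_label=0, frame_labels=[], expected_segments=-1,
#                    orig_fps=0, rotate=False, resize=True, width=224,
#                    height=224)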
|
|
"""
There are several kinds of functions:
1) defined function like exp or sin that has a name and body
(in the sense that function can be evaluated).
e = exp
2) undefined function with a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) this isn't implemented yet: anonymous function or lambda function that has
   no name but has a body with dummy variables. Examples of anonymous function
creation:
f = Lambda(x, exp(x)*x)
f = Lambda(exp(x)*x) # free symbols in the expression define the number of arguments
f = exp * Lambda(x,x)
4) isn't implemented yet: composition of functions, like (sin+cos)(x), this
works in sympy core, but needs to be ported back to SymPy.
Example:
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print sympy.srepr(f(x).func)
Function('f')
>>> f(x).args
(x,)
"""
from basic import Basic, Atom, S, C
from basic import BasicMeta
from cache import cacheit
from itertools import repeat
from numbers import Rational, Integer
from symbol import Symbol
from multidimensional import vectorize
from sympy.utilities.decorator import deprecated
from sympy.utilities import all
from sympy import mpmath
class PoleError(Exception):
pass
class FunctionClass(BasicMeta):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
_new = type.__new__
def __new__(cls, arg1, arg2, arg3=None, **options):
assert not options,`options`
if isinstance(arg1, type):
# the following code gets executed when one types
# FunctionClass(Function, "f")
# i.e. cls = FunctionClass, arg1 = Function, arg2 = "f"
# and we simply do an equivalent of:
# class f(Function):
# ...
# return f
ftype, name, signature = arg1, arg2, arg3
#XXX this probably needs some fixing:
assert ftype.__name__.endswith('Function'),`ftype`
attrdict = ftype.__dict__.copy()
attrdict['undefined_Function'] = True
if signature is not None:
attrdict['signature'] = signature
bases = (ftype,)
return type.__new__(cls, name, bases, attrdict)
else:
name, bases, attrdict = arg1, arg2, arg3
return type.__new__(cls, name, bases, attrdict)
def __repr__(cls):
return cls.__name__
class Function(Basic):
"""
Base class for applied functions.
Constructor of undefined classes.
"""
__metaclass__ = FunctionClass
is_Function = True
nargs = None
@vectorize(1)
@cacheit
def __new__(cls, *args, **options):
# NOTE: this __new__ is twofold:
#
# 1 -- it can create another *class*, which can then be instantiated by
# itself e.g. Function('f') creates a new class f(Function)
#
# 2 -- on the other hand, we instantiate -- that is we create an
# *instance* of a class created earlier in 1.
#
# So please keep, both (1) and (2) in mind.
# (1) create new function class
# UC: Function('f')
if cls is Function:
#when user writes Function("f"), do an equivalent of:
#taking the whole class Function(...):
#and rename the Function to "f" and return f, thus:
#In [13]: isinstance(f, Function)
#Out[13]: False
#In [14]: isinstance(f, FunctionClass)
#Out[14]: True
if len(args) == 1 and isinstance(args[0], str):
#always create Function
return FunctionClass(Function, *args)
return FunctionClass(Function, *args, **options)
else:
print args
print type(args[0])
raise TypeError("You need to specify exactly one string")
# (2) create new instance of a class created in (1)
# UC: Function('f')(x)
# UC: sin(x)
args = map(sympify, args)
# these lines should be refactored
for opt in ["nargs", "dummy", "comparable", "noncommutative", "commutative"]:
if opt in options:
del options[opt]
# up to here.
if options.get('evaluate') is False:
return Basic.__new__(cls, *args, **options)
evaluated = cls.eval(*args)
if evaluated is not None: return evaluated
        # Only undefined functions have nargs == None
if not cls.nargs and hasattr(cls, 'undefined_Function'):
r = Basic.__new__(cls, *args, **options)
r.nargs = len(args)
return r
return Basic.__new__(cls, *args, **options)
@property
def is_commutative(self):
if all(getattr(t, 'is_commutative') for t in self.args):
return True
else:
return False
@classmethod
@deprecated
def canonize(cls, *args):
return cls.eval(*args)
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class cls should be
unmodified, return None.
Example of eval() for the function "sign"
---------------------------------------------
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg is S.Zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, C.Mul):
coeff, terms = arg.as_coeff_terms()
if coeff is not S.One:
return cls(coeff) * cls(C.Mul(*terms))
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if self == old:
return new
elif old.is_Function and new.is_Function:
if old == self.func:
if self.nargs is new.nargs or not new.nargs:
return new(*self.args)
# Written down as an elif to avoid a super-long line
elif isinstance(new.nargs,tuple) and self.nargs in new.nargs:
return new(*self.args)
return Basic._seq_subs(self, old, new)
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
fname = self.func.__name__
try:
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS[fname]
func = getattr(mpmath, fname)
except (AttributeError, KeyError):
return
# Convert all args to mpf or mpc
try:
args = [arg._to_mpmath(prec) for arg in self.args]
except ValueError:
return
# Set mpmath precision and apply. Make sure precision is restored
# afterwards
orig = mpmath.mp.prec
try:
mpmath.mp.prec = prec
v = func(*args)
finally:
mpmath.mp.prec = orig
return Basic._from_mpmath(v, prec)
def _eval_is_comparable(self):
if self.is_Function:
r = True
for s in self.args:
c = s.is_comparable
if c is None: return
if not c: r = False
return r
return
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
r = S.Zero
for a in self.args:
i += 1
da = a.diff(s)
if da is S.Zero:
continue
if isinstance(self.func, FunctionClass):
df = self.fdiff(i)
l.append(df * da)
return Add(*l)
def _eval_is_commutative(self):
r = True
for a in self._args:
c = a.is_commutative
if c is None: return None
if not c: r = False
return r
def _eval_eq_nonzero(self, other):
if isinstance(other.func, self.func.__class__) and len(self)==len(other):
for a1,a2 in zip(self,other):
if not (a1==a2):
return False
return True
def as_base_exp(self):
return self, S.One
def count_ops(self, symbolic=True):
# f() args
return 1 + Add(*[ t.count_ops(symbolic) for t in self.args ])
def _eval_nseries(self, x, x0, n):
assert len(self.args) == 1
arg = self.args[0]
arg0 = arg.limit(x, 0)
from sympy import oo
if arg0 in [-oo, oo]:
raise PoleError("Cannot expand around %s" % (arg))
if arg0 is not S.Zero:
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
term = e.subs(x, S.Zero)
series = term
fact = S.One
for i in range(n-1):
i += 1
fact *= Rational(i)
e = e.diff(x)
term = e.subs(x, S.Zero)*(x**i)/fact
term = term.expand()
series += term
return series + C.Order(x**n, x)
return e1.nseries(x, x0, n)
l = []
g = None
for i in xrange(n+2):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, x0, n)
l.append(g)
return Add(*l) + C.Order(x**n, x)
def _eval_is_polynomial(self, syms):
for arg in self.args:
if arg.has(*syms):
return False
return True
def _eval_expand_basic(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_exp'):
newterm = term._eval_expand_power_exp(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_base(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_base'):
newterm = term._eval_expand_power_base(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_mul(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_mul'):
newterm = term._eval_expand_mul(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_multinomial(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
            if hasattr(term, '_eval_expand_multinomial'):
newterm = term._eval_expand_multinomial(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_log(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_complex(self, deep=True, **hints):
if deep:
func = self.func(*[ a.expand(deep, **hints) for a in self.args ])
else:
func = self.func(*self.args)
return C.re(func) + S.ImaginaryUnit * C.im(func)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_rewrite(self, pattern, rule, **hints):
if hints.get('deep', False):
args = [ a._eval_rewrite(pattern, rule, **hints) for a in self ]
else:
args = self.args
if pattern is None or isinstance(self.func, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args, **self._assumptions)
def fdiff(self, argindex=1):
if self.nargs is not None:
if isinstance(self.nargs, tuple):
nargs = self.nargs[-1]
else:
nargs = self.nargs
if not (1<=argindex<=nargs):
raise TypeError("argument index %r is out of range [1,%s]" % (argindex,nargs))
return Derivative(self,self.args[argindex-1],evaluate=False)
@classmethod
def _eval_apply_evalf(cls, arg):
arg = arg.evalf(prec)
#if cls.nargs == 1:
# common case for functions with 1 argument
#if arg.is_Number:
if arg.is_number:
func_evalf = getattr(arg, cls.__name__)
return func_evalf()
def _eval_as_leading_term(self, x):
"""General method for the leading term"""
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
@classmethod
def taylor_term(cls, n, x, *previous_terms):
"""General method for the taylor term.
This method is slow, because it differentiates n-times. Subclasses can
redefine it to make it faster by using the "previous_terms".
"""
x = sympify(x)
return cls(x).diff(x, n).subs(x, 0) * x**n / C.Factorial(n)
class WildFunction(Function, Atom):
"""
WildFunction() matches any expression but another WildFunction()
XXX is this as intended, does it work ?
"""
nargs = 1
def __new__(cls, name=None, **assumptions):
if name is None:
name = 'Wf%s' % (Symbol.dummycount + 1) # XXX refactor dummy counting
Symbol.dummycount += 1
obj = Function.__new__(cls, name, **assumptions)
obj.name = name
return obj
def matches(self, expr, repl_dict={}, evaluate=False):
if self in repl_dict:
if repl_dict[self] == expr:
return repl_dict
else:
return None
if self.nargs is not None:
if not hasattr(expr,'nargs') or self.nargs != expr.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
@classmethod
def _eval_apply_evalf(cls, arg):
return
@property
def is_number(self):
return False
class Derivative(Basic):
"""
Carries out differentiation of the given expression with respect to symbols.
expr must define ._eval_derivative(symbol) method that returns
the differentiation result or None.
Examples:
Derivative(Derivative(expr, x), y) -> Derivative(expr, x, y)
Derivative(expr, x, 3) -> Derivative(expr, x, x, x)
"""
is_Derivative = True
@staticmethod
def _symbolgen(*symbols):
"""
Generator of all symbols in the argument of the Derivative.
Example:
>> ._symbolgen(x, 3, y)
(x, x, x, y)
>> ._symbolgen(x, 10**6)
(x, x, x, x, x, x, x, ...)
The second example shows why we don't return a list, but a generator,
so that the code that calls _symbolgen can return earlier for special
cases, like x.diff(x, 10**6).
"""
last_s = sympify(symbols[len(symbols)-1])
for i in xrange(len(symbols)):
s = sympify(symbols[i])
next_s = None
if s != last_s:
next_s = sympify(symbols[i+1])
if isinstance(s, Integer):
continue
elif isinstance(s, Symbol):
# handle cases like (x, 3)
if isinstance(next_s, Integer):
# yield (x, x, x)
for copy_s in repeat(s,int(next_s)):
yield copy_s
else:
yield s
else:
yield s
def __new__(cls, expr, *symbols, **assumptions):
expr = sympify(expr)
if not symbols: return expr
symbols = Derivative._symbolgen(*symbols)
if expr.is_commutative:
assumptions["commutative"] = True
if "evaluate" in assumptions:
evaluate = assumptions["evaluate"]
del assumptions["evaluate"]
else:
evaluate = False
if not evaluate and not isinstance(expr, Derivative):
symbols = list(symbols)
if len(symbols) == 0:
# We make a special case for 0th derivative, because there
# is no good way to unambiguously print this.
return expr
obj = Basic.__new__(cls, expr, *symbols, **assumptions)
return obj
unevaluated_symbols = []
for s in symbols:
s = sympify(s)
if not isinstance(s, Symbol):
raise ValueError('Invalid literal: %s is not a valid variable' % s)
if not expr.has(s):
return S.Zero
obj = expr._eval_derivative(s)
if obj is None:
unevaluated_symbols.append(s)
elif obj is S.Zero:
return S.Zero
else:
expr = obj
if not unevaluated_symbols:
return expr
return Basic.__new__(cls, expr, *unevaluated_symbols, **assumptions)
def _eval_derivative(self, s):
#print
#print self
#print s
#stop
if s not in self.symbols:
obj = self.expr.diff(s)
if isinstance(obj, Derivative):
return Derivative(obj.expr, *(self.symbols+obj.symbols))
return Derivative(obj, *self.symbols)
return Derivative(self.expr, *((s,)+self.symbols), **{'evaluate': False})
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
return Derivative(expr, *self.symbols, **hints)
@property
def expr(self):
return self._args[0]
@property
def symbols(self):
return self._args[1:]
def _eval_subs(self, old, new):
if self==old:
return new
return Derivative(*map(lambda x: x._eval_subs(old, new), self.args), **{'evaluate': True})
def matches(self, expr, repl_dict={}, evaluate=False):
# this method needs a cleanup.
if self in repl_dict:
if repl_dict[self] == expr:
return repl_dict
else:
return None
if isinstance(expr, Derivative):
if len(expr.symbols) == len(self.symbols):
#print "MAYBE:",self, expr, repl_dict, evaluate
return Basic.matches(self, expr, repl_dict, evaluate)
#print "NONE:",self, expr, repl_dict, evaluate
return None
#print self, expr, repl_dict, evaluate
stop
if self.nargs is not None:
if self.nargs != expr.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
def _eval_lseries(self, x, x0):
stop
arg = self.args[0]
dx = self.args[1]
for term in arg.lseries(x, x0):
yield term.diff(dx)
def _eval_nseries(self, x, x0, n):
arg = self.args[0]
arg = arg.nseries(x, x0, n)
o = arg.getO()
dx = self.args[1]
if o:
return arg.removeO().diff(dx) + arg.getO()/dx
else:
return arg.removeO().diff(dx)
class Lambda(Function):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda(x, y, z, t, x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
    Multivariate functions can be curried for partial application:
>>> sum2numbers = Lambda(x, y, x+y)
>>> sum2numbers(1,2)
3
>>> plus1 = sum2numbers(1)
>>> plus1(3)
4
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
# a minimum of 2 arguments (parameter, expression) are needed
nargs = 2
def __new__(cls,*args):
assert len(args) >= 2,"Must have at least one parameter and an expression"
if len(args) == 2 and isinstance(args[0], (list, tuple)):
args = tuple(args[0])+(args[1],)
obj = Function.__new__(cls,*args)
obj.nargs = len(args)-1
return obj
@classmethod
def eval(cls,*args):
obj = Basic.__new__(cls, *args)
#use dummy variables internally, just to be sure
nargs = len(args)-1
expression = args[nargs]
funargs = [Symbol(arg.name, dummy=True) for arg in args[:nargs]]
#probably could use something like foldl here
for arg,funarg in zip(args[:nargs],funargs):
expression = expression.subs(arg,funarg)
funargs.append(expression)
obj._args = tuple(funargs)
return obj
def apply(self, *args):
"""Applies the Lambda function "self" to the arguments given.
This supports partial application.
Example:
>>> from sympy import Lambda
>>> from sympy.abc import x, y
>>> f = Lambda(x, x**2)
>>> f.apply(4)
16
>>> sum2numbers = Lambda(x,y,x+y)
>>> sum2numbers(1,2)
3
>>> plus1 = sum2numbers(1)
>>> plus1(3)
4
"""
nparams = self.nargs
assert nparams >= len(args),"Cannot call function with more parameters than function variables: %s (%d variables) called with %d arguments" % (str(self),nparams,len(args))
#replace arguments
expression = self.args[self.nargs]
for arg,funarg in zip(args,self.args[:nparams]):
expression = expression.subs(funarg,arg)
#curry the rest
if nparams != len(args):
unused_args = list(self.args[len(args):nparams])
unused_args.append(expression)
return Lambda(*tuple(unused_args))
return expression
def __call__(self, *args):
return self.apply(*args)
def __eq__(self, other):
if isinstance(other, Lambda):
if not len(self.args) == len(other.args):
return False
selfexpr = self.args[self.nargs]
otherexpr = other.args[other.nargs]
for selfarg,otherarg in zip(self.args[:self.nargs],other.args[:other.nargs]):
otherexpr = otherexpr.subs(otherarg,selfarg)
if selfexpr == otherexpr:
return True
# if self.args[1] == other.args[1].subs(other.args[0], self.args[0]):
# return True
return False
@vectorize(0)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
that if there are 0 symbols (such as diff(f(x), x, 0), then the result will
be the function (the zeroth derivative), even if evaluate=False.
This function is vectorized, so you can pass a list for the arguments and
each argument will be mapped to each element of the list. For a single
symbol, you can just pass the symbol normally. For multiple symbols,
pass each group in a tuple. For example, do diff(f(x, y), [x, y]) to get
the derivatives of f(x, y) with respect to x and with respect to y, and
diff(f(x, y), [(x, x), (y, y)]) to get the derivatives of f(x, y) with
respect to x twice and with respect to y twice. You can also mix tuples
and single symbols.
Examples:
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
D(f(x), x, x, x)
>>> diff(f(x), x, 3)
D(f(x), x, x, x)
>>> diff(sin(x)*cos(y), x, 2, y, 2)
cos(y)*sin(x)
>>> diff(f(x, y), [x, y])
[D(f(x, y), x), D(f(x, y), y)]
>>> diff(f(x, y), [(x, x), (y, y)])
[D(f(x, y), x, x), D(f(x, y), y, y)]
>>> diff(f(x, y), [(x, 2), y])
[D(f(x, y), x, x), D(f(x, y), y)]
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
See Also
http://documents.wolfram.com/v5/Built-inFunctions/AlgebraicComputation/Calculus/D.html
"""
# @vectorize(1) won't handle symbols in the way that we want, so we have to
# write the for loop manually.
if hasattr(symbols[0], '__iter__'):
retlist = []
for i in symbols[0]:
if hasattr(i, '__iter__'):
retlist.append(Derivative(f, *i, **kwargs))
else:
retlist.append(Derivative(f, i, **kwargs))
return retlist
kwargs.setdefault('evaluate', True)
return Derivative(f,*symbols, **kwargs)
@vectorize(0)
def expand(e, deep=True, power_base=True, power_exp=True, mul=True, \
log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using methods given as hints.
    Hints are applied in an arbitrary order, so your code shouldn't
    depend on the order in which hints are passed to this method.
Hints evaluated unless explicitly set to False are:
basic, log, multinomial, mul, power_base, and power_exp
The following hints are supported but not applied unless set to True:
complex, func, and trig.
basic is a generic keyword for methods that want to be expanded
automatically. For example, Integral uses expand_basic to expand the
integrand. If you want your class expand methods to run automatically and
they don't fit one of the already automatic methods, wrap it around
_eval_expand_basic.
If deep is set to True, things like arguments of functions are
recursively expanded. Use deep=False to only expand on the top
level.
Also see expand_log, expand_mul, expand_complex, expand_trig,
and expand_func, which are wrappers around those expansion methods.
>>> from sympy import cos, exp
>>> from sympy.abc import x, y, z
mul - Distributes multiplication over addition.
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
complex - Split an expression into real and imaginary parts.
>>> (x+y).expand(complex=True)
I*im(x) + I*im(y) + re(x) + re(y)
>>> cos(x).expand(complex=True)
cos(re(x))*cosh(im(x)) - I*sin(re(x))*sinh(im(x))
power_exp - Expand addition in exponents into multiplied bases.
>>> exp(x+y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x+y)).expand(power_exp=True)
2**x*2**y
power_base - Split powers of multiplied bases.
>>> ((x*y)**z).expand(power_base=True)
x**z*y**z
    log - Pull out the power of an argument as a coefficient and split log products
into sums of logs. Note that these only work if the arguments of the log
function have the proper assumptions: the arguments must be positive and the
exponents must be real.
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(y*x**2)
>>> x, y = symbols('xy', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
trig - Do trigonometric expansions.
>>> cos(x+y).expand(trig=True)
cos(x)*cos(y) - sin(x)*sin(y)
func - Expand other functions.
>>> from sympy import gamma
>>> gamma(x+1).expand(func=True)
x*gamma(x)
multinomial - Expand (x + y + ...)**n where n is a positive integer.
>>> ((x+y+z)**2).expand(multinomial=True)
2*x*y + 2*x*z + 2*y*z + x**2 + y**2 + z**2
You can shut off methods that you don't want.
>>> (exp(x+y)*(x+y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x+y)*(x+y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x+y)*(x+y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
Use deep=False to only expand on the top level.
>>> exp(x+exp(x+y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x+exp(x+y)).expand(deep=False)
exp(x)*exp(exp(x + y))
Note: because hints are applied in arbitrary order, some hints may
prevent expansion by other hints if they are applied first. In
particular, mul may distribute multiplications and prevent log and
power_base from expanding them. Also, if mul is applied before multinomial,
the expression might not be fully distributed. The solution is to expand
with mul=False first, then run expand_mul if you need further expansion.
Examples:
>>> from sympy import expand_log, expand, expand_mul
>>> x, y, z = symbols('xyz', positive=True)
>> expand(log(x*(y+z))) # could be either one below
log(x*y + x*z)
log(x) + log(y + z)
>>> expand_log(log(x*y+x*z))
log(x*y + x*z)
>> expand(log(x*(y+z)), mul=False)
log(x) + log(y + z)
>> expand((x*(y+z))**x) # could be either one below
(x*y + x*z)**x
x**x*(y + z)**x
>>> expand((x*(y+z))**x, mul=False)
x**x*(y + z)**x
>> expand(x*(y+z)**2) # could be either one below
2*x*y*z + x*y**2 + x*z**2
x*(y + z)**2
>>> expand(x*(y+z)**2, mul=False)
x*(2*y*z + y**2 + z**2)
>>> expand_mul(_)
2*x*y*z + x*y**2 + x*z**2
"""
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, **hints)
# These are simple wrappers around single hints. Feel free to add ones for
# power_exp, power_base, multinomial, or basic if you need them.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Example:
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('xy', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,\
power_base=False, basic=False, multinomial=False, log=False)
def expand_log(expr, deep=True):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Example:
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('xy', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(2*log(y) + log(x))*exp(x + y)
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,\
power_exp=False, power_base=False, multinomial=False, basic=False)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Example:
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(1 + x)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,\
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Example:
>>> from sympy import expand_trig, sin, cos
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(cos(x)*sin(y) + cos(y)*sin(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,\
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Example:
>>> from sympy import expand_complex, I, im, re
>>> from sympy.abc import z
>>> expand_complex(z**(2*I))
I*im(z**(2*I)) + re(z**(2*I))
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,\
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
from sympify import sympify
from add import Add
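# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged demo of the hint-specific wrappers defined above, run only
# when this file is executed directly. The symbols/exp/log constructors are
# taken from an installed sympy package purely for the demo; the symbols('xy')
# call mirrors the docstring examples above (newer sympy would use 'x y').
if __name__ == "__main__":
    from sympy import symbols, exp, log

    x, y = symbols('xy', positive=True)
    expr = exp(x + y)*(x + y)*log(x*y**2)

    # expand() applies every default hint (mul, log, power_exp, ...).
    print(expand(expr))
    # The wrappers above apply exactly one hint and leave the others off.
    print(expand_mul(expr))   # distributes multiplication over addition only
    print(expand_log(expr))   # splits the log of the product only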
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
from collections import defaultdict
from io import BytesIO
import os
import sys
try:
unicode
except NameError:
basestring = unicode = str
if sys.version_info[0] < 3:
from urllib2 import urlopen
from urlparse import urlparse
else:
from urllib.request import urlopen
from urllib.parse import urlparse
import re
import time
from sc2reader import utils
from sc2reader import log_utils
from sc2reader.resources import Resource, Replay, Map, GameSummary, Localization
@log_utils.loggable
class SC2Factory(object):
"""
    The SC2Factory class acts as a generic loader interface for all resources
    available to sc2reader. At the current time this includes
:class:`~sc2reader.resources.Replay` and :class:`~sc2reader.resources.Map` resources. These resources can be
loaded in both singular and plural contexts with:
* :meth:`load_replay` - :class:`Replay`
* :meth:`load_replays` - generator<:class:`Replay`>
* :meth:`load_map` - :class:`Map`
    * :meth:`load_maps` - generator<:class:`Map`>
The load behavior can be configured in three ways:
* Passing options to the factory constructor
* Using the :meth:`configure` method of a factory instance
    * Passing override options into the load method
See the :meth:`configure` method for more details on configuration
options.
Resources can be loaded in the singular context from the following inputs:
* URLs - Uses the built-in package ``urllib``
* File path - Uses the built-in method ``open``
* File-like object - Must implement ``.read()``
* DepotFiles - Describes remote Battle.net depot resources
In the plural context the following inputs are acceptable:
* An iterable of the above inputs
    * Directory path - Uses :meth:`~sc2reader.utils.get_files` with the appropriate extension to find files.
"""
_resource_name_map = dict(replay=Replay, map=Map)
default_options = {
Resource: {"debug": False},
Replay: {"load_level": 4, "load_map": False},
}
def __init__(self, **options):
self.plugins = list()
# Bootstrap with the default options
self.options = defaultdict(dict)
        # Use a distinct loop variable so the constructor's **options are not
        # clobbered before self.configure(**options) runs below.
        for cls, cls_options in self.default_options.items():
            self.options[cls] = cls_options.copy()
# Then configure with the options passed in
self.configure(**options)
# Primary Interface
def load_replay(self, source, options=None, **new_options):
"""
Loads a single sc2replay file. Accepts file path, url, or file object.
"""
return self.load(Replay, source, options, **new_options)
def load_replays(self, sources, options=None, **new_options):
"""
Loads a collection of sc2replay files, returns a generator.
"""
return self.load_all(
Replay, sources, options, extension="SC2Replay", **new_options
)
def load_localization(self, source, options=None, **new_options):
"""
Loads a single s2ml file. Accepts file path, url, or file object.
"""
return self.load(Localization, source, options, **new_options)
def load_localizations(self, sources, options=None, **new_options):
"""
Loads a collection of s2ml files, returns a generator.
"""
return self.load_all(
Localization, sources, options, extension="s2ml", **new_options
)
def load_map(self, source, options=None, **new_options):
"""
Loads a single s2ma file. Accepts file path, url, or file object.
"""
return self.load(Map, source, options, **new_options)
def load_maps(self, sources, options=None, **new_options):
"""
Loads a collection of s2ma files, returns a generator.
"""
return self.load_all(Map, sources, options, extension="s2ma", **new_options)
def load_game_summary(self, source, options=None, **new_options):
"""
Loads a single s2gs file. Accepts file path, url, or file object.
"""
return self.load(GameSummary, source, options, **new_options)
def load_game_summaries(self, sources, options=None, **new_options):
"""
Loads a collection of s2gs files, returns a generator.
"""
return self.load_all(
GameSummary, sources, options, extension="s2gs", **new_options
)
def configure(self, cls=None, **options):
"""
Configures the factory to use the supplied options. If cls is specified
the options will only be applied when loading that class
"""
if isinstance(cls, basestring):
            cls = self._resource_name_map.get(cls.lower())
cls = cls or Resource
self.options[cls].update(options)
def reset(self):
"""
Resets the options to factory defaults
"""
self.options = defaultdict(dict)
def register_plugin(self, cls, plugin):
"""
Registers the given Plugin to be run on classes of the supplied name.
"""
if isinstance(cls, basestring):
cls = self._resource_name_map.get(cls.lower(), Resource)
self.plugins.append((cls, plugin))
# Support Functions
def load(self, cls, source, options=None, **new_options):
options = options or self._get_options(cls, **new_options)
resource, filename = self._load_resource(source, options=options)
return self._load(cls, resource, filename=filename, options=options)
def load_all(self, cls, sources, options=None, **new_options):
options = options or self._get_options(cls, **new_options)
for resource, filename in self._load_resources(sources, options=options):
yield self._load(cls, resource, filename=filename, options=options)
# Internal Functions
def _load(self, cls, resource, filename, options):
obj = cls(resource, filename=filename, factory=self, **options)
for plugin in options.get("plugins", self._get_plugins(cls)):
obj = plugin(obj)
return obj
def _get_plugins(self, cls):
plugins = list()
for ext_cls, plugin in self.plugins:
if issubclass(cls, ext_cls):
plugins.append(plugin)
return plugins
def _get_options(self, cls, **new_options):
options = dict()
for opt_cls, cls_options in self.options.items():
if issubclass(cls, opt_cls):
options.update(cls_options)
options.update(new_options)
return options
def _load_resources(self, resources, options=None, **new_options):
"""
Collections of resources or a path to a directory
"""
options = options or self._get_options(Resource, **new_options)
# Path to a folder, retrieve all relevant files as the collection
if isinstance(resources, basestring):
resources = utils.get_files(resources, **options)
for resource in resources:
yield self._load_resource(resource, options=options)
def load_remote_resource_contents(self, resource, **options):
self.logger.info("Fetching remote resource: " + resource)
return urlopen(resource).read()
def load_local_resource_contents(self, location, **options):
# Extract the contents so we can close the file
with open(location, "rb") as resource_file:
return resource_file.read()
def _load_resource(self, resource, options=None, **new_options):
"""
http links, filesystem locations, and file-like objects
"""
options = options or self._get_options(Resource, **new_options)
if isinstance(resource, utils.DepotFile):
resource = resource.url
if isinstance(resource, basestring):
if re.match(r"https?://", resource):
contents = self.load_remote_resource_contents(resource, **options)
else:
directory = options.get("directory", "")
location = os.path.join(directory, resource)
contents = self.load_local_resource_contents(location, **options)
# BytesIO implements a fuller file-like object
resource_name = resource
resource = BytesIO(contents)
else:
# Totally not designed for large files!!
# We need a multiread resource, so wrap it in BytesIO
if not hasattr(resource, "seek"):
resource = BytesIO(resource.read())
resource_name = getattr(resource, "name", "Unknown")
if options.get("verbose", None):
print(resource_name)
return (resource, resource_name)
class CachedSC2Factory(SC2Factory):
def get_remote_cache_key(self, remote_resource):
# Strip the port and use the domain as the bucket
# and use the full path as the key
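        # e.g. a depot URL like "http://eu.depot.battle.net:1119/<hash>.s2ma"
        # maps to bucket "eu.depot.battle.net" and key "<hash>.s2ma".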
parseresult = urlparse(remote_resource)
bucket = re.sub(r":.*", "", parseresult.netloc)
key = parseresult.path.strip("/")
return (bucket, key)
def load_remote_resource_contents(self, remote_resource, **options):
cache_key = self.get_remote_cache_key(remote_resource)
if not self.cache_has(cache_key):
resource = super(CachedSC2Factory, self).load_remote_resource_contents(
remote_resource, **options
)
self.cache_set(cache_key, resource)
else:
resource = self.cache_get(cache_key)
return resource
    def cache_has(self, cache_key):
        raise NotImplementedError()
    def cache_get(self, cache_key):
        raise NotImplementedError()
    def cache_set(self, cache_key, value):
        raise NotImplementedError()
class FileCachedSC2Factory(CachedSC2Factory):
"""
:param cache_dir: Local directory to cache files in.
Extends :class:`SC2Factory`.
Caches remote depot resources on the file system in the ``cache_dir``.
"""
def __init__(self, cache_dir, **options):
super(FileCachedSC2Factory, self).__init__(**options)
self.cache_dir = os.path.abspath(cache_dir)
if not os.path.isdir(self.cache_dir):
raise ValueError(
"cache_dir ({0}) must be an existing directory.".format(self.cache_dir)
)
elif not os.access(self.cache_dir, os.F_OK | os.W_OK | os.R_OK):
raise ValueError(
"Must have read/write access to {0} for local file caching.".format(
self.cache_dir
)
)
def cache_has(self, cache_key):
return os.path.exists(self.cache_path(cache_key))
def cache_get(self, cache_key, **options):
return self.load_local_resource_contents(self.cache_path(cache_key), **options)
def cache_set(self, cache_key, value):
cache_path = self.cache_path(cache_key)
bucket_dir = os.path.dirname(cache_path)
if not os.path.exists(bucket_dir):
os.makedirs(bucket_dir)
with open(cache_path, "wb") as out:
out.write(value)
def cache_path(self, cache_key):
return os.path.join(self.cache_dir, *(cache_key))
class DictCachedSC2Factory(CachedSC2Factory):
"""
:param cache_max_size: The max number of cache entries to hold in memory.
Extends :class:`SC2Factory`.
Caches remote depot resources in memory. Does not write to the file system.
The cache is effectively cleared when the process exits.
"""
def __init__(self, cache_max_size=0, **options):
super(DictCachedSC2Factory, self).__init__(**options)
self.cache_dict = dict()
self.cache_used = dict()
self.cache_max_size = cache_max_size
def cache_set(self, cache_key, value):
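        # When a size limit is set and reached, evict the least recently used
        # entry (oldest timestamp in self.cache_used) before inserting the new one.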
if self.cache_max_size and len(self.cache_dict) >= self.cache_max_size:
oldest_cache_key = min(self.cache_used.items(), key=lambda e: e[1])[0]
del self.cache_used[oldest_cache_key]
del self.cache_dict[oldest_cache_key]
self.cache_dict[cache_key] = value
self.cache_used[cache_key] = time.time()
def cache_get(self, cache_key):
self.cache_used[cache_key] = time.time()
return self.cache_dict[cache_key]
def cache_has(self, cache_key):
return cache_key in self.cache_dict
class DoubleCachedSC2Factory(DictCachedSC2Factory, FileCachedSC2Factory):
"""
:param cache_dir: Local directory to cache files in.
:param cache_max_size: The max number of cache entries to hold in memory.
Extends :class:`SC2Factory`.
Caches remote depot resources to the file system AND holds a subset of them
in memory for more efficient access.
"""
def __init__(self, cache_dir, cache_max_size=0, **options):
super(DoubleCachedSC2Factory, self).__init__(
cache_max_size, cache_dir=cache_dir, **options
)
def load_remote_resource_contents(self, remote_resource, **options):
cache_key = self.get_remote_cache_key(remote_resource)
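        # Two-tier lookup: in-memory cache first, then the on-disk cache, and
        # only then the network; whatever is fetched is promoted into memory.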
if DictCachedSC2Factory.cache_has(self, cache_key):
return DictCachedSC2Factory.cache_get(self, cache_key)
if not FileCachedSC2Factory.cache_has(self, cache_key):
resource = SC2Factory.load_remote_resource_contents(
self, remote_resource, **options
)
FileCachedSC2Factory.cache_set(self, cache_key, resource)
else:
resource = FileCachedSC2Factory.cache_get(self, cache_key)
DictCachedSC2Factory.cache_set(self, cache_key, resource)
return resource
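# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the factory interface above, run only when this
# file is executed directly. "example.SC2Replay" and the cache directory are
# hypothetical placeholders.
if __name__ == "__main__":
    # Plain factory: per-class defaults plus whatever options are passed here.
    factory = SC2Factory(debug=True)
    replay = factory.load_replay("example.SC2Replay", load_level=2)
    print(replay.filename)

    # File-backed factory: remote depot resources (e.g. the map) are cached on
    # disk so repeated loads skip the network round trip. The cache directory
    # must exist before the constructor runs.
    cache_dir = "/tmp/sc2reader-cache"
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    cached = FileCachedSC2Factory(cache_dir, load_map=True)
    cached_replay = cached.load_replay("example.SC2Replay")
    print(cached_replay.map.filename)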
|
|
# MIT License
# Copyright (c) 2016 https://github.com/sndnv
# See the project's LICENSE file for the full text
import multiprocessing
import os
import pprint
import time
from cmd import Cmd
from datetime import datetime
from time import localtime, strftime
from cadb.utils import Config, FileSystem, Types, Build
from cadb.data import Processing
def monitor_file(source_data, compiler_config, check_interval):
"""
Starts monitoring the specified source file, compiling it when changes are detected.
:param source_data: the data for the source file to be monitored
:param compiler_config: the compiler configuration to be used
:param check_interval: the time to wait between checks for changes; in seconds
:return: nothing
"""
try:
last_hash = None
while True:
current_hash = FileSystem.get_file_hash(source_data.file_path)
if current_hash != last_hash:
print("Compiling file [{0}]".format(source_data.file_path))
Build.remove_object_file(source_data.object_file_path)
Build.create_object_file_dir(source_data.object_file_path)
return_code, stdout, stderr = Build.compile_object(source_data, compiler_config)
if len(stdout) > 0:
print("stdout for [{0}]: {1}".format(source_data.file_path, stdout))
if len(stderr) > 0:
print("stderr for [{0}]: {1}".format(source_data.file_path, stderr))
                if return_code == 0:
print("Compilation completed successfully for file [{0}]".format(source_data.file_path))
else:
print(
"*** Compilation failed with return code [{0}] for file [{1}]".format(
return_code,
source_data.file_path
)
)
last_hash = current_hash
time.sleep(check_interval)
except KeyboardInterrupt:
print("Stopping 'autocompile' for [{0}] ...".format(source_data.file_path))
class InteractiveSession(Cmd):
intro = "Type 'help' to see a list of commands or 'qq' to exit"
prompt = ">: "
def __init__(self, available_actions, initial_config, initial_options, initial_db, initial_sources, logger,
completekey='tab', stdin=None, stdout=None):
"""
Creates a new interactive session with the supplied parameters.
Notes:\n
- The session needs to be started via a call to the 'cmdloop()' method.\n
- The process will block (while handling user commands) until the session is terminated.\n
- Any changes made to 'config' and 'options' in the session will be kept and used for any actions running
after the session has ended.
:param available_actions:
:param initial_config: the current config object
:param initial_options: the current options object
:param initial_db: the current DB object
:param initial_sources: the current sources data
:param logger: the core logger object
:param completekey: completion key (default is 'tab'); see 'cmd.Cmd'
:param stdin: input file object (default is None); see 'cmd.Cmd'
:param stdout: output file object (default is None); see 'cmd.Cmd'
"""
super().__init__(completekey, stdin, stdout)
self.id = strftime("%Y%m%d_%H%M%S", localtime())
self.actions = available_actions
self.config = initial_config
self.options = initial_options
self.db = initial_db
self.sources = initial_sources
self.logger = logger
self.pp = pprint.PrettyPrinter(indent=2)
self._autocompile_processes = {}
self._command_history = {'current': [], 'previous': []}
def emptyline(self):
pass
@staticmethod
def help_config():
print("Gets or sets the system config:")
print("\t>: config get -> retrieves the current system config")
print("\t>: config set a.b.c=\"some value\" -> merges the specified config path into the system config")
def do_config(self, args):
if len(args) > 0:
args = args.split(" ", maxsplit=1)
command = args[0]
if command == "set":
try:
new_config = Config.parse_from_string(args[1])
Config.merge(self.config, new_config)
self.sources = Processing.process_sources(self.config, self.options, self.db)
print("Done!")
except Exception as e:
print("*** Exception encountered while processing new config: [({0}) {1}]".format(e.__class__, e))
elif command == "get":
self.pp.pprint(self.config)
else:
print("*** Unexpected command for 'config': [{0}]".format(command))
else:
print("*** 'config' expects more parameters")
@staticmethod
def help_options():
print("Gets or (un)sets the system options:")
print("\t>: options get -> retrieves the current system config")
print("\t>: options set abc 123 -> sets the specified option (abc) to the specified value (123) as a string")
print("\t>: options unset abc -> removes the specified option (abc)")
def do_options(self, args):
if len(args) > 0:
args = args.split(" ", maxsplit=2)
command = args[0]
if command == "set":
try:
self.options[args[1]] = args[2]
self.sources = Processing.process_sources(self.config, self.options, self.db)
print("Done!")
except Exception as e:
print("*** Exception encountered while processing new options: [({0}) {1}]".format(e.__class__, e))
elif command == "unset":
self.options.pop(args[1], None)
self.sources = Processing.process_sources(self.config, self.options, self.db)
print("Done!")
elif command == "get":
self.pp.pprint(self.options)
else:
print("*** Unexpected command for 'options': [{0}]".format(command))
else:
print("*** 'options' expects more parameters")
@staticmethod
def help_autocompile():
print("Gets, starts or stops auto-compile processes:")
print("\t>: autocompile get -> lists all running auto-compile processes (if any)")
print("\t>: autocompile start <file path> -> starts a process for the specified file (if not running already)")
print("\t>: autocompile stop all -> stops all auto-compile processes (if any)")
print("\t>: autocompile stop <file path> -> stops the process for the specified file (if started)")
def do_autocompile(self, args):
if len(args) > 0:
args = args.split(" ", maxsplit=1)
command = args[0]
try:
if command == "start":
source_file = args[1].replace('"', '').replace('\'', '')
if source_file in self._autocompile_processes:
print(
"*** A process [{0}] already exists for file [{1}]".format(
self._autocompile_processes[source_file].pid, source_file
)
)
else:
source_data = self.sources[source_file]
compiler_config = self.config['builds'][self.options['build']]['compiler']
if source_data.file_type == Types.SourceType.Implementation:
new_process = multiprocessing.Process(
target=monitor_file,
args=(source_data, compiler_config, 2)
)
new_process.start()
self._autocompile_processes[source_file] = new_process
print("Initializing ...")
else:
print("*** File [{0}] is not an implementation file".format(source_file))
elif command == "stop":
source_file = args[1].replace('"', '').replace('\'', '')
if source_file == "all":
for current_process in self._autocompile_processes.values():
current_process.terminate()
current_process.join()
self._autocompile_processes.clear()
elif source_file not in self._autocompile_processes:
print("*** Process not found for file [{0}]".format(source_file))
else:
existing_process = self._autocompile_processes[source_file]
existing_process.terminate()
existing_process.join()
self._autocompile_processes.pop(source_file)
print("Done!")
elif command == "get":
if len(self._autocompile_processes) > 0:
for source_file, process in self._autocompile_processes.items():
print(
"{0}\t->\tPID: {1}\t(Alive: {2})".format(
source_file,
process.pid,
"Yes" if process.is_alive() else "No"
)
)
else:
print("No processes found")
else:
print("*** Unexpected command for 'autocompile': [{0}]".format(command))
except Exception as e:
print(
"*** Exception encountered while processing action 'autocompile': [({0}) {1}]".format(
e.__class__,
e
)
)
else:
print("*** 'autocompile' expects more parameters")
@staticmethod
def help_session():
print("Gets, saves or loads interactive session configs:")
print("\t>: session get -> lists all available session configs in the build dir (if any)")
print("\t>: session save -> saves the current session config in the build dir")
print("\t>: session load <file path> -> loads the session config from the specified path")
def do_session(self, args):
if len(args) > 0:
args = args.split(" ", maxsplit=1)
command = args[0]
build_dir = self.config['builds'][self.options['build']]['paths']['build']
try:
if command == "save":
config = self.config.copy()
Config.merge(
config,
{
'session_options': self.options,
'command_history': self._command_history['current']
}
)
FileSystem.store_json_file("{0}{1}session_{2}.conf".format(build_dir, os.sep, self.id), config)
print("Done!")
elif command == "load":
session_config = FileSystem.load_json_file(args[1])
session_options = session_config.pop('session_options')
command_history = session_config.pop('command_history')
self.config = session_config
self.options = session_options
self._command_history['previous'] = command_history
self.sources = Processing.process_sources(self.config, self.options, self.db)
print("Done!")
elif command == "get":
saved_sessions = FileSystem.get_session_files_list(build_dir)
saved_sessions.sort()
if len(saved_sessions) > 0:
for current_session in saved_sessions:
print(current_session)
else:
print("No saved sessions found")
else:
print("*** Unexpected command for 'session': [{0}]".format(command))
except Exception as e:
print(
"*** Exception encountered while processing action 'session': [({0}) {1}]".format(
e.__class__,
e
)
)
else:
print("*** 'session' expects more parameters")
@staticmethod
def help_build():
print("Executes the 'build' action:")
print("\t>: build -> do action with current config, options and sources")
print("\t>: build <file path> -> do action for specified source only (replaces 'source-file' in options)")
print("\t>: build all -> do action for all sources (ignores 'source-file' in options)")
def do_build(self, args):
if len(self._autocompile_processes) > 0:
print("*** Cannot start a build while there are active auto-compile processes")
else:
try:
source_file = args.replace('"', '').replace('\'', '') if len(args) > 0 else None
if source_file is None:
self._run_timed_action('build')
else:
options = self.options.copy()
if source_file.lower() == "all":
options.pop("source-file")
else:
options["source-file"] = source_file
self._run_timed_action('build', with_options=options)
self.sources = Processing.process_sources(self.config, self.options, self.db)
except Exception as e:
print(
"*** Exception encountered while processing action 'build': [({0}) {1}]".format(
e.__class__,
e
)
)
@staticmethod
def help_clean():
print("Executes the 'clean' action:")
print("\t>: clean -> do action with current config, options and sources")
print("\t>: clean <file path> -> do action for specified source only (replaces 'source-file' in options)")
print("\t>: clean all -> do action for all sources (ignores 'source-file' in options)")
def do_clean(self, args):
if len(self._autocompile_processes) > 0:
print("*** Cannot start a build while there are active auto-compile processes")
else:
try:
source_file = args.replace('"', '').replace('\'', '') if len(args) > 0 else None
if source_file is None:
self._run_timed_action('clean')
else:
options = self.options.copy()
if source_file.lower() == "all":
options.pop("source-file")
else:
options["source-file"] = source_file
self._run_timed_action('clean', with_options=options)
self.sources = Processing.process_sources(self.config, self.options, self.db)
except Exception as e:
print(
"*** Exception encountered while processing action 'clean': [({0}) {1}]".format(
e.__class__,
e
)
)
@staticmethod
def help_deps():
print("Executes the 'deps' action with the current config, options and sources.")
def do_deps(self, _):
try:
self._run_timed_action('deps')
except Exception as e:
print(
"*** Exception encountered while processing action 'deps': [({0}) {1}]".format(
e.__class__,
e
)
)
@staticmethod
def help_graph():
print("Executes the 'graph' action with the current config, options and sources.")
def do_graph(self, _):
try:
self._run_timed_action('graph')
except Exception as e:
print(
"*** Exception encountered while processing action 'graph': [({0}) {1}]".format(
e.__class__,
e
)
)
@staticmethod
def help_stats():
print("Executes the 'stats' action with the current config, options and sources.")
def do_stats(self, _):
try:
self._run_timed_action('stats')
except Exception as e:
print(
"*** Exception encountered while processing action 'stats': [({0}) {1}]".format(
e.__class__,
e
)
)
@staticmethod
def help_sources():
print("Gets or refreshes the available sources:")
print("\t>: sources get [filter] -> retrieves a sorted list of source files; accepts an optional filter string")
print("\t>: sources refresh -> performs sources processing with the current config and options")
def do_sources(self, args):
if len(args) > 0:
args = args.split(" ", maxsplit=1)
command = args[0]
try:
if command == "refresh":
self.sources = Processing.process_sources(self.config, self.options, self.db)
print("Done!")
elif command == "get":
sources_filter = args[1].lower() if len(args) > 1 else None
sources = list(self.sources.keys())
if sources_filter is not None:
sources = [current for current in sources if sources_filter in current.lower()]
sources.sort()
for source_path in sources:
print(source_path)
else:
print("*** Unexpected command for 'sources': [{0}]".format(command))
except Exception as e:
print(
"*** Exception encountered while processing action 'sources': [({0}) {1}]".format(
e.__class__,
e
)
)
else:
print("*** 'sources' expects more parameters")
@staticmethod
def help_history():
print("Gets or clears command histories:")
print("\t>: history [current] -> retrieves the current session's command history")
print("\t>: history previous -> retrieves the loaded session's command history (if any)")
print("\t>: history clear -> clears the current session's command history")
def do_history(self, args):
if len(args) > 0:
args = args.split(" ", maxsplit=1)
command = args[0]
if command == "previous":
selected_history = self._command_history['previous']
elif command == "current":
selected_history = self._command_history['current']
elif command == "clear":
self._command_history['current'].clear()
selected_history = []
else:
print("*** Unexpected command for 'history': [{0}]".format(command))
selected_history = []
else:
selected_history = self._command_history['current']
if len(selected_history) > 0:
for index, command in enumerate(selected_history):
print("[{0:03d}]\t[{1}]".format(index, command))
else:
print("No commands found")
@staticmethod
def help_qq():
print("Exits the interactive session and allows any remaining actions to complete.")
def do_qq(self, _):
for current_process in self._autocompile_processes.values():
current_process.terminate()
current_process.join()
self._autocompile_processes.clear()
return True
def precmd(self, line):
if line and line.strip():
self._command_history['current'].append(line.strip())
return line
def _run_timed_action(self, action, with_options=None):
"""
Runs the specified action with the supplied options (or the default ones).
:param action: the action to be run
:param with_options: the options to use for the action (if not supplied, the default options are used)
:return: nothing
"""
action_start = datetime.now()
options = with_options if with_options is not None else self.options
self.actions[action](self.config, options, self.db, self.sources, self.logger)
action_end = datetime.now()
self.logger.info(
"Action in session [{0}] completed in [{1:.2f}] seconds".format(
self.id,
(action_end - action_start).total_seconds()
),
extra={'action': action}
)
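# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how a driver script might start the interactive
# session above. The actions/config/options/db/sources values are hypothetical
# stand-ins for the objects the surrounding cadb application actually builds.
if __name__ == "__main__":
    import logging

    logging.basicConfig(level=logging.INFO)
    core_logger = logging.getLogger("cadb")

    available_actions = {}   # e.g. {'build': ..., 'clean': ..., 'deps': ...}
    config = {}              # parsed project configuration
    options = {}             # command-line / session options
    db = None                # project database handle
    sources = {}             # e.g. Processing.process_sources(config, options, db)

    session = InteractiveSession(available_actions, config, options, db, sources, core_logger)
    session.cmdloop()        # blocks until the user enters 'qq'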
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 11:52:11 2017
@author: lracuna
"""
from vision.camera import *
from vision.plane import Plane
from vision.circular_plane import CircularPlane
import vision.error_functions as ef
import gdescent.hpoints_gradient as gd
from ippe import homo2d
from homographyHarker.homographyHarker import homographyHarker as hh
from solve_ippe import pose_ippe_both, pose_ippe_best
from solve_pnp import pose_pnp
import cv2
## CREATE A SIMULATED CAMERA
cam = Camera()
cam.set_K(fx = 800,fy = 800,cx = 640,cy = 480)
cam.set_width_heigth(1280,960)
## DEFINE CAMERA POSE LOOKING STRAIGHT DOWN INTO THE PLANE MODEL
#cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(179.0))
#cam.set_t(0.0,-0.0,0.5, frame='world')
cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(140.0))
cam.set_t(0.0,-1,1.5, frame='world')
## Define a Display plane
pl = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.3,0.3), n = (2,2))
pl = CircularPlane()
pl.random(n =4, r = 0.01, min_sep = 0.01)
## CREATE A SET OF IMAGE POINTS FOR VALIDATION OF THE HOMOGRAPHY ESTIMATION
validation_plane = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.3,0.3), n = (4,4))
validation_plane.uniform()
## we create the gradient for the point distribution
normalize= False
n = 0.000001 #condition number norm
gradient = gd.create_gradient(metric='condition_number', n = n)
#normalize= True
#n = 0.0001 #condition number norm
#gradient = gd.create_gradient(metric='condition_number', n = n)
#gradient = gd.create_gradient(metric='pnorm_condition_number')
#gradient = gd.create_gradient(metric='volker_metric')
objectPoints_des = pl.get_points()
# we now replace the first 4 points with the border positions
#pl.uniform()
#objectPoints_des[:,0:4] = pl.get_points()
#define the plots
#one Figure for image an object points
fig1 = plt.figure('Image and Object points')
ax_image = fig1.add_subplot(211)
ax_object = fig1.add_subplot(212)
#another figure for Homography error and condition numbers
fig2 = plt.figure('Error and condition numbers')
ax_error = plt.subplot(311)
ax_cond = plt.subplot(312, sharex = ax_error)
ax_cond_norm = plt.subplot(313, sharex = ax_error)
#another figure for Pose errors
fig3 = plt.figure('Pose estimation errors')
ax_t_error_ippe1 = fig3.add_subplot(321)
ax_r_error_ippe1 = fig3.add_subplot(322)
ax_t_error_ippe2 = fig3.add_subplot(323)
ax_r_error_ippe2 = fig3.add_subplot(324)
ax_t_error_pnp = fig3.add_subplot(325)
ax_r_error_pnp = fig3.add_subplot(326)
imagePoints_des = np.array(cam.project(objectPoints_des, False))
objectPoints_list = []
imagePoints_list = []
transfer_error_list = []
cond_list = []
cond_norm_list = []
ippe_tvec_error_list1 = []
ippe_rmat_error_list1 = []
ippe_tvec_error_list2 = []
ippe_rmat_error_list2 = []
pnp_tvec_error_list = []
pnp_rmat_error_list = []
new_objectPoints = objectPoints_des
new_imagePoints = np.array(cam.project(new_objectPoints, False))
homography_iters = 100
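# For each gradient-descent iteration below, the inner loop perturbs the
# projected image points with Gaussian noise (mean 0, sd 1) `homography_iters`
# times and averages the homography transfer error and the IPPE/PnP pose
# errors over those noisy trials.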
for i in range(200):
# Xo = np.copy(new_objectPoints[[0,1,3],:]) #without the z coordinate (plane)
# Xi = np.copy(new_imagePoints)
# Aideal = ef.calculate_A_matrix(Xo, Xi)
input_list = gd.extract_objectpoints_vars(new_objectPoints)
input_list.append(np.array(cam.P))
input_list.append(None)
mat_cond = gd.matrix_condition_number_autograd(*input_list, normalize = False)
input_list = gd.extract_objectpoints_vars(new_objectPoints)
input_list.append(np.array(cam.P))
input_list.append(None)
mat_cond_normalized = gd.matrix_condition_number_autograd(*input_list, normalize = True)
cond_list.append(mat_cond)
cond_norm_list.append(mat_cond_normalized)
##HOMOGRAPHY ERRORS
## TRUE VALUE OF HOMOGRAPHY OBTAINED FROM CAMERA PARAMETERS
Hcam = cam.homography_from_Rt()
##We add noise to the image points and calculate the noisy homography
transfer_error_loop = []
ippe_tvec_error_loop1 = []
ippe_rmat_error_loop1 = []
ippe_tvec_error_loop2 = []
ippe_rmat_error_loop2 = []
pnp_tvec_error_loop = []
pnp_rmat_error_loop = []
for j in range(homography_iters):
new_imagePoints_noisy = cam.addnoise_imagePoints(new_imagePoints, mean = 0, sd = 1)
#Calculate the pose using IPPE (solution with least repro error)
normalizedimagePoints = cam.get_normalized_pixel_coordinates(new_imagePoints_noisy)
ippe_tvec1, ippe_rmat1, ippe_tvec2, ippe_rmat2 = pose_ippe_both(new_objectPoints, normalizedimagePoints, debug = False)
ippeCam1 = cam.clone_withPose(ippe_tvec1, ippe_rmat1)
ippeCam2 = cam.clone_withPose(ippe_tvec2, ippe_rmat2)
#Calculate the pose using solvepnp
debug = False
pnp_tvec, pnp_rmat = pose_pnp(new_objectPoints, new_imagePoints_noisy, cam.K, debug, cv2.SOLVEPNP_ITERATIVE,False)
pnpCam = cam.clone_withPose(pnp_tvec, pnp_rmat)
#Calculate errors
pnp_tvec_error, pnp_rmat_error = ef.calc_estimated_pose_error(cam.get_tvec(), cam.R, pnpCam.get_tvec(), pnp_rmat)
pnp_tvec_error_loop.append(pnp_tvec_error)
pnp_rmat_error_loop.append(pnp_rmat_error)
ippe_tvec_error1, ippe_rmat_error1 = ef.calc_estimated_pose_error(cam.get_tvec(), cam.R, ippeCam1.get_tvec(), ippe_rmat1)
ippe_tvec_error2, ippe_rmat_error2 = ef.calc_estimated_pose_error(cam.get_tvec(), cam.R, ippeCam2.get_tvec(), ippe_rmat2)
ippe_tvec_error_loop1.append(ippe_tvec_error1)
ippe_rmat_error_loop1.append(ippe_rmat_error1)
ippe_tvec_error_loop2.append(ippe_tvec_error2)
ippe_rmat_error_loop2.append(ippe_rmat_error2)
#Homography Estimation from noisy image points
Xo = new_objectPoints[[0,1,3],:]
Xi = new_imagePoints_noisy
#Hnoisy,A_t_ref,H_t = homo2d.homography2d(Xo,Xi)
#Hnoisy = Hnoisy/Hnoisy[2,2]
Hnoisy = hh(Xo,Xi)
## ERRORS FOR THE NOISY HOMOGRAPHY
## VALIDATION OBJECT POINTS
validation_objectPoints =validation_plane.get_points()
validation_imagePoints = np.array(cam.project(validation_objectPoints, False))
Xo = np.copy(validation_objectPoints)
Xo = np.delete(Xo, 2, axis=0)
Xi = np.copy(validation_imagePoints)
transfer_error_loop.append(ef.validation_points_error(Xi, Xo, Hnoisy))
transfer_error_list.append(np.mean(transfer_error_loop))
ippe_tvec_error_list1.append(np.mean(ippe_tvec_error_loop1))
ippe_rmat_error_list1.append(np.mean(ippe_rmat_error_loop1))
ippe_tvec_error_list2.append(np.mean(ippe_tvec_error_loop2))
ippe_rmat_error_list2.append(np.mean(ippe_rmat_error_loop2))
pnp_tvec_error_list.append(np.mean(pnp_tvec_error_loop))
pnp_rmat_error_list.append(np.mean(pnp_rmat_error_loop))
#PLOT IMAGE POINTS
plt.sca(ax_image)
plt.ion()
if i==0:
plt.cla()
cam.plot_plane(pl)
plt.plot(imagePoints_des[0],imagePoints_des[1],'x',color = 'black',)
ax_image.set_aspect('equal', 'datalim')
ax_image.cla()
cam.plot_plane(pl)
ax_image.plot(new_imagePoints[0],new_imagePoints[1],'.',color = 'blue',)
ax_image.set_xlim(0,1280)
ax_image.set_ylim(0,960)
ax_image.invert_yaxis()
ax_image.set_title('Image Points')
ax_object.set_title('Object Points')
plt.show()
plt.pause(0.01)
#PLOT OBJECT POINTS
if i==0:
ax_object.cla()
ax_object.plot(objectPoints_des[0],objectPoints_des[1],'x',color = 'black',)
ax_object.set_aspect('equal', 'datalim')
ax_object.plot(new_objectPoints[0],new_objectPoints[1],'.',color = 'blue',)
plt.show()
plt.pause(0.001)
#PLOT TRANSFER ERROR
plt.sca(ax_error)
plt.ion()
ax_error.cla()
ax_error.plot(transfer_error_list)
#PLOT CONDITION NUMBER
plt.sca(ax_cond)
plt.ion()
ax_cond.cla()
ax_cond.plot(cond_list)
#PLOT CONDITION NUMBER NORMALIZED
plt.sca(ax_cond_norm)
plt.ion()
ax_cond_norm.cla()
ax_cond_norm.plot(cond_norm_list)
plt.setp(ax_error.get_xticklabels(), visible=False)
plt.setp(ax_cond.get_xticklabels(), visible=False)
ax_error.set_title('Geometric Transfer error of the validation points')
ax_cond.set_title('Condition number of the A matrix')
ax_cond_norm.set_title('Condition number of the Normalized A matrix')
plt.show()
plt.pause(0.001)
plt.sca(ax_t_error_ippe1)
plt.ion()
ax_t_error_ippe1.cla()
ax_t_error_ippe1.plot(ippe_tvec_error_list1)
plt.sca(ax_r_error_ippe1)
plt.ion()
ax_r_error_ippe1.cla()
ax_r_error_ippe1.plot(ippe_rmat_error_list1)
plt.sca(ax_t_error_ippe2)
plt.ion()
ax_t_error_ippe2.cla()
ax_t_error_ippe2.plot(ippe_tvec_error_list2)
plt.sca(ax_r_error_ippe2)
plt.ion()
ax_r_error_ippe2.cla()
ax_r_error_ippe2.plot(ippe_rmat_error_list2)
plt.sca(ax_t_error_pnp)
plt.ion()
ax_t_error_pnp.cla()
ax_t_error_pnp.plot(pnp_tvec_error_list)
plt.sca(ax_r_error_pnp)
plt.ion()
ax_r_error_pnp.cla()
ax_r_error_pnp.plot(pnp_rmat_error_list)
ax_t_error_ippe1.set_title('Translation error (in percent) for IPPE Pose 1')
ax_r_error_ippe1.set_title('Rotation error (Angle) for IPPE Pose 1')
ax_t_error_ippe2.set_title('Translation error (in percent) for IPPE Pose 2')
ax_r_error_ippe2.set_title('Rotation error (Angle) for IPPE Pose 2')
ax_t_error_pnp.set_title('Translation error (in percent) for PnP Pose')
ax_r_error_pnp.set_title('Rotation error (Angle) for PnP Pose')
plt.show()
plt.pause(0.001)
print "Iteration: ", i
print "Transfer Error: ", np.mean(transfer_error_loop)
print "Mat cond:", mat_cond
print "Mat cond normalized:", mat_cond_normalized
print "Points", new_objectPoints
print "dx1,dy1 :", gradient.dx1_eval,gradient.dy1_eval
print "dx2,dy2 :", gradient.dx2_eval,gradient.dy2_eval
print "dx3,dy3 :", gradient.dx3_eval,gradient.dy3_eval
print "dx4,dy4 :", gradient.dx4_eval,gradient.dy4_eval
print "------------------------------------------------------"
## GRADIENT DESCENT
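    # One descent step: evaluate the gradient of the chosen metric (condition
    # number of the A matrix) at the current object points, update the points
    # within the limitx/limity bounds, and re-project them into the image.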
objectPoints_list.append(new_objectPoints)
imagePoints_list.append(new_imagePoints)
objectPoints = np.copy(new_objectPoints)
gradient = gd.evaluate_gradient(gradient,objectPoints, np.array(cam.P), normalize)
new_objectPoints = gd.update_points(gradient, objectPoints, limitx=0.15,limity=0.15)
new_imagePoints = np.array(cam.project(new_objectPoints, False))
#plt.figure('Image Points')
#plt.plot(new_imagePoints[0],new_imagePoints[1],'.',color = 'red',)
|