repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M ⌀)
---|---|---|---|---
jbonofre/beam | refs/heads/master | sdks/python/apache_beam/examples/complete/estimate_pi.py | 7 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A workflow that uses a simple Monte Carlo method to estimate π.
The algorithm computes the fraction of points drawn uniformly within the unit
square that also fall in the quadrant of the unit circle that overlaps the
square. A simple area calculation shows that this fraction should be π/4, so
we multiply our counts ratio by four to estimate π.
"""
from __future__ import absolute_import
import argparse
import json
import logging
import random
import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.typehints import Any
from apache_beam.typehints import Iterable
from apache_beam.typehints import Tuple
@beam.typehints.with_output_types(Tuple[int, int, int])
@beam.typehints.with_input_types(int)
def run_trials(runs):
"""Run trials and return a 3-tuple representing the results.
Args:
runs: Number of trial runs to be executed.
Returns:
A 3-tuple (total trials, inside trials, 0).
The final zero is needed solely to make sure that the combine_results function
has the same type for inputs and outputs (a requirement for combiner functions).
"""
inside_runs = 0
for _ in xrange(runs):
x = random.uniform(0, 1)
y = random.uniform(0, 1)
inside_runs += 1 if x * x + y * y <= 1.0 else 0
return runs, inside_runs, 0
@beam.typehints.with_output_types(Tuple[int, int, float])
@beam.typehints.with_input_types(Iterable[Tuple[int, int, Any]])
def combine_results(results):
"""Combiner function to sum up trials and compute the estimate.
Args:
results: An iterable of 3-tuples (total trials, inside trials, ignored).
Returns:
A 3-tuple containing the sum of total trials, sum of inside trials, and
the probability computed from the two numbers.
"""
# TODO(silviuc): Do we guarantee that argument can be iterated repeatedly?
# Should document one way or the other.
total, inside = sum(r[0] for r in results), sum(r[1] for r in results)
return total, inside, 4 * float(inside) / total
class JsonCoder(object):
"""A JSON coder used to format the final result."""
def encode(self, x):
return json.dumps(x)
class EstimatePiTransform(beam.PTransform):
"""Runs 10M trials, and combine the results to estimate pi."""
def __init__(self, tries_per_work_item=100000):
self.tries_per_work_item = tries_per_work_item
def expand(self, pcoll):
# A hundred work items of a hundred thousand tries each.
return (pcoll
| 'Initialize' >> beam.Create(
[self.tries_per_work_item] * 100).with_output_types(int)
| 'Run trials' >> beam.Map(run_trials)
| 'Sum' >> beam.CombineGlobally(combine_results).without_defaults())
def run(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
(p # pylint: disable=expression-not-assigned
| EstimatePiTransform()
| WriteToText(known_args.output, coder=JsonCoder()))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
thyyks/node-gyp | refs/heads/master | gyp/pylib/gyp/MSVSUserFile.py | 2710 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
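# Illustrative example (hypothetical arguments, not used by gyp itself):
# _QuoteWin32CommandLineArgs(['run.exe', 'a b', 'say "hi"']) returns
# ['run.exe', '"a b"', '"say ""hi"""'], since embedded double-quotes are
# doubled for the cmd shell and any argument containing quotes or whitespace
# is wrapped in quotes.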
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
environment: Dictionary of environment variables to set. (optional)
working_directory: Working directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
wy1iu/sphereface | refs/heads/master | tools/caffe-sphereface/src/caffe/test/test_data/generate_sample_data.py | 38 | """
Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
"""
import os
import numpy as np
import h5py
script_dir = os.path.dirname(os.path.abspath(__file__))
# Generate HDF5DataLayer sample_data.h5
num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width
data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')
# We add an extra label2 dataset to test HDF5 layer's ability
# to handle arbitrary number of output ("top") Blobs.
label2 = label + 1
print data
print label
with h5py.File(script_dir + '/sample_data.h5', 'w') as f:
f['data'] = data
f['label'] = label
f['label2'] = label2
with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f:
f.create_dataset(
'data', data=data + total_size,
compression='gzip', compression_opts=1
)
f.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype='uint8',
)
f.create_dataset(
'label2', data=label2,
compression='gzip', compression_opts=1,
dtype='uint8',
)
with open(script_dir + '/sample_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/sample_data.h5\n')
f.write('src/caffe/test/test_data/sample_data_2_gzip.h5\n')
# Generate GradientBasedSolver solver_data.h5
num_cols = 3
num_rows = 8
height = 10
width = 10
data = np.random.randn(num_rows, num_cols, height, width)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
targets = np.random.randn(num_rows, 1)
targets = targets.astype('float32')
print data
print targets
with h5py.File(script_dir + '/solver_data.h5', 'w') as f:
f['data'] = data
f['targets'] = targets
with open(script_dir + '/solver_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/solver_data.h5\n')
|
tojon/treeherder | refs/heads/master | treeherder/webapp/api/push.py | 3 | import datetime
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.exceptions import ParseError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import (HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND)
from serializers import (CommitSerializer,
PushSerializer)
from treeherder.model.models import (Commit,
Job,
Push,
Repository)
from treeherder.model.tasks import (publish_job_action,
publish_push_action,
publish_push_runnable_job_action)
from treeherder.webapp.api import permissions
from treeherder.webapp.api.utils import (to_datetime,
to_timestamp)
class PushViewSet(viewsets.ViewSet):
"""
View for ``push`` records
"""
throttle_scope = 'push'
permission_classes = (permissions.HasHawkPermissionsOrReadOnly,)
def list(self, request, project):
"""
GET method for list of ``push`` records with revisions
"""
# Upper limit on the number of pushes returned by the API
MAX_PUSH_COUNT = 1000
# make a mutable copy of these params
filter_params = request.query_params.copy()
# This will contain some meta data about the request and results
meta = {}
# support ranges for date as well as revisions(changes) like old tbpl
for param in ["fromchange", "tochange", "startdate", "enddate", "revision"]:
v = filter_params.get(param, None)
if v:
del(filter_params[param])
meta[param] = v
try:
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response({
"detail": "No project with name {}".format(project)
}, status=HTTP_404_NOT_FOUND)
pushes = Push.objects.filter(repository=repository).order_by('-time')
for (param, value) in meta.iteritems():
if param == 'fromchange':
frompush_time = Push.objects.values_list('time', flat=True).get(
repository=repository, revision__startswith=value)
pushes = pushes.filter(time__gte=frompush_time)
filter_params.update({
"push_timestamp__gte": to_timestamp(frompush_time)
})
elif param == 'tochange':
topush_time = Push.objects.values_list('time', flat=True).get(
repository=repository, revision__startswith=value)
pushes = pushes.filter(time__lte=topush_time)
filter_params.update({
"push_timestamp__lte": to_timestamp(topush_time)
})
elif param == 'startdate':
pushes = pushes.filter(time__gte=to_datetime(value))
filter_params.update({
"push_timestamp__gte": to_timestamp(to_datetime(value))
})
elif param == 'enddate':
real_end_date = to_datetime(value) + datetime.timedelta(days=1)
pushes = pushes.filter(time__lte=real_end_date)
filter_params.update({
"push_timestamp__lt": to_timestamp(real_end_date)
})
elif param == 'revision':
# revision can be either the revision of the push itself, or
# any of the commits it refers to
pushes = pushes.filter(commits__revision__startswith=value)
rev_key = "revisions_long_revision" \
if len(meta['revision']) == 40 else "revisions_short_revision"
filter_params.update({rev_key: meta['revision']})
for param in ['push_timestamp__lt', 'push_timestamp__lte',
'push_timestamp__gt', 'push_timestamp__gte']:
if filter_params.get(param):
# translate push timestamp directly into a filter
try:
value = datetime.datetime.fromtimestamp(
float(filter_params.get(param)))
except ValueError:
return Response({
"error": "Invalid timestamp specified for {}".format(
param)
}, status=HTTP_400_BAD_REQUEST)
pushes = pushes.filter(**{
param.replace('push_timestamp', 'time'): value
})
for param in ['id__lt', 'id__lte', 'id__gt', 'id__gte', 'id']:
try:
value = int(filter_params.get(param, 0))
except ValueError:
return Response({
"error": "Invalid timestamp specified for {}".format(
param)
}, status=HTTP_400_BAD_REQUEST)
if value:
pushes = pushes.filter(**{param: value})
id_in = filter_params.get("id__in")
if id_in:
try:
id_in_list = [int(id) for id in id_in.split(',')]
except ValueError:
return Response({"error": "Invalid id__in specification"},
status=HTTP_400_BAD_REQUEST)
pushes = pushes.filter(id__in=id_in_list)
author = filter_params.get("author")
if author:
pushes = pushes.filter(author=author)
try:
count = int(filter_params.get("count", 10))
except ValueError:
return Response({"error": "Valid count value required"},
status=HTTP_400_BAD_REQUEST)
if count > MAX_PUSH_COUNT:
msg = "Specified count exceeds api limit: {}".format(MAX_PUSH_COUNT)
return Response({"error": msg}, status=HTTP_400_BAD_REQUEST)
# we used to have a "full" parameter for this endpoint so you could
# specify to not fetch the revision information if it was set to
# false. however AFAIK no one ever used it (default was to fetch
# everything), so let's just leave it out. it doesn't break
# anything to send extra data when not required.
pushes = pushes.select_related('repository').prefetch_related('commits')[:count]
serializer = PushSerializer(pushes, many=True)
meta['count'] = len(pushes)
meta['repository'] = project
meta['filter_params'] = filter_params
resp = {
'meta': meta,
'results': serializer.data
}
return Response(resp)
def retrieve(self, request, project, pk=None):
"""
GET method implementation for detail view of ``push``
"""
try:
push = Push.objects.get(repository__name=project,
id=pk)
serializer = PushSerializer(push)
return Response(serializer.data)
except Push.DoesNotExist:
return Response("No push with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND)
@detail_route()
def revisions(self, request, project, pk=None):
"""
GET method for revisions of a push
"""
try:
serializer = CommitSerializer(Commit.objects.filter(push_id=pk),
many=True)
return Response(serializer.data)
except Commit.DoesNotExist:
return Response("No push with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND)
@detail_route(methods=['post'], permission_classes=[IsAuthenticated])
def cancel_all(self, request, project, pk=None):
"""
Cancel all pending and running jobs in this push
"""
if not pk: # pragma nocover
return Response({"message": "push id required"}, status=HTTP_400_BAD_REQUEST)
# Sending 'cancel_all' action to pulse. Right now there is no listener
# for this, so we cannot remove 'cancel' action for each job below.
publish_push_action.apply_async(
args=[project, 'cancel_all', pk, request.user.email],
routing_key='publish_to_pulse'
)
# Notify the build systems which created these jobs...
for job in Job.objects.filter(push_id=pk).exclude(state='completed'):
publish_job_action.apply_async(
args=[project, 'cancel', job.id, request.user.email],
routing_key='publish_to_pulse'
)
# Mark pending jobs as cancelled to work around buildbot not including
# cancelled jobs in builds-4hr if they never started running.
# TODO: Remove when we stop using buildbot.
Job.objects.filter(push_id=pk, state='pending').update(
state='completed',
result='usercancel',
last_modified=datetime.datetime.now())
return Response({"message": "pending and running jobs canceled for push '{0}'".format(pk)})
@detail_route(methods=['post'], permission_classes=[IsAuthenticated])
def trigger_missing_jobs(self, request, project, pk=None):
"""
Trigger jobs that are missing in a push.
"""
if not pk:
return Response({"message": "push id required"}, status=HTTP_400_BAD_REQUEST)
publish_push_action.apply_async(
args=[project, "trigger_missing_jobs", pk, request.user.email],
routing_key='publish_to_pulse'
)
return Response({"message": "Missing jobs triggered for push '{0}'".format(pk)})
@detail_route(methods=['post'], permission_classes=[IsAuthenticated])
def trigger_all_talos_jobs(self, request, project, pk=None):
"""
Trigger all the talos jobs in a push.
"""
if not pk:
return Response({"message": "push id required"}, status=HTTP_400_BAD_REQUEST)
times = int(request.query_params.get('times', None))
if not times:
raise ParseError(detail="The 'times' parameter is mandatory for this endpoint")
publish_push_action.apply_async(
args=[project, "trigger_all_talos_jobs", pk, request.user.email,
times],
routing_key='publish_to_pulse'
)
return Response({"message": "Talos jobs triggered for push '{0}'".format(pk)})
@detail_route(methods=['post'], permission_classes=[IsAuthenticated])
def trigger_runnable_jobs(self, request, project, pk=None):
"""
Add new jobs to a push.
"""
if not pk:
return Response({"message": "push id required"},
status=HTTP_400_BAD_REQUEST)
# Making sure a push with this id exists
if not Push.objects.filter(id=pk).exists():
return Response({"message": "No push with id: {0}".format(pk)},
status=HTTP_404_NOT_FOUND)
requested_jobs = request.data.get('requested_jobs', [])
decision_task_id = request.data.get('decision_task_id', [])
if not requested_jobs:
Response({"message": "The list of requested_jobs cannot be empty"},
status=HTTP_400_BAD_REQUEST)
publish_push_runnable_job_action.apply_async(
args=[project, pk, request.user.email, requested_jobs, decision_task_id],
routing_key='publish_to_pulse'
)
return Response({"message": "New jobs added for push '{0}'".format(pk)})
@detail_route()
def status(self, request, project, pk=None):
"""
Return a count of the jobs belonging to this push
grouped by job status.
"""
try:
push = Push.objects.get(id=pk)
except Push.DoesNotExist:
return Response("No push with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND)
return Response(push.get_status())
|
edx/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/core/serializers/json.py | 204 | """
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def end_serialization(self):
simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)
def getvalue(self):
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(simplejson.load(stream), **options):
yield obj
class DjangoJSONEncoder(simplejson.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def default(self, o):
if isinstance(o, datetime.datetime):
d = datetime_safe.new_datetime(o)
return d.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
elif isinstance(o, datetime.date):
d = datetime_safe.new_date(o)
return d.strftime(self.DATE_FORMAT)
elif isinstance(o, datetime.time):
return o.strftime(self.TIME_FORMAT)
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
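# A minimal usage sketch (illustrative values):
#   simplejson.dumps({'when': datetime.datetime(2011, 1, 2, 3, 4, 5)},
#                    cls=DjangoJSONEncoder)
# would yield '{"when": "2011-01-02 03:04:05"}', because datetime objects are
# rendered with DATE_FORMAT and TIME_FORMAT above.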
|
citrix-openstack-build/neutron | refs/heads/master | neutron/plugins/cisco/common/cisco_credentials_v2.py | 4 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import logging as LOG
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as cexc
from neutron.plugins.cisco.common import config
from neutron.plugins.cisco.db import network_db_v2 as cdb
LOG.basicConfig(level=LOG.WARN)
LOG.getLogger(const.LOGGER_COMPONENT_NAME)
class Store(object):
"""Credential Store."""
@staticmethod
def initialize():
dev_dict = config.get_device_dictionary()
for key in dev_dict:
dev_id, dev_ip, dev_key = key
if dev_key == const.USERNAME:
try:
cdb.add_credential(
dev_ip,
dev_dict[dev_id, dev_ip, const.USERNAME],
dev_dict[dev_id, dev_ip, const.PASSWORD],
dev_id)
except cexc.CredentialAlreadyExists:
# We are quietly ignoring this, since it only happens
# if this class module is loaded more than once, in
# which case, the credentials are already populated
pass
@staticmethod
def put_credential(cred_name, username, password):
"""Set the username and password."""
cdb.add_credential(cred_name, username, password)
@staticmethod
def get_username(cred_name):
"""Get the username."""
credential = cdb.get_credential_name(cred_name)
return credential[const.CREDENTIAL_USERNAME]
@staticmethod
def get_password(cred_name):
"""Get the password."""
credential = cdb.get_credential_name(cred_name)
return credential[const.CREDENTIAL_PASSWORD]
@staticmethod
def get_credential(cred_name):
"""Get the username and password."""
cdb.get_credential_name(cred_name)
return {const.USERNAME: const.CREDENTIAL_USERNAME,
const.PASSWORD: const.CREDENTIAL_PASSWORD}
@staticmethod
def delete_credential(cred_name):
"""Delete a credential."""
cdb.remove_credential(cred_name)
|
jigarkb/CTCI | refs/heads/master | LeetCode/002-M-AddTwoNumbers.py | 2 | # You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse
# order and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
#
# You may assume the two numbers do not contain any leading zero, except the number 0 itself.
#
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
if l1 is None:
return l2
elif l2 is None:
return l1
else:
carry_over = 0
l1_pointer = l1
l2_pointer = l2
l3 = None
l3_pointer = None
while True:
if l1_pointer is None and l2_pointer is None:
if carry_over == 0:
break
else:
interim_sum = carry_over
elif l1_pointer is None:
interim_sum = l2_pointer.val + carry_over
l2_pointer = l2_pointer.next
elif l2_pointer is None:
interim_sum = l1_pointer.val + carry_over
l1_pointer = l1_pointer.next
else:
interim_sum = l1_pointer.val + l2_pointer.val + carry_over
l1_pointer = l1_pointer.next
l2_pointer = l2_pointer.next
carry_over = interim_sum / 10
value = interim_sum % 10
if l3 is None:
l3 = ListNode(value)
l3_pointer = l3
elif l3_pointer.next is None:
l3_pointer.next = ListNode(value)
l3_pointer = l3_pointer.next
return l3
# Note:
# Add respective values and carry forward the tens place to next set of values
# Example (digits stored in list order): (2 -> 4 -> 3) + (5 -> 6 -> 4): 2+5 = 7, carry 0; 4+6+0 = 10 => digit 0, carry 1; 3+4+1 = 8, carry 0; result 7 -> 0 -> 8
# Keep in mind that length of two list can be different
|
furf/pledge_service | refs/heads/master | testlib/waitress/tests/fixtureapps/sleepy.py | 40 | import time
def app(environ, start_response): # pragma: no cover
if environ['PATH_INFO'] == '/sleepy':
time.sleep(2)
body = b'sleepy returned'
else:
body = b'notsleepy returned'
cl = str(len(body))
start_response(
'200 OK',
[('Content-Length', cl), ('Content-Type', 'text/plain')]
)
return [body]
|
hansenmakangiras/disperindag | refs/heads/master | static/assets/node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
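# Illustrative sketch of what the reg.exe fallback parses (the exact output
# format is an assumption about reg.exe, shown roughly):
#   HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\12.0
#       InstallDir    REG_SZ    C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\IDE\
# The regex above keeps only the text following the REG_* type token, i.e. the
# value itself.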
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
|
735tesla/SneakPeep | refs/heads/master | unidecode/x086.py | 252 | data = (
'Tuo ', # 0x00
'Wu ', # 0x01
'Rui ', # 0x02
'Rui ', # 0x03
'Qi ', # 0x04
'Heng ', # 0x05
'Lu ', # 0x06
'Su ', # 0x07
'Tui ', # 0x08
'Mang ', # 0x09
'Yun ', # 0x0a
'Pin ', # 0x0b
'Yu ', # 0x0c
'Xun ', # 0x0d
'Ji ', # 0x0e
'Jiong ', # 0x0f
'Xian ', # 0x10
'Mo ', # 0x11
'Hagi ', # 0x12
'Su ', # 0x13
'Jiong ', # 0x14
'[?] ', # 0x15
'Nie ', # 0x16
'Bo ', # 0x17
'Rang ', # 0x18
'Yi ', # 0x19
'Xian ', # 0x1a
'Yu ', # 0x1b
'Ju ', # 0x1c
'Lian ', # 0x1d
'Lian ', # 0x1e
'Yin ', # 0x1f
'Qiang ', # 0x20
'Ying ', # 0x21
'Long ', # 0x22
'Tong ', # 0x23
'Wei ', # 0x24
'Yue ', # 0x25
'Ling ', # 0x26
'Qu ', # 0x27
'Yao ', # 0x28
'Fan ', # 0x29
'Mi ', # 0x2a
'Lan ', # 0x2b
'Kui ', # 0x2c
'Lan ', # 0x2d
'Ji ', # 0x2e
'Dang ', # 0x2f
'Katsura ', # 0x30
'Lei ', # 0x31
'Lei ', # 0x32
'Hua ', # 0x33
'Feng ', # 0x34
'Zhi ', # 0x35
'Wei ', # 0x36
'Kui ', # 0x37
'Zhan ', # 0x38
'Huai ', # 0x39
'Li ', # 0x3a
'Ji ', # 0x3b
'Mi ', # 0x3c
'Lei ', # 0x3d
'Huai ', # 0x3e
'Luo ', # 0x3f
'Ji ', # 0x40
'Kui ', # 0x41
'Lu ', # 0x42
'Jian ', # 0x43
'San ', # 0x44
'[?] ', # 0x45
'Lei ', # 0x46
'Quan ', # 0x47
'Xiao ', # 0x48
'Yi ', # 0x49
'Luan ', # 0x4a
'Men ', # 0x4b
'Bie ', # 0x4c
'Hu ', # 0x4d
'Hu ', # 0x4e
'Lu ', # 0x4f
'Nue ', # 0x50
'Lu ', # 0x51
'Si ', # 0x52
'Xiao ', # 0x53
'Qian ', # 0x54
'Chu ', # 0x55
'Hu ', # 0x56
'Xu ', # 0x57
'Cuo ', # 0x58
'Fu ', # 0x59
'Xu ', # 0x5a
'Xu ', # 0x5b
'Lu ', # 0x5c
'Hu ', # 0x5d
'Yu ', # 0x5e
'Hao ', # 0x5f
'Jiao ', # 0x60
'Ju ', # 0x61
'Guo ', # 0x62
'Bao ', # 0x63
'Yan ', # 0x64
'Zhan ', # 0x65
'Zhan ', # 0x66
'Kui ', # 0x67
'Ban ', # 0x68
'Xi ', # 0x69
'Shu ', # 0x6a
'Chong ', # 0x6b
'Qiu ', # 0x6c
'Diao ', # 0x6d
'Ji ', # 0x6e
'Qiu ', # 0x6f
'Cheng ', # 0x70
'Shi ', # 0x71
'[?] ', # 0x72
'Di ', # 0x73
'Zhe ', # 0x74
'She ', # 0x75
'Yu ', # 0x76
'Gan ', # 0x77
'Zi ', # 0x78
'Hong ', # 0x79
'Hui ', # 0x7a
'Meng ', # 0x7b
'Ge ', # 0x7c
'Sui ', # 0x7d
'Xia ', # 0x7e
'Chai ', # 0x7f
'Shi ', # 0x80
'Yi ', # 0x81
'Ma ', # 0x82
'Xiang ', # 0x83
'Fang ', # 0x84
'E ', # 0x85
'Pa ', # 0x86
'Chi ', # 0x87
'Qian ', # 0x88
'Wen ', # 0x89
'Wen ', # 0x8a
'Rui ', # 0x8b
'Bang ', # 0x8c
'Bi ', # 0x8d
'Yue ', # 0x8e
'Yue ', # 0x8f
'Jun ', # 0x90
'Qi ', # 0x91
'Ran ', # 0x92
'Yin ', # 0x93
'Qi ', # 0x94
'Tian ', # 0x95
'Yuan ', # 0x96
'Jue ', # 0x97
'Hui ', # 0x98
'Qin ', # 0x99
'Qi ', # 0x9a
'Zhong ', # 0x9b
'Ya ', # 0x9c
'Ci ', # 0x9d
'Mu ', # 0x9e
'Wang ', # 0x9f
'Fen ', # 0xa0
'Fen ', # 0xa1
'Hang ', # 0xa2
'Gong ', # 0xa3
'Zao ', # 0xa4
'Fu ', # 0xa5
'Ran ', # 0xa6
'Jie ', # 0xa7
'Fu ', # 0xa8
'Chi ', # 0xa9
'Dou ', # 0xaa
'Piao ', # 0xab
'Xian ', # 0xac
'Ni ', # 0xad
'Te ', # 0xae
'Qiu ', # 0xaf
'You ', # 0xb0
'Zha ', # 0xb1
'Ping ', # 0xb2
'Chi ', # 0xb3
'You ', # 0xb4
'He ', # 0xb5
'Han ', # 0xb6
'Ju ', # 0xb7
'Li ', # 0xb8
'Fu ', # 0xb9
'Ran ', # 0xba
'Zha ', # 0xbb
'Gou ', # 0xbc
'Pi ', # 0xbd
'Bo ', # 0xbe
'Xian ', # 0xbf
'Zhu ', # 0xc0
'Diao ', # 0xc1
'Bie ', # 0xc2
'Bing ', # 0xc3
'Gu ', # 0xc4
'Ran ', # 0xc5
'Qu ', # 0xc6
'She ', # 0xc7
'Tie ', # 0xc8
'Ling ', # 0xc9
'Gu ', # 0xca
'Dan ', # 0xcb
'Gu ', # 0xcc
'Ying ', # 0xcd
'Li ', # 0xce
'Cheng ', # 0xcf
'Qu ', # 0xd0
'Mou ', # 0xd1
'Ge ', # 0xd2
'Ci ', # 0xd3
'Hui ', # 0xd4
'Hui ', # 0xd5
'Mang ', # 0xd6
'Fu ', # 0xd7
'Yang ', # 0xd8
'Wa ', # 0xd9
'Lie ', # 0xda
'Zhu ', # 0xdb
'Yi ', # 0xdc
'Xian ', # 0xdd
'Kuo ', # 0xde
'Jiao ', # 0xdf
'Li ', # 0xe0
'Yi ', # 0xe1
'Ping ', # 0xe2
'Ji ', # 0xe3
'Ha ', # 0xe4
'She ', # 0xe5
'Yi ', # 0xe6
'Wang ', # 0xe7
'Mo ', # 0xe8
'Qiong ', # 0xe9
'Qie ', # 0xea
'Gui ', # 0xeb
'Gong ', # 0xec
'Zhi ', # 0xed
'Man ', # 0xee
'Ebi ', # 0xef
'Zhi ', # 0xf0
'Jia ', # 0xf1
'Rao ', # 0xf2
'Si ', # 0xf3
'Qi ', # 0xf4
'Xing ', # 0xf5
'Lie ', # 0xf6
'Qiu ', # 0xf7
'Shao ', # 0xf8
'Yong ', # 0xf9
'Jia ', # 0xfa
'Shui ', # 0xfb
'Che ', # 0xfc
'Bai ', # 0xfd
'E ', # 0xfe
'Han ', # 0xff
)
|
gopal1cloud/neutron | refs/heads/master | neutron/api/versions.py | 18 | # Copyright 2011 Citrix Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.dec
from neutron.api.views import versions as versions_view
from neutron.openstack.common import gettextutils
from neutron.openstack.common import log as logging
from neutron import wsgi
LOG = logging.getLogger(__name__)
class Versions(object):
@classmethod
def factory(cls, global_config, **local_config):
return cls()
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all Neutron API versions."""
version_objs = [
{
"id": "v2.0",
"status": "CURRENT",
},
]
if req.path != '/':
language = req.best_match_language()
msg = _('Unknown API version specified')
msg = gettextutils.translate(msg, language)
return webob.exc.HTTPNotFound(explanation=msg)
builder = versions_view.get_view_builder(req)
versions = [builder.build(version) for version in version_objs]
response = dict(versions=versions)
metadata = {
"application/xml": {
"attributes": {
"version": ["status", "id"],
"link": ["rel", "href"],
}
}
}
content_type = req.best_match_content_type()
body = (wsgi.Serializer(metadata=metadata).
serialize(response, content_type))
response = webob.Response()
response.content_type = content_type
response.body = body
return response
|
BitBotFactory/poloniexlendingbot | refs/heads/master | modules/ConsoleUtils.py | 5 | # coding=utf-8
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on Linux, OS X, Windows, and Cygwin (Windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy
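# A minimal usage sketch (illustrative only):
#   cols, rows = get_terminal_size()
#   print "console is %d columns x %d rows" % (cols, rows)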
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
cols = int(subprocess.check_output(shlex.split('tput cols')))
rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
|
nirb/whatsapp | refs/heads/master | yowsup/layers/protocol_presence/layer.py | 8 | from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
class YowPresenceProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"presence": (self.recvPresence, self.sendPresence)
}
super(YowPresenceProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Presence Layer"
def sendPresence(self, entity):
self.entityToLower(entity)
def recvPresence(self, node):
pass
#self.toUpper(IncomingAckProtocolEntity.fromProtocolTreeNode(node))
|
yask123/django | refs/heads/master | django/contrib/flatpages/migrations/0001_initial.py | 308 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlatPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('content', models.TextField(verbose_name='content', blank=True)),
('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
('template_name', models.CharField(
help_text=(
"Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use "
"'flatpages/default.html'."
), max_length=70, verbose_name='template name', blank=True
)),
('registration_required', models.BooleanField(
default=False, help_text='If this is checked, only logged-in users will be able to view the page.',
verbose_name='registration required'
)),
('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
],
options={
'ordering': ('url',),
'db_table': 'django_flatpage',
'verbose_name': 'flat page',
'verbose_name_plural': 'flat pages',
},
bases=(models.Model,),
),
]
|
akhilari7/pa-dude | refs/heads/master | lib/python2.7/site-packages/nltk/sem/lfg.py | 10 | # Natural Language Toolkit: Lexical Functional Grammar
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division, unicode_literals
from nltk.internals import Counter
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class FStructure(dict):
def safeappend(self, key, item):
"""
Append 'item' to the list at 'key'. If no list exists for 'key', then
construct one.
"""
if key not in self:
self[key] = []
self[key].append(item)
def __setitem__(self, key, value):
dict.__setitem__(self, key.lower(), value)
def __getitem__(self, key):
return dict.__getitem__(self, key.lower())
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def to_glueformula_list(self, glue_dict):
depgraph = self.to_depgraph()
return glue_dict.to_glueformula_list(depgraph)
def to_depgraph(self, rel=None):
from nltk.parse.dependencygraph import DependencyGraph
depgraph = DependencyGraph()
nodes = depgraph.nodes
self._to_depgraph(nodes, 0, 'ROOT')
# Add all the dependencies for all the nodes
for address, node in nodes.items():
for n2 in (n for n in nodes.values() if n['rel'] != 'TOP'):
if n2['head'] == address:
relation = n2['rel']
node['deps'].setdefault(relation,[])
node['deps'][relation].append(n2['address'])
depgraph.root = nodes[1]
return depgraph
def _to_depgraph(self, nodes, head, rel):
index = len(nodes)
nodes[index].update(
{
'address': index,
'word': self.pred[0],
'tag': self.pred[1],
'head': head,
'rel': rel,
}
)
for feature in sorted(self):
for item in sorted(self[feature]):
if isinstance(item, FStructure):
item._to_depgraph(nodes, index, feature)
elif isinstance(item, tuple):
new_index = len(nodes)
nodes[new_index].update(
{
'address': new_index,
'word': item[0],
'tag': item[1],
'head': index,
'rel': feature,
}
)
elif isinstance(item, list):
for n in item:
n._to_depgraph(nodes, index, feature)
else:
raise Exception('feature %s is not an FStruct, a list, or a tuple' % feature)
@staticmethod
def read_depgraph(depgraph):
return FStructure._read_depgraph(depgraph.root, depgraph)
@staticmethod
def _read_depgraph(node, depgraph, label_counter=None, parent=None):
if not label_counter:
label_counter = Counter()
if node['rel'].lower() in ['spec', 'punct']:
# the value of a 'spec' entry is a word, not an FStructure
return (node['word'], node['tag'])
else:
fstruct = FStructure()
fstruct.pred = None
fstruct.label = FStructure._make_label(label_counter.get())
fstruct.parent = parent
word, tag = node['word'], node['tag']
if tag[:2] == 'VB':
if tag[2:3] == 'D':
fstruct.safeappend('tense', ('PAST', 'tense'))
fstruct.pred = (word, tag[:2])
if not fstruct.pred:
fstruct.pred = (word, tag)
children = [depgraph.nodes[idx] for idx in sum(list(node['deps'].values()), [])]
for child in children:
fstruct.safeappend(child['rel'], FStructure._read_depgraph(child, depgraph, label_counter, fstruct))
return fstruct
@staticmethod
def _make_label(value):
"""
Pick an alphabetic character as identifier for an entity in the model.
:param value: where to index into the list of characters
:type value: int
"""
letter = ['f','g','h','i','j','k','l','m','n','o','p','q','r','s',
't','u','v','w','x','y','z','a','b','c','d','e'][value-1]
num = int(value) // 26
if num > 0:
return letter + str(num)
else:
return letter
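# For example, _make_label(1) returns 'f' and _make_label(2) returns 'g';
# once the value reaches 26 a numeric suffix is appended, so _make_label(26)
# returns 'e1'.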
def __repr__(self):
return self.__unicode__().replace('\n', '')
def __str__(self):
return self.pretty_format()
def pretty_format(self, indent=3):
try:
accum = '%s:[' % self.label
except AttributeError:
accum = '['
try:
accum += 'pred \'%s\'' % (self.pred[0])
except AttributeError:
pass
for feature in sorted(self):
for item in self[feature]:
if isinstance(item, FStructure):
next_indent = indent+len(feature)+3+len(self.label)
accum += '\n%s%s %s' % (' '*(indent), feature, item.pretty_format(next_indent))
elif isinstance(item, tuple):
accum += '\n%s%s \'%s\'' % (' '*(indent), feature, item[0])
elif isinstance(item, list):
accum += '\n%s%s {%s}' % (' '*(indent), feature, ('\n%s' % (' '*(indent+len(feature)+2))).join(item))
else: # ERROR
raise Exception('feature %s is not an FStruct, a list, or a tuple' % feature)
return accum+']'
def demo_read_depgraph():
from nltk.parse.dependencygraph import DependencyGraph
dg1 = DependencyGraph("""\
Esso NNP 2 SUB
said VBD 0 ROOT
the DT 5 NMOD
Whiting NNP 5 NMOD
field NN 6 SUB
started VBD 2 VMOD
production NN 6 OBJ
Tuesday NNP 6 VMOD
""")
dg2 = DependencyGraph("""\
John NNP 2 SUB
sees VBP 0 ROOT
Mary NNP 2 OBJ
""")
dg3 = DependencyGraph("""\
a DT 2 SPEC
man NN 3 SUBJ
walks VB 0 ROOT
""")
dg4 = DependencyGraph("""\
every DT 2 SPEC
girl NN 3 SUBJ
chases VB 0 ROOT
a DT 5 SPEC
dog NN 3 OBJ
""")
depgraphs = [dg1,dg2,dg3,dg4]
for dg in depgraphs:
print(FStructure.read_depgraph(dg))
if __name__ == '__main__':
demo_read_depgraph()
|
fejta/test-infra | refs/heads/master | gubernator/view_pr.py | 20 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import os
import time
import filters
import gcs_async
import github.models as ghm
import pull_request
import view_base
import view_build
@view_base.memcache_memoize('pr-details://', expires=60 * 3)
def pr_builds(path):
"""Return {job: [(build, {started.json}, {finished.json})]} for each job under gcs path."""
jobs_dirs_fut = gcs_async.listdirs(path)
def base(path):
return os.path.basename(os.path.dirname(path))
jobs_futures = [(job, gcs_async.listdirs(job)) for job in jobs_dirs_fut.get_result()]
futures = []
for job, builds_fut in jobs_futures:
for build in builds_fut.get_result():
futures.append([
base(job),
base(build),
gcs_async.read('/%sstarted.json' % build),
gcs_async.read('/%sfinished.json' % build)])
futures.sort(key=lambda (job, build, s, f): (job, view_base.pad_numbers(build)), reverse=True)
jobs = {}
for job, build, started_fut, finished_fut in futures:
started, finished = view_build.normalize_metadata(started_fut, finished_fut)
jobs.setdefault(job, []).append((build, started, finished))
return jobs
def pr_path(org, repo, pr, default_org, default_repo, pull_prefix):
"""Builds the correct gs://prefix/maybe_kubernetes/maybe_repo_org/pr."""
if org == default_org and repo == default_repo:
return '%s/%s' % (pull_prefix, pr)
if org == default_org:
return '%s/%s/%s' % (pull_prefix, repo, pr)
return '%s/%s_%s/%s' % (pull_prefix, org, repo, pr)
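# Illustrative examples (hypothetical org/repo/prefix values):
#   pr_path('k8s', 'k8s', '123', 'k8s', 'k8s', 'bucket/pr-logs/pull')   -> 'bucket/pr-logs/pull/123'
#   pr_path('k8s', 'infra', '123', 'k8s', 'k8s', 'bucket/pr-logs/pull') -> 'bucket/pr-logs/pull/infra/123'
#   pr_path('org', 'repo', '123', 'k8s', 'k8s', 'bucket/pr-logs/pull')  -> 'bucket/pr-logs/pull/org_repo/123'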
def org_repo(path, default_org, default_repo):
"""Converts /maybe_org/maybe_repo into (org, repo)."""
parts = path.split('/')[1:]
if len(parts) == 2:
org, repo = parts
elif len(parts) == 1:
org = default_org
repo = parts[0]
else:
org = default_org
repo = default_repo
return org, repo
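# Illustrative examples (hypothetical defaults):
#   org_repo('/infra', 'k8s', 'k8s')     -> ('k8s', 'infra')
#   org_repo('/org/repo', 'k8s', 'k8s')  -> ('org', 'repo')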
def get_pull_prefix(config, org):
if org in config['external_services']:
return config['external_services'][org]['gcs_pull_prefix']
return config['default_external_services']['gcs_pull_prefix']
class PRHandler(view_base.BaseHandler):
"""Show a list of test runs for a PR."""
def get(self, path, pr):
# pylint: disable=too-many-locals
org, repo = org_repo(path=path,
default_org=self.app.config['default_org'],
default_repo=self.app.config['default_repo'],
)
path = pr_path(org=org, repo=repo, pr=pr,
pull_prefix=get_pull_prefix(self.app.config, org),
default_org=self.app.config['default_org'],
default_repo=self.app.config['default_repo'],
)
builds = pr_builds(path)
# TODO(fejta): assume all builds are monotonically increasing.
for bs in builds.itervalues():
if any(len(b) > 8 for b, _, _ in bs):
bs.sort(key=lambda (b, s, f): -(s or {}).get('timestamp', 0))
if pr == 'batch': # truncate batch results to last day
cutoff = time.time() - 60 * 60 * 24
            all_builds = builds
            builds = {}
            for job, job_builds in all_builds.iteritems():
builds[job] = [
(b, s, f) for b, s, f in job_builds
if not s or s.get('timestamp') > cutoff
]
max_builds, headings, rows = pull_request.builds_to_table(builds)
digest = ghm.GHIssueDigest.get('%s/%s' % (org, repo), pr)
self.render(
'pr.html',
dict(
pr=pr,
digest=digest,
max_builds=max_builds,
header=headings,
org=org,
repo=repo,
rows=rows,
path=path,
)
)
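# get_acks returns the signed-in user's ack state keyed by "repo number",
# pruning acks for PRs the user is no longer involved in.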
def get_acks(login, prs):
acks = {}
result = ghm.GHUserState.make_key(login).get()
if result:
acks = result.acks
if prs:
        # clear acks for PRs that the user is no longer involved in.
stale = set(acks) - set(pr.key.id() for pr in prs)
if stale:
for key in stale:
result.acks.pop(key)
result.put()
return acks
class InsensitiveString(str):
"""A string that uses str.lower() to compare itself to others.
    Does not override containment checks (which use hash()) or sorting."""
def __eq__(self, other):
try:
return other.lower() == self.lower()
except AttributeError:
return str.__eq__(self, other)
class PRDashboard(view_base.BaseHandler):
def get(self, user=None):
# pylint: disable=singleton-comparison
login = self.session.get('user')
if not user:
user = login
if not user:
self.redirect('/github_auth/pr')
return
logging.debug('user=%s', user)
elif user == 'all':
user = None
qs = [ghm.GHIssueDigest.is_pr == True]
if not self.request.get('all', False):
qs.append(ghm.GHIssueDigest.is_open == True)
if user:
qs.append(ghm.GHIssueDigest.involved == user.lower())
prs = list(ghm.GHIssueDigest.query(*qs).fetch(batch_size=200))
prs.sort(key=lambda x: x.updated_at, reverse=True)
acks = None
if login and user == login: # user getting their own page
acks = get_acks(login, prs)
fmt = self.request.get('format', 'html')
if fmt == 'json':
self.response.headers['Content-Type'] = 'application/json'
def serial(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, ghm.GHIssueDigest):
# pylint: disable=protected-access
keys = ['repo', 'number'] + list(obj._values)
return {k: getattr(obj, k) for k in keys}
raise TypeError
self.response.write(json.dumps(prs, sort_keys=True, default=serial, indent=True))
elif fmt == 'html':
if user:
user = InsensitiveString(user)
def acked(p):
if filters.has_lgtm_without_missing_approval(p, user):
# LGTM is an implicit Ack (suppress from incoming)...
# if it doesn't also need approval
return True
if acks is None:
return False
return filters.do_get_latest(p.payload, user) <= acks.get(p.key.id(), 0)
def needs_attention(p):
labels = p.payload.get('labels', {})
for u, reason in p.payload['attn'].iteritems():
if user == u: # case insensitive compare
if acked(p):
continue # hide acked PRs
if reason == 'needs approval' and 'lgtm' not in labels:
continue # hide PRs that need approval but haven't been LGTMed yet
return True
return False
cats = [
('Needs Attention', needs_attention, ''),
('Approvable', lambda p: user in p.payload.get('approvers', []),
'is:open is:pr ("additional approvers: {0}" ' +
'OR "additional approver: {0}")'.format(user)),
('Incoming', lambda p: user != p.payload['author'] and
user in p.payload['assignees'],
'is:open is:pr user:kubernetes assignee:%s' % user),
('Outgoing', lambda p: user == p.payload['author'],
'is:open is:pr user:kubernetes author:%s' % user),
]
else:
cats = [('Open Kubernetes PRs', lambda x: True,
'is:open is:pr user:kubernetes')]
milestone = self.request.get('milestone')
milestones = {p.payload.get('milestone') for p in prs} - {None}
if milestone:
prs = [pr for pr in prs if pr.payload.get('milestone') == milestone]
self.render('pr_dashboard.html', dict(
prs=prs, cats=cats, user=user, login=login, acks=acks,
milestone=milestone, milestones=milestones))
else:
self.abort(406)
def post(self):
login = self.session.get('user')
if not login:
self.abort(403)
state = ghm.GHUserState.make_key(login).get()
if state is None:
state = ghm.GHUserState.make(login)
body = json.loads(self.request.body)
if body['command'] == 'ack':
delta = {'%s %s' % (body['repo'], body['number']): body['latest']}
state.acks.update(delta)
state.put()
elif body['command'] == 'ack-clear':
state.acks = {}
state.put()
else:
self.abort(400)
class PRBuildLogHandler(view_base.BaseHandler):
def get(self, path):
org, _ = org_repo(path=path,
default_org=self.app.config['default_org'],
default_repo=self.app.config['default_repo'],
)
self.redirect('https://storage.googleapis.com/%s/%s' % (
get_pull_prefix(self.app.config, org), path
))
|
hellhound/dentexchange | refs/heads/master | dentexchange/apps/libs/haystack/utils.py | 2 | # -*- coding:utf-8 -*-
from django.db.models.loading import get_model
from django.core.exceptions import ImproperlyConfigured
from haystack import connections, connection_router
from haystack.utils import get_identifier
from djcelery_transactions import PostTransactionTask as Task
def split_identifier(identifier):
'''
Converts 'notes.note.23' into ('notes.note', 23).
'''
bits = identifier.split('.')
if len(bits) < 2:
return (None, None)
pk = bits[-1]
# In case Django ever handles full paths...
object_path = '.'.join(bits[:-1])
return (object_path, pk)
def get_model_class(object_path):
'''
    Fetch the model's class in a standardized way.
'''
bits = object_path.split('.')
app_name = '.'.join(bits[:-1])
classname = bits[-1]
model_class = get_model(app_name, classname)
if model_class is None:
raise ImproperlyConfigured('Could not load model \'%s\'.' %
object_path)
return model_class
def get_instance(model_class, pk):
'''
    Fetch the instance in a standardized way.
'''
try:
instance = model_class._default_manager.get(pk=int(pk))
except (model_class.DoesNotExist, model_class.MultipleObjectsReturned):
return None
return instance
def get_instance_from_identifier(identifier):
if isinstance(identifier, basestring):
object_path, pk = split_identifier(identifier)
model_class = get_model_class(object_path)
return get_instance(model_class, pk)
return identifier
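# get_indexes yields the haystack SearchIndex registered for model_class on
# every backend the connection router selects for writes.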
def get_indexes(model_class):
using_backends = connection_router.for_write(
models=[model_class])
for using in using_backends:
index_holder = connections[using].get_unified_index()
yield index_holder.get_index(model_class)
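# AsyncIndexAdapter defers index maintenance to HaystackActionTask, a
# djcelery-transactions task that only runs once the surrounding database
# transaction has committed.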
class AsyncIndexAdapter(object):
@staticmethod
def remove_object(obj):
HaystackActionTask.delay(HaystackActionTask.REMOVE_ACTION,
get_identifier(obj))
@staticmethod
def update_object(obj):
HaystackActionTask.delay(HaystackActionTask.UPDATE_ACTION,
get_identifier(obj))
class HaystackActionTask(Task):
REMOVE_ACTION = 0
UPDATE_ACTION = 1
def run(self, action, identifier):
instance = get_instance_from_identifier(identifier)
for index in get_indexes(type(instance)):
if action == self.REMOVE_ACTION:
index.remove_object(instance)
else:
index.update_object(instance)
|
alianmohammad/pd-gem5-latest | refs/heads/master | util/stats/stats.py | 77 | #!/usr/bin/env python
# Copyright (c) 2003-2004 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import re, sys, math
def usage():
print '''\
Usage: %s [-E] [-F] [ -G <get> ] [-d <db> ] [-g <graphdir> ] [-h <host>] [-p]
[-s <system>] [-r <runs> ] [-T <samples>] [-u <username>]
<command> [command args]
commands extra parameters description
----------- ------------------ ---------------------------------------
formula <formula> Evaluated formula specified
formulas [regex] List formulas (only matching regex)
runs none List all runs in database
samples none List samples present in database
stability <pairnum> <stats> Calculated statistical info about stats
stat <regex> Show stat data (only matching regex)
stats [regex] List all stats (only matching regex)
database <command> Where command is drop, init, or clean
''' % sys.argv[0]
sys.exit(1)
def getopts(list, flags):
import getopt
try:
opts, args = getopt.getopt(list, flags)
except getopt.GetoptError:
usage()
return opts, args
class CommandException(Exception):
pass
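# commands() is the main dispatcher: database admin commands are handled
# before connecting; everything else opens a connection, filters the runs,
# and displays or graphs the requested stat.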
def commands(options, command, args):
if command == 'database':
if len(args) == 0: raise CommandException
import dbinit
mydb = dbinit.MyDB(options)
if args[0] == 'drop':
if len(args) > 2: raise CommandException
mydb.admin()
mydb.drop()
if len(args) == 2 and args[1] == 'init':
mydb.create()
mydb.connect()
mydb.populate()
mydb.close()
return
if args[0] == 'init':
if len(args) > 1: raise CommandException
mydb.admin()
mydb.create()
mydb.connect()
mydb.populate()
mydb.close()
return
if args[0] == 'clean':
if len(args) > 1: raise CommandException
mydb.connect()
mydb.clean()
return
raise CommandException
import db
source = db.Database()
source.host = options.host
source.db = options.db
source.passwd = options.passwd
source.user = options.user
source.connect()
#source.update_dict(globals())
if type(options.method) is str:
source.method = options.method
if options.runs is None:
runs = source.allRuns
else:
rx = re.compile(options.runs)
runs = []
for run in source.allRuns:
if rx.match(run.name):
runs.append(run)
if command == 'runs':
user = None
opts, args = getopts(args, '-u')
if len(args):
raise CommandException
for o,a in opts:
if o == '-u':
user = a
source.listRuns(user)
return
if command == 'stats':
if len(args) == 0:
source.listStats()
elif len(args) == 1:
source.listStats(args[0])
else:
raise CommandException
return
if command == 'formulas':
if len(args) == 0:
source.listFormulas()
elif len(args) == 1:
source.listFormulas(args[0])
else:
raise CommandException
return
if command == 'samples':
if len(args):
raise CommandException
source.listTicks(runs)
return
if command == 'stability':
if len(args) < 2:
raise CommandException
try:
merge = int(args[0])
except ValueError:
usage()
stats = source.getStat(args[1])
source.method = 'sum'
def disp(*args):
print "%-35s %12s %12s %4s %5s %5s %5s %10s" % args
# temporary variable containing a bunch of dashes
d = '-' * 100
#loop through all the stats selected
for stat in stats:
print "%s:" % stat.name
disp("run name", "average", "stdev", ">10%", ">1SDV", ">2SDV",
"SAMP", "CV")
disp(d[:35], d[:12], d[:12], d[:4], d[:5], d[:5], d[:5], d[:10])
#loop through all the selected runs
for run in runs:
runTicks = source.retTicks([ run ])
#throw away the first one, it's 0
runTicks.pop(0)
source.ticks = runTicks
avg = 0
stdev = 0
numoutsideavg = 0
numoutside1std = 0
numoutside2std = 0
pairRunTicks = []
if value(stat, run.run) == 1e300*1e300:
continue
for t in range(0, len(runTicks)-(merge-1), merge):
tempPair = []
for p in range(0,merge):
tempPair.append(runTicks[t+p])
pairRunTicks.append(tempPair)
#loop through all the various ticks for each run
for tick in pairRunTicks:
source.ticks = tick
avg += value(stat, run.run)
avg /= len(pairRunTicks)
for tick in pairRunTicks:
source.ticks = tick
val = value(stat, run.run)
stdev += pow((val-avg),2)
stdev = math.sqrt(stdev / len(pairRunTicks))
for tick in pairRunTicks:
source.ticks = tick
val = value(stat, run.run)
if (val < (avg * .9)) or (val > (avg * 1.1)):
numoutsideavg += 1
if (val < (avg - stdev)) or (val > (avg + stdev)):
numoutside1std += 1
if (val < (avg - (2*stdev))) or (val > (avg + (2*stdev))):
numoutside2std += 1
if avg > 1000:
disp(run.name, "%.1f" % avg, "%.1f" % stdev,
"%d" % numoutsideavg, "%d" % numoutside1std,
"%d" % numoutside2std, "%d" % len(pairRunTicks),
"%.3f" % (stdev/avg*100))
elif avg > 100:
disp(run.name, "%.1f" % avg, "%.1f" % stdev,
"%d" % numoutsideavg, "%d" % numoutside1std,
"%d" % numoutside2std, "%d" % len(pairRunTicks),
"%.5f" % (stdev/avg*100))
else:
disp(run.name, "%.5f" % avg, "%.5f" % stdev,
"%d" % numoutsideavg, "%d" % numoutside1std,
"%d" % numoutside2std, "%d" % len(pairRunTicks),
"%.7f" % (stdev/avg*100))
return
if command == 'all':
if len(args):
raise CommandException
all = [ 'bps', 'misses', 'mpkb', 'ipkb', 'pps', 'bpt' ]
for command in all:
commands(options, command, args)
if options.ticks:
if not options.graph:
print 'only displaying sample %s' % options.ticks
source.ticks = [ int(x) for x in options.ticks.split() ]
from output import StatOutput
output = StatOutput(options.jobfile, source)
output.xlabel = 'System Configuration'
output.colormap = 'RdYlGn'
if command == 'stat' or command == 'formula':
if len(args) != 1:
raise CommandException
if command == 'stat':
stats = source.getStat(args[0])
if command == 'formula':
stats = eval(args[0])
for stat in stats:
output.stat = stat
output.ylabel = stat.name
if options.graph:
output.graph(stat.name, options.graphdir)
else:
output.display(stat.name, options.printmode)
return
if len(args):
raise CommandException
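    # The remaining commands derive network/cache stats for the selected
    # system through the ProxyGroup wrapper below.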
from info import ProxyGroup
proxy = ProxyGroup(system = source[options.system])
system = proxy.system
etherdev = system.tsunami.etherdev0
bytes = etherdev.rxBytes + etherdev.txBytes
kbytes = bytes / 1024
packets = etherdev.rxPackets + etherdev.txPackets
def display():
if options.graph:
output.graph(command, options.graphdir, proxy)
else:
output.display(command, options.printmode)
if command == 'ticks':
output.stat = system.run0.numCycles
display()
return
if command == 'bytes':
output.stat = bytes
display()
return
if command == 'packets':
output.stat = packets
display()
return
if command == 'ppt' or command == 'tpp':
output.stat = packets / system.run0.numCycles
output.invert = command == 'tpp'
display()
return
if command == 'pps':
output.stat = packets / source['sim_seconds']
output.ylabel = 'Packets/s'
display()
return
if command == 'bpt' or command == 'tpb':
output.stat = bytes / system.run0.numCycles * 8
output.ylabel = 'bps / Hz'
output.invert = command == 'tpb'
display()
return
if command in ('rxbps', 'txbps', 'bps'):
if command == 'rxbps':
output.stat = etherdev.rxBandwidth / 1e9
if command == 'txbps':
output.stat = etherdev.txBandwidth / 1e9
if command == 'bps':
output.stat = (etherdev.rxBandwidth + etherdev.txBandwidth) / 1e9
output.ylabel = 'Bandwidth (Gbps)'
output.ylim = [ 0.0, 10.0 ]
display()
return
if command == 'bpp':
output.stat = bytes / packets
output.ylabel = 'Bytes / Packet'
display()
return
if command == 'rxbpp':
output.stat = etherdev.rxBytes / etherdev.rxPackets
output.ylabel = 'Receive Bytes / Packet'
display()
return
if command == 'txbpp':
output.stat = etherdev.txBytes / etherdev.txPackets
output.ylabel = 'Transmit Bytes / Packet'
display()
return
if command == 'rtp':
output.stat = etherdev.rxPackets / etherdev.txPackets
output.ylabel = 'rxPackets / txPackets'
display()
return
if command == 'rtb':
output.stat = etherdev.rxBytes / etherdev.txBytes
output.ylabel = 'rxBytes / txBytes'
display()
return
misses = system.l2.overall_mshr_misses
if command == 'misses':
output.stat = misses
output.ylabel = 'Overall MSHR Misses'
display()
return
if command == 'mpkb':
output.stat = misses / (bytes / 1024)
output.ylabel = 'Misses / KB'
display()
return
if command == 'ipkb':
interrupts = system.run0.kern.faults[4]
output.stat = interrupts / kbytes
output.ylabel = 'Interrupts / KB'
display()
return
if command == 'execute':
output.stat = system.run0.ISSUE__count
display()
return
if command == 'commit':
output.stat = system.run0.COM__count
display()
return
if command == 'fetch':
output.stat = system.run0.FETCH__count
display()
return
raise CommandException
class Options: pass
if __name__ == '__main__':
import getpass
options = Options()
options.host = None
options.db = None
options.passwd = ''
options.user = getpass.getuser()
options.runs = None
options.system = 'client'
options.method = None
options.graph = False
options.ticks = False
options.printmode = 'G'
jobfilename = None
options.jobfile = None
options.all = False
opts, args = getopts(sys.argv[1:], '-EFJad:g:h:j:m:pr:s:u:T:')
for o,a in opts:
if o == '-E':
options.printmode = 'E'
if o == '-F':
options.printmode = 'F'
if o == '-a':
options.all = True
if o == '-d':
options.db = a
if o == '-g':
options.graph = True;
options.graphdir = a
if o == '-h':
options.host = a
if o == '-J':
jobfilename = None
if o == '-j':
jobfilename = a
if o == '-m':
options.method = a
if o == '-p':
options.passwd = getpass.getpass()
if o == '-r':
options.runs = a
if o == '-u':
options.user = a
if o == '-s':
options.system = a
if o == '-T':
options.ticks = a
if jobfilename:
from jobfile import JobFile
options.jobfile = JobFile(jobfilename)
if not options.host:
options.host = options.jobfile.dbhost
if not options.db:
options.db = options.jobfile.statdb
if not options.host:
sys.exit('Database server must be provided from a jobfile or -h')
if not options.db:
sys.exit('Database name must be provided from a jobfile or -d')
if len(args) == 0:
usage()
command = args[0]
args = args[1:]
try:
commands(options, command, args)
except CommandException:
usage()
|
ckwatson/kernel | refs/heads/master | tests/quick_test.py | 1 | import sys
import os
from ..data.molecular_species import molecular_species
from ..data.reaction_mechanism_class import reaction_mechanism
from ..data.condition_class import condition
from ..data.reagent import reagent
from ..data.puzzle_class import puzzle
from ..data.solution_class import solution
def name(class_obj):
return class_obj.__name__
# depends on JSON base class
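# Run each class's built-in self-test with stdout redirected to a per-class
# result file, then report PASSED/FAILED on the real stdout.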
for class_being_tested in [molecular_species, condition, reaction_mechanism, reagent, puzzle, solution]:
system_output = sys.stdout # store stdout
sys.stdout = open(os.getcwd() + "/testing_result_" + name(class_being_tested) + ".txt", "w") # pipe to file
test_result = class_being_tested.test()
sys.stdout.close() # close file
    sys.stdout = system_output  # restore the original stdout
if test_result:
print("PASSED", name(class_being_tested), sep=" ")
else:
print("FAILED", name(class_being_tested), sep=" ")
|
webmasterraj/GaSiProMo | refs/heads/master | flask/lib/python2.7/site-packages/gunicorn/_compat.py | 35 | import sys
from gunicorn import six
PY33 = (sys.version_info >= (3, 3))
def _check_if_pyc(fname):
"""Return True if the extension is .pyc, False if .py
and None if otherwise"""
from imp import find_module
from os.path import realpath, dirname, basename, splitext
# Normalize the file-path for the find_module()
filepath = realpath(fname)
dirpath = dirname(filepath)
module_name = splitext(basename(filepath))[0]
# Validate and fetch
try:
fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
except ImportError:
raise IOError("Cannot find config file. "
"Path maybe incorrect! : {0}".format(filepath))
return pytype, fileobj, fullpath
def _get_codeobj(pyfile):
""" Returns the code object, given a python file """
from imp import PY_COMPILED, PY_SOURCE
result, fileobj, fullpath = _check_if_pyc(pyfile)
    # WARNING:
    # fileobj.read() can blow up if the module is an extremely large file.
    # Look out for memory errors.
try:
data = fileobj.read()
finally:
fileobj.close()
# This is a .pyc file. Treat accordingly.
if result is PY_COMPILED:
        # .pyc format is as follows:
        # 0 - 4 bytes: Magic number identifying the bytecode version;
        #   the first 2 bytes change when the bytecode format changes and
        #   the last 2 bytes are "\r\n".
        # 4 - 8 bytes: Timestamp of when the source .py was last modified.
        # 8 - EOF: Marshalled code object data.
        # So to get the code object, read from the 8th byte to EOF and
        # un-marshal it.
import marshal
code_obj = marshal.loads(data[8:])
elif result is PY_SOURCE:
# This is a .py file.
code_obj = compile(data, fullpath, 'exec')
else:
# Unsupported extension
raise Exception("Input file is unknown format: {0}".format(fullpath))
# Return code object
return code_obj
if six.PY3:
def execfile_(fname, *args):
if fname.endswith(".pyc"):
code = _get_codeobj(fname)
else:
code = compile(open(fname, 'rb').read(), fname, 'exec')
return six.exec_(code, *args)
def bytes_to_str(b):
if isinstance(b, six.text_type):
return b
return str(b, 'latin1')
import urllib.parse
def unquote_to_wsgi_str(string):
return _unquote_to_bytes(string).decode('latin-1')
_unquote_to_bytes = urllib.parse.unquote_to_bytes
else:
def execfile_(fname, *args):
""" Overriding PY2 execfile() implementation to support .pyc files """
if fname.endswith(".pyc"):
return six.exec_(_get_codeobj(fname), *args)
return execfile(fname, *args)
def bytes_to_str(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
import urllib
unquote_to_wsgi_str = urllib.unquote
# The following code adapted from trollius.py33_exceptions
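# _wrap_error re-raises an errno-keyed exception as the matching PEP 3151
# exception class from `mapping`, preserving the original traceback.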
def _wrap_error(exc, mapping, key):
if key not in mapping:
return
new_err_cls = mapping[key]
new_err = new_err_cls(*exc.args)
# raise a new exception with the original traceback
if hasattr(exc, '__traceback__'):
traceback = exc.__traceback__
else:
traceback = sys.exc_info()[2]
six.reraise(new_err_cls, new_err, traceback)
if PY33:
import builtins
BlockingIOError = builtins.BlockingIOError
BrokenPipeError = builtins.BrokenPipeError
ChildProcessError = builtins.ChildProcessError
ConnectionRefusedError = builtins.ConnectionRefusedError
ConnectionResetError = builtins.ConnectionResetError
InterruptedError = builtins.InterruptedError
ConnectionAbortedError = builtins.ConnectionAbortedError
PermissionError = builtins.PermissionError
FileNotFoundError = builtins.FileNotFoundError
ProcessLookupError = builtins.ProcessLookupError
def wrap_error(func, *args, **kw):
return func(*args, **kw)
else:
import errno
import select
import socket
class BlockingIOError(OSError):
pass
class BrokenPipeError(OSError):
pass
class ChildProcessError(OSError):
pass
class ConnectionRefusedError(OSError):
pass
class InterruptedError(OSError):
pass
class ConnectionResetError(OSError):
pass
class ConnectionAbortedError(OSError):
pass
class PermissionError(OSError):
pass
class FileNotFoundError(OSError):
pass
class ProcessLookupError(OSError):
pass
_MAP_ERRNO = {
errno.EACCES: PermissionError,
errno.EAGAIN: BlockingIOError,
errno.EALREADY: BlockingIOError,
errno.ECHILD: ChildProcessError,
errno.ECONNABORTED: ConnectionAbortedError,
errno.ECONNREFUSED: ConnectionRefusedError,
errno.ECONNRESET: ConnectionResetError,
errno.EINPROGRESS: BlockingIOError,
errno.EINTR: InterruptedError,
errno.ENOENT: FileNotFoundError,
errno.EPERM: PermissionError,
errno.EPIPE: BrokenPipeError,
errno.ESHUTDOWN: BrokenPipeError,
errno.EWOULDBLOCK: BlockingIOError,
errno.ESRCH: ProcessLookupError,
}
def wrap_error(func, *args, **kw):
"""
Wrap socket.error, IOError, OSError, select.error to raise new specialized
exceptions of Python 3.3 like InterruptedError (PEP 3151).
"""
try:
return func(*args, **kw)
except (socket.error, IOError, OSError) as exc:
if hasattr(exc, 'winerror'):
_wrap_error(exc, _MAP_ERRNO, exc.winerror)
# _MAP_ERRNO does not contain all Windows errors.
# For some errors like "file not found", exc.errno should
# be used (ex: ENOENT).
_wrap_error(exc, _MAP_ERRNO, exc.errno)
raise
except select.error as exc:
if exc.args:
_wrap_error(exc, _MAP_ERRNO, exc.args[0])
raise
|
wscullin/spack | refs/heads/qmcpack | var/spack/repos/builtin/packages/hpx5/package.py | 3 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Hpx5(AutotoolsPackage):
"""The HPX-5 Runtime System. HPX-5 (High Performance ParalleX) is an
open source, portable, performance-oriented runtime developed at
CREST (Indiana University). HPX-5 provides a distributed
programming model allowing programs to run unmodified on systems
from a single SMP to large clusters and supercomputers with
thousands of nodes. HPX-5 supports a wide variety of Intel and ARM
platforms. It is being used by a broad range of scientific
applications enabling scientists to write code that performs and
scales better than contemporary runtimes."""
homepage = "http://hpx.crest.iu.edu"
url = "http://hpx.crest.iu.edu/release/hpx-3.1.0.tar.gz"
version('4.1.0', '43cb78758506f77416b95276a472f84f')
version('4.0.0', 'b40dc03449ae1039cbb48ee149952b22')
version('3.1.0', '9e90b8ac46788c009079632828c77628')
version('2.0.0', '3d2ff3aab6c46481f9ec65c5b2bfe7a6')
version('1.3.0', '2260ecc7f850e71a4d365a43017d8cee')
version('1.2.0', '4972005f85566af4afe8b71afbf1480f')
version('1.1.0', '646afb460ecb7e0eea713a634933ce4f')
version('1.0.0', '8020822adf6090bd59ed7fe465f6c6cb')
# Don't second-guess what compiler we are using on Cray
patch("configure.patch", when='@4.0.0')
variant('cuda', default=False, description='Enable CUDA support')
variant('cxx11', default=False, description='Enable C++11 hpx++ interface')
variant('debug', default=False, description='Build debug version of HPX-5')
variant('instrumentation', default=False, description='Enable instrumentation (may affect performance)')
variant('metis', default=False, description='Enable METIS support')
variant('mpi', default=False, description='Enable MPI support')
variant('opencl', default=False, description='Enable OpenCL support')
variant('photon', default=False, description='Enable Photon support')
variant('pic', default=True, description='Produce position-independent code')
depends_on("autoconf", type='build')
depends_on("automake", type='build')
depends_on("hwloc")
depends_on("hwloc +cuda", when='+cuda')
# Note: We could disable CUDA support via "hwloc ~cuda"
depends_on("jemalloc")
# depends_on("libffi")
depends_on("libtool", type='build')
# depends_on("lz4") # hpx5 always builds its own lz4
depends_on("m4", type='build')
depends_on("metis", when='+metis')
depends_on("mpi", when='+mpi')
depends_on("mpi", when='+photon')
depends_on("opencl", when='+opencl')
# depends_on("papi")
depends_on("pkg-config", type='build')
configure_directory = "hpx"
build_directory = "spack-build"
def configure_args(self):
spec = self.spec
args = [
'--enable-agas', # make this a variant?
'--enable-jemalloc', # make this a variant?
'--enable-percolation', # make this a variant?
# '--enable-rebalancing', # this seems broken
'--with-hwloc=hwloc',
'--with-jemalloc=jemalloc',
# Spack's libffi installs its headers strangely,
# leading to problems
'--with-libffi=contrib',
# '--with-papi=papi', # currently disabled in HPX
]
if '+cxx11' in spec:
args += ['--enable-hpx++']
if '+debug' in spec:
args += ['--enable-debug']
if '+instrumentation' in spec:
args += ['--enable-instrumentation']
if '+mpi' in spec or '+photon' in spec:
# photon requires mpi
args += ['--enable-mpi']
# Choose pkg-config name for MPI library
if '^openmpi' in spec:
args += ['--with-mpi=ompi-cxx']
elif '^mpich' in spec:
args += ['--with-mpi=mpich']
elif '^mvapich2' in spec:
args += ['--with-mpi=mvapich2-cxx']
else:
args += ['--with-mpi=system']
# METIS does not support pkg-config; HPX will pick it up automatically
# if '+metis' in spec:
# args += ['--with-metis=???']
if '+opencl' in spec:
args += ['--enable-opencl']
if '^pocl' in spec:
args += ['--with-opencl=pocl']
else:
args += ['--with-opencl=system']
if '+photon' in spec:
args += ['--enable-photon']
if '+pic' in spec:
args += ['--with-pic']
return args
|
d-Rickyy-b/TelegramBot | refs/heads/master | lang/language.py | 1 | __author__ = 'Rico'
import codecs
import configparser
translations = configparser.ConfigParser()
translations.read_file(codecs.open("lang/translations.ini", "r", "UTF-8"))
# translation returns the translation for a specific string
def translation(string, language):
if language in translations and string in translations[language]:
return translations[language][string]
elif language == "br":
# TODO remove this part. New users should have pt_BR as lang_id
return translations["pt_BR"][string]
elif "en" in translations and string in translations["en"]:
return translations["en"][string]
return string
|
mstriemer/olympia | refs/heads/master | src/olympia/tags/models.py | 3 | from django.db import models
from django.core.urlresolvers import NoReverseMatch
from olympia import amo
from olympia.amo.models import ModelBase, ManagerBase
from olympia.amo.urlresolvers import reverse
class TagManager(ManagerBase):
def not_denied(self):
"""Get allowed tags only"""
return self.filter(denied=False)
class Tag(ModelBase):
tag_text = models.CharField(max_length=128)
denied = models.BooleanField(default=False)
restricted = models.BooleanField(default=False)
addons = models.ManyToManyField('addons.Addon', through='AddonTag',
related_name='tags')
num_addons = models.IntegerField(default=0)
objects = TagManager()
class Meta:
db_table = 'tags'
ordering = ('tag_text',)
def __unicode__(self):
return self.tag_text
@property
def popularity(self):
return self.num_addons
def can_reverse(self):
try:
self.get_url_path()
return True
except NoReverseMatch:
return False
def get_url_path(self):
return reverse('tags.detail', args=[self.tag_text])
def save_tag(self, addon):
tag, created = Tag.objects.get_or_create(tag_text=self.tag_text)
AddonTag.objects.get_or_create(addon=addon, tag=tag)
amo.log(amo.LOG.ADD_TAG, tag, addon)
return tag
def remove_tag(self, addon):
tag, created = Tag.objects.get_or_create(tag_text=self.tag_text)
for addon_tag in AddonTag.objects.filter(addon=addon, tag=tag):
addon_tag.delete()
amo.log(amo.LOG.REMOVE_TAG, tag, addon)
def update_stat(self):
if self.denied:
return
self.num_addons = self.addons.count()
self.save()
class AddonTag(ModelBase):
addon = models.ForeignKey('addons.Addon', related_name='addon_tags')
tag = models.ForeignKey(Tag, related_name='addon_tags')
class Meta:
db_table = 'users_tags_addons'
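# Keep Tag.num_addons in sync: every AddonTag save/delete schedules the
# update_tag_stat celery task for the affected tag via the signal hookups
# below.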
def update_tag_stat_signal(sender, instance, **kw):
from .tasks import update_tag_stat
if not kw.get('raw'):
try:
update_tag_stat.delay(instance.tag)
except Tag.DoesNotExist:
pass
models.signals.post_save.connect(update_tag_stat_signal, sender=AddonTag,
dispatch_uid='update_tag_stat')
models.signals.post_delete.connect(update_tag_stat_signal, sender=AddonTag,
dispatch_uid='delete_tag_stat')
|
kushalbhola/MyStuff | refs/heads/master | venv/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py | 1152 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
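# Infinity and NegativeInfinity are singleton sentinels that compare greater
# than (respectively, less than) every other value; handy as extremes when
# sorting version components.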
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
|
drahosj/voting_wars | refs/heads/master | db.py | 2 | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
#SQLite db
engine = create_engine('sqlite:////tmp/test.db', convert_unicode=True)
#MySQL db
#engine = create_engine('mysql+pymysql://root:cdc@localhost/corp', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False, \
autoflush=False, \
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
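# init_db() imports the model definitions so they register with Base, then
# creates all tables on the configured engine.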
def init_db():
import models
Base.metadata.create_all(bind=engine)
|
devlin85/p2pool | refs/heads/master | p2pool/networks/digibyte.py | 2 | from p2pool.bitcoin import networks
PARENT = networks.nets['digibyte']
SHARE_PERIOD = 10 # seconds target spacing
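# 12 hours' worth of 10-second shares (4320 entries) are kept in the share chain.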
CHAIN_LENGTH = 12*60*60//10 # shares
REAL_CHAIN_LENGTH = 12*60*60//10 # shares
TARGET_LOOKBEHIND = 20 # shares diff regulation
SPREAD = 50 # blocks
IDENTIFIER = '48a4ebc31b798115'.decode('hex')
PREFIX = '5685a276c2dd81db'.decode('hex')
P2P_PORT = 8022
MIN_TARGET = 0
MAX_TARGET = 2**256//2**20 - 1
PERSIST = False
WORKER_PORT = 9022
BOOTSTRAP_ADDRS = 'dgb.mastercryptopool.net dgb2.mastercryptopool.net dgb3.mastercryptopool.net p2pool.e-pool.net:29922'.split(' ')
ANNOUNCE_CHANNEL = '#p2pool-alt'
VERSION_CHECK = lambda v: True
|
jmp0xf/raven-python | refs/heads/master | tests/transport/threaded/tests.py | 12 | import mock
import os
import time
from tempfile import mkstemp
from raven.utils.testutils import TestCase
from raven.base import Client
from raven.transport.threaded import ThreadedHTTPTransport
from raven.utils.urlparse import urlparse
class DummyThreadedScheme(ThreadedHTTPTransport):
def __init__(self, *args, **kwargs):
super(ThreadedHTTPTransport, self).__init__(*args, **kwargs)
self.events = []
self.send_delay = 0
def send_sync(self, data, headers, success_cb, failure_cb):
# delay sending the message, to allow us to test that the shutdown
# hook waits correctly
time.sleep(self.send_delay)
self.events.append((data, headers, success_cb, failure_cb))
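# LoggingThreadedScheme appends "<pid> <message>" lines to a file instead of
# sending, so the fork test below can check that both the parent and child
# processes delivered their events.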
class LoggingThreadedScheme(ThreadedHTTPTransport):
def __init__(self, filename, *args, **kwargs):
super(LoggingThreadedScheme, self).__init__(*args, **kwargs)
self.filename = filename
def send_sync(self, data, headers, success_cb, failure_cb):
with open(self.filename, 'a') as log:
log.write("{0} {1}\n".format(os.getpid(), data['message']))
class ThreadedTransportTest(TestCase):
def setUp(self):
self.url = "threaded+http://some_username:some_password@localhost:8143/1"
self.client = Client(dsn=self.url)
@mock.patch('raven.transport.http.HTTPTransport.send')
def test_does_send(self, send):
self.client.captureMessage(message='foo')
time.sleep(0.1)
# TODO: This test could be more precise by ensuring it's sending the same params that are sent
# to the ThreadedHTTPTransport.send() method
self.assertEqual(send.call_count, 1)
def test_shutdown_waits_for_send(self):
url = urlparse(self.url)
transport = DummyThreadedScheme(url)
transport.send_delay = 0.5
data = self.client.build_msg('raven.events.Message', message='foo')
transport.async_send(data, None, None, None)
time.sleep(0.1)
# this should wait for the message to get sent
transport.get_worker().main_thread_terminated()
self.assertEqual(len(transport.events), 1)
def test_fork_with_active_worker(self):
# Test threaded transport when forking with an active worker.
# Forking a process doesn't clone the worker thread - make sure
# logging from both processes still works.
event1 = self.client.build_msg('raven.events.Message', message='parent')
event2 = self.client.build_msg('raven.events.Message', message='child')
url = urlparse(self.url)
fd, filename = mkstemp()
try:
os.close(fd)
transport = LoggingThreadedScheme(filename, url)
# Log from the parent process - starts the worker thread
transport.async_send(event1, None, None, None)
childpid = os.fork()
if childpid == 0:
# Log from the child process
transport.async_send(event2, None, None, None)
# Ensure threaded worker has finished
transport.get_worker().stop()
os._exit(0)
# Wait for the child process to finish
os.waitpid(childpid, 0)
assert os.path.isfile(filename)
# Ensure threaded worker has finished
transport.get_worker().stop()
with open(filename, 'r') as logfile:
events = dict(x.strip().split() for x in logfile.readlines())
# Check parent and child both logged successfully
assert events == {
str(os.getpid()): 'parent',
str(childpid): 'child',
}
finally:
os.remove(filename)
|
jakirkham/ilastik | refs/heads/master | ilastik/utility/__init__.py | 3 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from simpleSignal import SimpleSignal
from bind import bind
from multiLaneOperator import MultiLaneOperatorABC
from operatorSubView import OperatorSubView
from opMultiLaneWrapper import OpMultiLaneWrapper
from log_exception import log_exception
from autocleaned_tempdir import autocleaned_tempdir |
ant-t/heekscnc | refs/heads/master | nc/num_reader.py | 30 | import nc_read as nc
import sys
import math
# a base class for hpgl parsers, and maybe others
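# NumReader walks each input line character by character, echoing the text to
# the writer and pulling out signed decimal numbers via get_number().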
class NumReader(nc.Parser):
def __init__(self, writer):
nc.Parser.__init__(self, writer)
def get_number(self):
number = ''
# skip spaces and commas at start of number
while(self.line_index < self.line_length):
c = self.line[self.line_index]
if c == ' ' or c == ',':
self.parse_word += c
else:
break
self.line_index = self.line_index + 1
while(self.line_index < self.line_length):
c = self.line[self.line_index]
if c == '.' or c == '0' or c == '1' or c == '2' or c == '3' or c == '4' or c == '5' or c == '6' or c == '7' or c == '8' or c == '9' or c == '-':
number += c
else:
break
self.parse_word += c
self.line_index = self.line_index + 1
return number
def add_word(self, color):
self.writer.add_text(self.parse_word, color, None)
self.parse_word = ""
def Parse(self, name):
self.file_in = open(name, 'r')
while self.readline():
self.writer.begin_ncblock()
self.parse_word = ""
self.line_index = 0
self.line_length = len(self.line)
while self.line_index < self.line_length:
c = self.line[self.line_index]
self.parse_word += c
self.ParseFromFirstLetter(c)
self.line_index = self.line_index + 1
self.writer.add_text(self.parse_word, None, None)
self.writer.end_ncblock()
self.file_in.close()
|
metamarcdw/PyBitmessage-I2P | refs/heads/master | src/bitmessageqt/addaddressdialog.py | 18 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'addaddressdialog.ui'
#
# Created: Sat Nov 30 20:35:38 2013
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_AddAddressDialog(object):
def setupUi(self, AddAddressDialog):
AddAddressDialog.setObjectName(_fromUtf8("AddAddressDialog"))
AddAddressDialog.resize(368, 162)
self.formLayout = QtGui.QFormLayout(AddAddressDialog)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label_2 = QtGui.QLabel(AddAddressDialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.label_2)
self.newAddressLabel = QtGui.QLineEdit(AddAddressDialog)
self.newAddressLabel.setObjectName(_fromUtf8("newAddressLabel"))
self.formLayout.setWidget(2, QtGui.QFormLayout.SpanningRole, self.newAddressLabel)
self.label = QtGui.QLabel(AddAddressDialog)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label)
self.lineEditAddress = QtGui.QLineEdit(AddAddressDialog)
self.lineEditAddress.setObjectName(_fromUtf8("lineEditAddress"))
self.formLayout.setWidget(5, QtGui.QFormLayout.SpanningRole, self.lineEditAddress)
self.labelAddressCheck = QtGui.QLabel(AddAddressDialog)
self.labelAddressCheck.setText(_fromUtf8(""))
self.labelAddressCheck.setWordWrap(True)
self.labelAddressCheck.setObjectName(_fromUtf8("labelAddressCheck"))
self.formLayout.setWidget(6, QtGui.QFormLayout.SpanningRole, self.labelAddressCheck)
self.buttonBox = QtGui.QDialogButtonBox(AddAddressDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.formLayout.setWidget(7, QtGui.QFormLayout.FieldRole, self.buttonBox)
self.retranslateUi(AddAddressDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), AddAddressDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), AddAddressDialog.reject)
QtCore.QMetaObject.connectSlotsByName(AddAddressDialog)
def retranslateUi(self, AddAddressDialog):
AddAddressDialog.setWindowTitle(_translate("AddAddressDialog", "Add new entry", None))
self.label_2.setText(_translate("AddAddressDialog", "Label", None))
self.label.setText(_translate("AddAddressDialog", "Address", None))
|
ishank08/scikit-learn | refs/heads/master | examples/datasets/plot_digits_last_image.py | 386 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Digit Dataset
=========================================================
This dataset is made up of 1797 8x8 images. Each image,
like the one shown below, is of a hand-written digit.
In order to utilize an 8x8 figure like this, we'd have to
first transform it into a feature vector with length 64.
See `here
<http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits>`_
for more information about this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
from sklearn import datasets
import matplotlib.pyplot as plt
#Load the digits dataset
digits = datasets.load_digits()
# Display the last digit
plt.figure(1, figsize=(3, 3))
plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
|
spring-week-topos/cinder-week | refs/heads/spring-week | cinder/tests/test_hplefthand.py | 1 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for OpenStack Cinder volume drivers."""
import mock
from hplefthandclient import exceptions as hpexceptions
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder import units
from cinder.volume.drivers.san.hp import hp_lefthand_iscsi
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class HPLeftHandBaseDriver():
cluster_id = 1
volume_name = "fakevolume"
volume_id = 1
volume = {
'name': volume_name,
'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:'
'group01:25366:fakev 0'),
'id': volume_id,
'provider_auth': None,
'size': 1}
serverName = 'fakehost'
server_id = 0
snapshot_name = "fakeshapshot"
snapshot_id = 3
snapshot = {
'name': snapshot_name,
'volume_name': volume_name}
cloned_volume_name = "clone_volume"
cloned_volume = {'name': cloned_volume_name}
cloned_snapshot_name = "clonedshapshot"
cloned_snapshot_id = 5
cloned_snapshot = {
'name': cloned_snapshot_name,
'volume_name': volume_name}
volume_type_id = 4
init_iqn = 'iqn.1993-08.org.debian:01:222'
connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'host': serverName}
driver_startup_call_stack = [
mock.call.login('foo1', 'bar2'),
mock.call.getClusterByName('CloudCluster1'),
mock.call.getCluster(1)]
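# These tests replace the proxy's _cliq_run with _fake_cliq_run, which returns
# canned CLIQ XML responses and asserts on the argument dicts each driver call
# builds.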
class TestHPLeftHandCLIQISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
def _fake_cliq_run(self, verb, cliq_args, check_exit_code=True):
"""Return fake results for the various methods."""
def create_volume(cliq_args):
"""Create volume CLIQ input for test.
input = "createVolume description="fake description"
clusterName=Cluster01 volumeName=fakevolume
thinProvision=0 output=XML size=1GB"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['thinProvision'], '1')
self.assertEqual(cliq_args['size'], '1GB')
return output, None
def delete_volume(cliq_args):
"""Delete volume CLIQ input for test.
input = "deleteVolume volumeName=fakevolume prompt=false
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="164" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['prompt'], 'false')
return output, None
def extend_volume(cliq_args):
"""Extend volume CLIQ input for test.
input = "modifyVolume description="fake description"
volumeName=fakevolume
output=XML size=2GB"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['size'], '2GB')
return output, None
def assign_volume(cliq_args):
"""Assign volume CLIQ input for test.
input = "assignVolumeToServer volumeName=fakevolume
serverName=fakehost
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="174" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['serverName'],
self.connector['host'])
return output, None
def unassign_volume(cliq_args):
"""Unassign volume CLIQ input for test.
input = "unassignVolumeToServer volumeName=fakevolume
serverName=fakehost output=XML
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="205" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['serverName'],
self.connector['host'])
return output, None
def create_snapshot(cliq_args):
"""Create snapshot CLIQ input for test.
input = "createSnapshot description="fake description"
snapshotName=fakesnapshot
volumeName=fakevolume
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
self.assertEqual(cliq_args['volumeName'], self.volume_name)
return output, None
def delete_snapshot(cliq_args):
"""Delete shapshot CLIQ input for test.
input = "deleteSnapshot snapshotName=fakesnapshot prompt=false
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="164" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
self.assertEqual(cliq_args['prompt'], 'false')
return output, None
def create_volume_from_snapshot(cliq_args):
"""Create volume from snapshot CLIQ input for test.
input = "cloneSnapshot description="fake description"
snapshotName=fakesnapshot
volumeName=fakevolume
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
self.assertEqual(cliq_args['volumeName'], self.volume_name)
return output, None
def get_cluster_info(cliq_args):
"""Get cluster info CLIQ input for test.
input = "getClusterInfo clusterName=Cluster01 searchDepth=1
verbose=0 output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="1164" result="0">
<cluster blockSize="1024" description=""
maxVolumeSizeReplication1="622957690"
maxVolumeSizeReplication2="311480287"
minVolumeSize="262144" name="Cluster01"
pageSize="262144" spaceTotal="633697992"
storageNodeCount="2" unprovisionedSpace="622960574"
useVip="true">
<nsm ipAddress="10.0.1.7" name="111-vsa"/>
<nsm ipAddress="10.0.1.8" name="112-vsa"/>
<vip ipAddress="10.0.1.6" subnetMask="255.255.255.0"/>
</cluster></response></gauche>"""
return output, None
def get_volume_info(cliq_args):
"""Get volume info CLIQ input for test.
input = "getVolumeInfo volumeName=fakevolume output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="87" result="0">
<volume autogrowPages="4" availability="online"
blockSize="1024" bytesWritten="0" checkSum="false"
clusterName="Cluster01" created="2011-02-08T19:56:53Z"
deleting="false" description="" groupName="Group01"
initialQuota="536870912" isPrimary="true"
iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:fakev"
maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
minReplication="1" name="vol-b" parity="0" replication="2"
reserveQuota="536870912" scratchQuota="4194304"
serialNumber="9fa5c8b2cca54b2948a63d8"
size="1073741824" stridePages="32" thinProvision="true">
<status description="OK" value="2"/>
<permission access="rw" authGroup="api-1"
chapName="chapusername" chapRequired="true"
id="25369" initiatorSecret="" iqn=""
iscsiEnabled="true" loadBalance="true"
targetSecret="supersecret"/>
</volume></response></gauche>"""
return output, None
def get_snapshot_info(cliq_args):
"""Get snapshot info CLIQ input for test.
input = "getSnapshotInfo snapshotName=fakesnapshot output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="87" result="0">
<snapshot applicationManaged="false" autogrowPages="32768"
automatic="false" availability="online" bytesWritten="0"
clusterName="CloudCluster1" created="2013-08-26T07:03:44Z"
deleting="false" description="" groupName="CloudGroup1"
id="730" initialQuota="536870912" isPrimary="true"
iscsiIqn="iqn.2003-10.com.lefthandnetworks:cloudgroup1:73"
md5="a64b4f850539c07fb5ce3cee5db1fcce" minReplication="1"
name="snapshot-7849288e-e5e8-42cb-9687-9af5355d674b"
replication="2" reserveQuota="536870912" scheduleId="0"
scratchQuota="4194304" scratchWritten="0"
serialNumber="a64b4f850539c07fb5ce3cee5db1fcce"
size="2147483648" stridePages="32"
volumeSerial="a64b4f850539c07fb5ce3cee5db1fcce">
<status description="OK" value="2"/>
<permission access="rw"
authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
chapName="chapusername" chapRequired="true" id="25369"
initiatorSecret="" iqn="" iscsiEnabled="true"
loadBalance="true" targetSecret="supersecret"/>
</snapshot></response></gauche>"""
return output, None
def get_server_info(cliq_args):
"""Get server info CLIQ input for test.
input = "getServerInfo serverName=fakeName"
"""
output = """<gauche version="1.0"><response result="0"/>
</gauche>"""
return output, None
def create_server(cliq_args):
"""Create server CLIQ input for test.
input = "createServer serverName=fakeName initiator=something"
"""
output = """<gauche version="1.0"><response result="0"/>
</gauche>"""
return output, None
def test_error(cliq_args):
output = """<gauche version="1.0">
<response description="Volume '134234' not found."
name="CliqVolumeNotFound" processingTime="1083"
result="8000100c"/>
</gauche>"""
return output, None
self.assertEqual(cliq_args['output'], 'XML')
try:
verbs = {'createVolume': create_volume,
'deleteVolume': delete_volume,
'modifyVolume': extend_volume,
'assignVolumeToServer': assign_volume,
'unassignVolumeToServer': unassign_volume,
'createSnapshot': create_snapshot,
'deleteSnapshot': delete_snapshot,
'cloneSnapshot': create_volume_from_snapshot,
'getClusterInfo': get_cluster_info,
'getVolumeInfo': get_volume_info,
'getSnapshotInfo': get_snapshot_info,
'getServerInfo': get_server_info,
'createServer': create_server,
'testError': test_error}
except KeyError:
raise NotImplementedError()
return verbs[verb](cliq_args)
def setUp(self):
super(TestHPLeftHandCLIQISCSIDriver, self).setUp()
self.properties = {
'target_discoverd': True,
'target_portal': '10.0.1.6:3260',
'target_iqn':
'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
'volume_id': self.volume_id}
def tearDown(self):
super(TestHPLeftHandCLIQISCSIDriver, self).tearDown()
def default_mock_conf(self):
mock_conf = mock.Mock()
mock_conf.san_ip = '10.10.10.10'
mock_conf.san_login = 'foo'
mock_conf.san_password = 'bar'
mock_conf.san_ssh_port = 16022
mock_conf.san_clustername = 'CloudCluster1'
mock_conf.hplefthand_api_url = None
return mock_conf
def setup_driver(self, config=None):
if config is None:
config = self.default_mock_conf()
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
self.driver.proxy._cliq_run = mock.Mock(
side_effect=self._fake_cliq_run)
return self.driver.proxy._cliq_run
def test_create_volume(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name, 'size': 1}
model_update = self.driver.create_volume(volume)
expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(model_update['provider_location'], expected_location)
expected = [
mock.call(
'createVolume', {
'clusterName': 'CloudCluster1',
'volumeName': 'fakevolume',
'thinProvision': '1',
'output': 'XML',
'size': '1GB'},
True),
mock.call(
'getVolumeInfo', {
'volumeName': 'fakevolume',
'output': 'XML'},
True),
mock.call(
'getClusterInfo', {
'clusterName': 'Cluster01',
'searchDepth': '1',
'verbose': '0',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_delete_volume(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
self.driver.delete_volume(volume)
expected = [
mock.call(
'getVolumeInfo', {
'volumeName': 'fakevolume',
'output': 'XML'},
True),
mock.call(
'deleteVolume', {
'volumeName': 'fakevolume',
'prompt': 'false',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_extend_volume(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
self.driver.extend_volume(volume, 2)
expected = [
mock.call(
'modifyVolume', {
'volumeName': 'fakevolume',
'output': 'XML',
'size': '2GB'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_initialize_connection(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
self.driver.proxy._get_iscsi_properties = mock.Mock(
return_value=self.properties)
volume = {'name': self.volume_name}
result = self.driver.initialize_connection(volume,
self.connector)
self.assertEqual(result['driver_volume_type'], 'iscsi')
self.assertDictMatch(result['data'], self.properties)
expected = [
mock.call(
'getServerInfo', {
'output': 'XML',
'serverName': 'fakehost'},
False),
mock.call(
'assignVolumeToServer', {
'volumeName': 'fakevolume',
'serverName': 'fakehost',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_terminate_connection(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
self.driver.terminate_connection(volume, self.connector)
expected = [
mock.call(
'unassignVolumeToServer', {
'volumeName': 'fakevolume',
'serverName': 'fakehost',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_create_snapshot(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
snapshot = {'name': self.snapshot_name,
'volume_name': self.volume_name}
self.driver.create_snapshot(snapshot)
expected = [
mock.call(
'createSnapshot', {
'snapshotName': 'fakeshapshot',
'output': 'XML',
'inheritAccess': 1,
'volumeName': 'fakevolume'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_delete_snapshot(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
snapshot = {'name': self.snapshot_name}
self.driver.delete_snapshot(snapshot)
expected = [
mock.call(
'getSnapshotInfo', {
'snapshotName': 'fakeshapshot',
'output': 'XML'},
True),
mock.call(
'deleteSnapshot', {
'snapshotName': 'fakeshapshot',
'prompt': 'false',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_create_volume_from_snapshot(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
snapshot = {'name': self.snapshot_name}
model_update = self.driver.create_volume_from_snapshot(volume,
snapshot)
expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(model_update['provider_location'], expected_location)
expected = [
mock.call(
'cloneSnapshot', {
'snapshotName': 'fakeshapshot',
'output': 'XML',
'volumeName': 'fakevolume'},
True),
mock.call(
'getVolumeInfo', {
'volumeName': 'fakevolume',
'output': 'XML'},
True),
mock.call(
'getClusterInfo', {
'clusterName': 'Cluster01',
'searchDepth': '1',
'verbose': '0',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_get_volume_stats(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume_stats = self.driver.get_volume_stats(True)
self.assertEqual(volume_stats['vendor_name'], 'Hewlett-Packard')
self.assertEqual(volume_stats['storage_protocol'], 'iSCSI')
expected = [
mock.call('getClusterInfo', {
'searchDepth': 1,
'clusterName': 'CloudCluster1',
'output': 'XML'}, True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
driver_startup_call_stack = [
mock.call.login('foo1', 'bar2'),
mock.call.getClusterByName('CloudCluster1'),
mock.call.getCluster(1)]
def setUp(self):
super(TestHPLeftHandRESTISCSIDriver, self).setUp()
def tearDown(self):
super(TestHPLeftHandRESTISCSIDriver, self).tearDown()
def default_mock_conf(self):
mock_conf = mock.Mock()
mock_conf.hplefthand_api_url = 'http://fake.foo:8080/lhos'
mock_conf.hplefthand_username = 'foo1'
mock_conf.hplefthand_password = 'bar2'
mock_conf.hplefthand_iscsi_chap_enabled = False
mock_conf.hplefthand_debug = False
mock_conf.hplefthand_clustername = "CloudCluster1"
return mock_conf
@mock.patch('hplefthandclient.client.HPLeftHandClient', spec=True)
def setup_driver(self, _mock_client, config=None):
if config is None:
config = self.default_mock_conf()
_mock_client.return_value.getClusterByName.return_value = {
'id': 1, 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]}
_mock_client.return_value.getCluster.return_value = {
'spaceTotal': units.GiB * 500,
'spaceAvailable': units.GiB * 250}
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
return _mock_client.return_value
def test_create_volume(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
# execute driver
volume_info = self.driver.create_volume(self.volume)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
# mock HTTPServerError
mock_client.createVolume.side_effect = hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.volume)
@mock.patch.object(
volume_types,
'get_volume_type',
return_value={'extra_specs': {'hplh:provisioning': 'full'}})
def test_create_volume_with_es(self, _mock_volume_type):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
        # execute create_volume
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
{'isThinProvisioned': False, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
def test_delete_volume(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute delete_volume
self.driver.delete_volume(self.volume)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.deleteVolume(self.volume_id)]
mock_client.assert_has_calls(expected)
# mock HTTPNotFound (volume not found)
mock_client.getVolumeByName.side_effect = hpexceptions.HTTPNotFound()
# no exception should escape method
self.driver.delete_volume(self.volume)
# mock HTTPConflict
mock_client.deleteVolume.side_effect = hpexceptions.HTTPConflict()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, self.volume_id)
def test_extend_volume(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute extend_volume
self.driver.extend_volume(self.volume, 2)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(1, {'size': 2 * units.GiB})]
# validate call chain
mock_client.assert_has_calls(expected)
# mock HTTPServerError (array failure)
mock_client.modifyVolume.side_effect = hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, self.volume, 2)
def test_initialize_connection(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
mock_client.createServer.return_value = {'id': self.server_id}
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute initialize_connection
result = self.driver.initialize_connection(
self.volume,
self.connector)
# validate
self.assertEqual(result['driver_volume_type'], 'iscsi')
self.assertEqual(result['data']['target_discovered'], False)
self.assertEqual(result['data']['volume_id'], self.volume_id)
self.assertTrue('auth_method' not in result['data'])
expected = self.driver_startup_call_stack + [
mock.call.getServerByName('fakehost'),
mock.call.createServer
(
'fakehost',
'iqn.1993-08.org.debian:01:222',
None
),
mock.call.getVolumeByName('fakevolume'),
mock.call.addServerAccess(1, 0)]
# validate call chain
mock_client.assert_has_calls(expected)
# mock HTTPServerError (array failure)
mock_client.createServer.side_effect = hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.initialize_connection, self.volume, self.connector)
def test_initialize_connection_with_chaps(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
mock_client.createServer.return_value = {
'id': self.server_id,
'chapAuthenticationRequired': True,
'chapTargetSecret': 'dont_tell'}
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute initialize_connection
result = self.driver.initialize_connection(
self.volume,
self.connector)
# validate
self.assertEqual(result['driver_volume_type'], 'iscsi')
self.assertEqual(result['data']['target_discovered'], False)
self.assertEqual(result['data']['volume_id'], self.volume_id)
self.assertEqual(result['data']['auth_method'], 'CHAP')
expected = self.driver_startup_call_stack + [
mock.call.getServerByName('fakehost'),
mock.call.createServer
(
'fakehost',
'iqn.1993-08.org.debian:01:222',
None
),
mock.call.getVolumeByName('fakevolume'),
mock.call.addServerAccess(1, 0)]
# validate call chain
mock_client.assert_has_calls(expected)
def test_terminate_connection(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getServerByName.return_value = {'id': self.server_id}
# execute terminate_connection
self.driver.terminate_connection(self.volume, self.connector)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.getServerByName('fakehost'),
mock.call.removeServerAccess(1, 0)]
# validate call chain
mock_client.assert_has_calls(expected)
mock_client.getVolumeByName.side_effect = hpexceptions.HTTPNotFound()
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.terminate_connection,
self.volume,
self.connector)
def test_create_snapshot(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute create_snapshot
self.driver.create_snapshot(self.snapshot)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.createSnapshot(
'fakeshapshot',
1,
{'inheritAccess': True})]
# validate call chain
mock_client.assert_has_calls(expected)
# mock HTTPServerError (array failure)
mock_client.getVolumeByName.side_effect = hpexceptions.HTTPNotFound()
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_snapshot, self.snapshot)
def test_delete_snapshot(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id}
# execute delete_snapshot
self.driver.delete_snapshot(self.snapshot)
expected = self.driver_startup_call_stack + [
mock.call.getSnapshotByName('fakeshapshot'),
mock.call.deleteSnapshot(3)]
# validate call chain
mock_client.assert_has_calls(expected)
mock_client.getSnapshotByName.side_effect = hpexceptions.HTTPNotFound()
# no exception is thrown, just error msg is logged
self.driver.delete_snapshot(self.snapshot)
# mock HTTPServerError (array failure)
ex = hpexceptions.HTTPServerError({'message': 'Some message.'})
mock_client.getSnapshotByName.side_effect = ex
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
self.snapshot)
# mock HTTPServerError because the snap is in use
ex = hpexceptions.HTTPServerError({
'message':
'Hey, dude cannot be deleted because it is a clone point duh.'})
mock_client.getSnapshotByName.side_effect = ex
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.SnapshotIsBusy,
self.driver.delete_snapshot,
self.snapshot)
def test_create_volume_from_snapshot(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id}
mock_client.cloneSnapshot.return_value = {
'iscsiIqn': self.connector['initiator']}
# execute create_volume_from_snapshot
model_update = self.driver.create_volume_from_snapshot(
self.volume, self.snapshot)
expected_iqn = 'iqn.1993-08.org.debian:01:222 0'
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(model_update['provider_location'], expected_location)
expected = self.driver_startup_call_stack + [
mock.call.getSnapshotByName('fakeshapshot'),
mock.call.cloneSnapshot('fakevolume', 3)]
# validate call chain
mock_client.assert_has_calls(expected)
def test_create_cloned_volume(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute create_cloned_volume
self.driver.create_cloned_volume(
self.cloned_volume, self.volume)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.cloneVolume('clone_volume', 1)]
# validate call chain
mock_client.assert_has_calls(expected)
@mock.patch.object(volume_types, 'get_volume_type')
def test_extra_spec_mapping(self, _mock_get_volume_type):
        # set up driver with default configuration
self.setup_driver()
# 2 extra specs we don't care about, and
# 1 that will get mapped
_mock_get_volume_type.return_value = {
'extra_specs': {
'foo:bar': 'fake',
'bar:foo': 1234,
'hplh:provisioning': 'full'}}
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = self.volume_type_id
# get the extra specs of interest from this volume's volume type
volume_extra_specs = self.driver.proxy._get_volume_extra_specs(
volume_with_vt)
extra_specs = self.driver.proxy._get_lh_extra_specs(
volume_extra_specs,
hp_lefthand_rest_proxy.extra_specs_key_map.keys())
# map the extra specs key/value pairs to key/value pairs
# used as optional configuration values by the LeftHand backend
optional = self.driver.proxy._map_extra_specs(extra_specs)
self.assertDictMatch({'isThinProvisioned': False}, optional)
@mock.patch.object(volume_types, 'get_volume_type')
def test_extra_spec_mapping_invalid_value(self, _mock_get_volume_type):
        # set up driver with default configuration
self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = self.volume_type_id
_mock_get_volume_type.return_value = {
'extra_specs': {
                # r-07 is an invalid value for hplh:data_pl
'hplh:data_pl': 'r-07',
'hplh:ao': 'true'}}
# get the extra specs of interest from this volume's volume type
volume_extra_specs = self.driver.proxy._get_volume_extra_specs(
volume_with_vt)
extra_specs = self.driver.proxy._get_lh_extra_specs(
volume_extra_specs,
hp_lefthand_rest_proxy.extra_specs_key_map.keys())
# map the extra specs key/value pairs to key/value pairs
# used as optional configuration values by the LeftHand backend
optional = self.driver.proxy._map_extra_specs(extra_specs)
# {'hplh:ao': 'true'} should map to
# {'isAdaptiveOptimizationEnabled': True}
# without hplh:data_pl since r-07 is an invalid value
self.assertDictMatch({'isAdaptiveOptimizationEnabled': True}, optional)
def test_retype_with_no_LH_extra_specs(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'foo': False, 'bar': 2, 'error': True}
key_specs_new = {'foo': True, 'bar': 5, 'error': False}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume')]
# validate call chain
mock_client.assert_has_calls(expected)
def test_retype_with_only_LH_extra_specs(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'hplh:provisioning': 'thin'}
key_specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(
1, {
'isThinProvisioned': False,
'isAdaptiveOptimizationEnabled': True})]
# validate call chain
mock_client.assert_has_calls(expected)
def test_retype_with_both_extra_specs(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'hplh:provisioning': 'full', 'foo': 'bar'}
key_specs_new = {'hplh:provisioning': 'thin', 'foo': 'foobar'}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(1, {'isThinProvisioned': True})]
# validate call chain
mock_client.assert_has_calls(expected)
def test_retype_same_extra_specs(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
key_specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'false'}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(
1,
{'isAdaptiveOptimizationEnabled': False})]
# validate call chain
mock_client.assert_has_calls(expected)
def test_migrate_no_location(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
host = {'host': self.serverName, 'capabilities': {}}
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertFalse(migrated)
# only startup code is called
mock_client.assert_has_calls(self.driver_startup_call_stack)
# and nothing else
self.assertEqual(
len(self.driver_startup_call_stack),
len(mock_client.method_calls))
def test_migrate_incorrect_vip(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getClusterByName.return_value = {
"virtualIPAddresses": [{
"ipV4Address": "10.10.10.10",
"ipV4NetMask": "255.255.240.0"}]}
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
location = (self.driver.proxy.DRIVER_LOCATION % {
'cluster': 'New_CloudCluster',
'vip': '10.10.10.111'})
host = {
'host': self.serverName,
'capabilities': {'location_info': location}}
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertFalse(migrated)
expected = self.driver_startup_call_stack + [
mock.call.getClusterByName('New_CloudCluster')]
mock_client.assert_has_calls(expected)
# and nothing else
self.assertEqual(
len(expected),
len(mock_client.method_calls))
def test_migrate_with_location(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getClusterByName.return_value = {
"virtualIPAddresses": [{
"ipV4Address": "10.10.10.111",
"ipV4NetMask": "255.255.240.0"}]}
mock_client.getVolumeByName.return_value = {'id': self.volume_id,
'iscsiSessions': None}
mock_client.getVolume.return_value = {'snapshots': {
'resource': None}}
location = (self.driver.proxy.DRIVER_LOCATION % {
'cluster': 'New_CloudCluster',
'vip': '10.10.10.111'})
host = {
'host': self.serverName,
'capabilities': {'location_info': location}}
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertTrue(migrated)
expected = self.driver_startup_call_stack + [
mock.call.getClusterByName('New_CloudCluster'),
mock.call.getVolumeByName('fakevolume'),
mock.call.getVolume(
1,
'fields=snapshots,snapshots[resource[members[name]]]'),
mock.call.modifyVolume(1, {'clusterName': 'New_CloudCluster'})]
mock_client.assert_has_calls(expected)
# and nothing else
self.assertEqual(
len(expected),
len(mock_client.method_calls))
def test_migrate_with_Snapshots(self):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getClusterByName.return_value = {
"virtualIPAddresses": [{
"ipV4Address": "10.10.10.111",
"ipV4NetMask": "255.255.240.0"}]}
mock_client.getVolumeByName.return_value = {
'id': self.volume_id,
'iscsiSessions': None}
mock_client.getVolume.return_value = {'snapshots': {
'resource': 'snapfoo'}}
location = (self.driver.proxy.DRIVER_LOCATION % {
'cluster': 'New_CloudCluster',
'vip': '10.10.10.111'})
host = {
'host': self.serverName,
'capabilities': {'location_info': location}}
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertFalse(migrated)
expected = self.driver_startup_call_stack + [
mock.call.getClusterByName('New_CloudCluster'),
mock.call.getVolumeByName('fakevolume'),
mock.call.getVolume(
1,
'fields=snapshots,snapshots[resource[members[name]]]')]
mock_client.assert_has_calls(expected)
# and nothing else
self.assertEqual(
len(expected),
len(mock_client.method_calls))
@mock.patch.object(volume_types, 'get_volume_type',
return_value={'extra_specs': {'hplh:ao': 'true'}})
def test_create_volume_with_ao_true(self, _mock_volume_type):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
        # make sure createVolume is called without an
        # isAdaptiveOptimizationEnabled entry when hplh:ao is 'true'
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
@mock.patch.object(volume_types, 'get_volume_type',
return_value={'extra_specs': {'hplh:ao': 'false'}})
def test_create_volume_with_ao_false(self, _mock_volume_type):
        # set up driver with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
# make sure createVolume is called with
# isAdaptiveOptimizationEnabled == false
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
{'isThinProvisioned': True,
'clusterName': 'CloudCluster1',
'isAdaptiveOptimizationEnabled': False})]
mock_client.assert_has_calls(expected)
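# --- Illustrative sketch (not part of the upstream test module) ---
# A hedged example of how a further REST-driver test could follow the same
# pattern as the tests above: build the mocked client with setup_driver(),
# stub the REST calls the driver is expected to make, then compare the call
# chain against driver_startup_call_stack plus the per-test calls. The test
# name below is hypothetical; the calls mirror test_extend_volume.
#
#     def test_extend_volume_to_three_gib(self):
#         mock_client = self.setup_driver()
#         mock_client.getVolumeByName.return_value = {'id': self.volume_id}
#         self.driver.extend_volume(self.volume, 3)
#         expected = self.driver_startup_call_stack + [
#             mock.call.getVolumeByName('fakevolume'),
#             mock.call.modifyVolume(1, {'size': 3 * units.GiB})]
#         mock_client.assert_has_calls(expected)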
|
cchanrhiza/python-pptx | refs/heads/master | docs/conf.py | 4 | # -*- coding: utf-8 -*-
#
# python-pptx documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 29 13:59:35 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from pptx import __version__
# -- General configuration --------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-pptx'
copyright = u'2012, 2013, Steve Canny'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# A string of reStructuredText that will be included at the end of every source
# file that is read. This is the right place to add substitutions that should
# be available in every file.
rst_epilog = """
.. |Adjustment| replace:: :class:`Adjustment`
.. |AdjustmentCollection| replace:: :class:`AdjustmentCollection`
.. |AttributeError| replace:: :exc:`AttributeError`
.. |Axis| replace:: :class:`Axis`
.. |BarPlot| replace:: :class:`.BarPlot`
.. |BarSeries| replace:: :class:`.BarSeries`
.. |BaseFileSystem| replace:: :class:`BaseFileSystem`
.. |BaseShape| replace:: :class:`BaseShape`
.. |_Cell| replace:: :class:`_Cell`
.. |Chart| replace:: :class:`.Chart`
.. |ChartData| replace:: :class:`.ChartData`
.. |ChartPart| replace:: :class:`.ChartPart`
.. |ChartXmlWriter| replace:: :class:`.ChartXmlWriter`
.. |Collection| replace:: :class:`Collection`
.. |ColorFormat| replace:: :class:`ColorFormat`
.. |_Column| replace:: :class:`_Column`
.. |_ColumnCollection| replace:: :class:`_ColumnCollection`
.. |CoreProperties| replace:: :class:`CoreProperties`
.. |DataLabels| replace:: :class:`.DataLabels`
.. |datetime| replace:: :class:`datetime.datetime`
.. |DirectoryFileSystem| replace:: :class:`DirectoryFileSystem`
.. |Emu| replace:: :class:`.Emu`
.. |False| replace:: :class:`False`
.. |FileSystem| replace:: :class:`FileSystem`
.. |FillFormat| replace:: :class:`.FillFormat`
.. |float| replace:: :class:`float`
.. |Font| replace:: :class:`.Font`
.. |GraphicFrame| replace:: :class:`.GraphicFrame`
.. |_Hyperlink| replace:: :class:`_Hyperlink`
.. |Image| replace:: :class:`.Image`
.. |ImagePart| replace:: :class:`.ImagePart`
.. |Inches| replace:: :class:`.Inches`
.. |int| replace:: :class:`int`
.. |InvalidXmlError| replace:: :exc:`InvalidXmlError`
.. |KeyError| replace:: :exc:`KeyError`
.. |LayoutPlaceholder| replace:: :class:`LayoutPlaceholder`
.. |_LayoutPlaceholders| replace:: :class:`_LayoutPlaceholders`
.. |_LayoutShapeTree| replace:: :class:`_LayoutShapeTree`
.. |Legend| replace:: :class:`.Legend`
.. |Length| replace:: :class:`.Length`
.. |LineFormat| replace:: :class:`.LineFormat`
.. |LineSeries| replace:: :class:`.LineSeries`
.. |list| replace:: :class:`list`
.. |MasterPlaceholder| replace:: :class:`MasterPlaceholder`
.. |_MasterPlaceholders| replace:: :class:`_MasterPlaceholders`
.. |_MasterShapeTree| replace:: :class:`_MasterShapeTree`
.. |None| replace:: :class:`None`
.. |NotImplementedError| replace:: :exc:`NotImplementedError`
.. |OpcPackage| replace:: :class:`.OpcPackage`
.. |Package| replace:: :class:`Package`
.. |PackURI| replace:: :class:`.PackURI`
.. |_Paragraph| replace:: :class:`_Paragraph`
.. |Part| replace:: :class:`Part`
.. |PartTypeSpec| replace:: :class:`PartTypeSpec`
.. |Picture| replace:: :class:`.Picture`
.. |_PlaceholderFormat| replace:: :class:`._PlaceholderFormat`
.. |PlaceholderGraphicFrame| replace:: :class:`.PlaceholderGraphicFrame`
.. |PlaceholderPicture| replace:: :class:`.PlaceholderPicture`
.. |Plot| replace:: :class:`.Plot`
.. |Plots| replace:: :class:`.Plots`
.. |pp| replace:: `python-pptx`
.. |Presentation| replace:: :class:`~pptx.Presentation`
.. |Pt| replace:: :class:`.Pt`
.. |_Relationship| replace:: :class:`._Relationship`
.. |RelationshipCollection| replace:: :class:`RelationshipCollection`
.. |RGBColor| replace:: :class:`.RGBColor`
.. |_Row| replace:: :class:`_Row`
.. |_RowCollection| replace:: :class:`_RowCollection`
.. |_Run| replace:: :class:`_Run`
.. |Series| replace:: :class:`.Series`
.. |SeriesCollection| replace:: :class:`.SeriesCollection`
.. |Shape| replace:: :class:`.Shape`
.. |ShapeCollection| replace:: :class:`.ShapeCollection`
.. |ShapeTree| replace:: :class:`.ShapeTree`
.. |Slide| replace:: :class:`Slide`
.. |_Slides| replace:: :class:`._Slides`
.. |SlideLayout| replace:: :class:`.SlideLayout`
.. |SlideMaster| replace:: :class:`.SlideMaster`
.. |_SlidePlaceholders| replace:: :class:`._SlidePlaceholders`
.. |SlideShapeTree| replace:: :class:`.SlideShapeTree`
.. |str| replace:: :class:`str`
.. |Table| replace:: :class:`Table`
.. |TextFrame| replace:: :class:`.TextFrame`
.. |TickLabels| replace:: :class:`.TickLabels`
.. |True| replace:: :class:`True`
.. |ValueAxis| replace:: :class:`.ValueAxis`
.. |ValueError| replace:: :exc:`ValueError`
.. |WorkbookWriter| replace:: :class:`.WorkbookWriter`
.. |ZipFileSystem| replace:: :class:`ZipFileSystem`
"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'armstrong'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'relations.html', 'sidebarlinks.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-pptxdoc'
# -- Options for LaTeX output -----------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'python-pptx.tex', u'python-pptx Documentation',
u'Steve Canny', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -----------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-pptx', u'python-pptx Documentation',
[u'Steve Canny'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ---------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python-pptx', u'python-pptx Documentation',
u'Steve Canny', 'python-pptx', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
sujeet4github/MyLangUtils | refs/heads/master | LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/wtforms/ext/django/__init__.py | 177 | import warnings
warnings.warn(
'wtforms.ext.django is deprecated, and will be removed in WTForms 3.0. '
'The package has been split out into its own package, wtforms-django: '
'https://github.com/wtforms/wtforms-django',
DeprecationWarning
)
|
chenjun0210/tensorflow | refs/heads/master | tensorflow/python/ops/clip_ops.py | 67 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for clipping (gradient, weight) tensors to min/max values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
def clip_by_value(t, clip_value_min, clip_value_max,
name=None):
"""Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
Args:
t: A `Tensor`.
clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The minimum value to clip by.
clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The maximum value to clip by.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
Raises:
ValueError: if the clip tensors would trigger array broadcasting
that would make the returned tensor larger than the input.
"""
with ops.name_scope(name, "clip_by_value",
[t, clip_value_min, clip_value_max]) as name:
t = ops.convert_to_tensor(t, name="t")
# Go through list of tensors, for each value in each tensor clip
t_min = math_ops.minimum(t, clip_value_max)
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
_ = t.shape.merge_with(t_min.shape)
t_max = math_ops.maximum(t_min, clip_value_min, name=name)
_ = t.shape.merge_with(t_max.shape)
return t_max
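# Illustrative usage sketch (the tensor values are assumptions chosen only to
# show the clipping behaviour; `tf` refers to the public TensorFlow namespace):
#
#     t = tf.constant([-2.0, 0.5, 3.0])
#     clipped = tf.clip_by_value(t, clip_value_min=-1.0, clip_value_max=1.0)
#     # evaluates to [-1.0, 0.5, 1.0]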
def clip_by_norm(t, clip_norm, axes=None, name=None):
"""Clips tensor values to a maximum L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,
along the dimensions given in `axes`. Specifically, in the default case
where all dimensions are used for calculation, if the L2-norm of `t` is
already less than or equal to `clip_norm`, then `t` is not modified. If
the L2-norm is greater than `clip_norm`, then this operation returns a
tensor of the same type and shape as `t` with its values set to:
`t * clip_norm / l2norm(t)`
In this case, the L2-norm of the output tensor is `clip_norm`.
  As another example, if `t` is a matrix and `axes == [1]`, then each row
  of the output will have L2-norm less than or equal to `clip_norm`. If
  `axes == [0]` instead, each column of the output will be clipped.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
to use for computing the L2-norm. If `None` (the default), uses all
dimensions.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, axes, keep_dims=True))
intermediate = t * clip_norm
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
_ = t.shape.merge_with(intermediate.shape)
tclip = array_ops.identity(intermediate * math_ops.minimum(
l2norm_inv, constant_op.constant(1.0, dtype=t.dtype) / clip_norm),
name=name)
return tclip
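# Illustrative usage sketch (matrix values are assumptions; with axes=[1] each
# row is normalized independently, and rows already within the limit are left
# unchanged):
#
#     t = tf.constant([[3.0, 4.0], [0.6, 0.8]])    # row norms 5.0 and 1.0
#     clipped = tf.clip_by_norm(t, clip_norm=1.0, axes=[1])
#     # evaluates to [[0.6, 0.8], [0.6, 0.8]]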
def global_norm(t_list, name=None):
"""Computes the global norm of multiple tensors.
Given a tuple or list of tensors `t_list`, this operation returns the
global norm of the elements in all tensors in `t_list`. The global norm is
computed as:
`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`
Any entries in `t_list` that are of type None are ignored.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
name: A name for the operation (optional).
Returns:
A 0-D (scalar) `Tensor` of type `float`.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
with ops.name_scope(name, "global_norm", t_list) as name:
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
half_squared_norms = []
for v in values:
if v is not None:
with ops.colocate_with(v):
half_squared_norms.append(gen_nn_ops.l2_loss(v))
half_squared_norm = math_ops.reduce_sum(array_ops.stack(half_squared_norms))
norm = math_ops.sqrt(
half_squared_norm *
constant_op.constant(2.0, dtype=half_squared_norm.dtype),
name="global_norm")
return norm
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
"""Clips values of multiple tensors by the ratio of the sum of their norms.
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
this operation returns a list of clipped tensors `list_clipped`
and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,
if you've already computed the global norm for `t_list`, you can specify
the global norm with `use_norm`.
To perform the clipping, the values `t_list[i]` are set to:
t_list[i] * clip_norm / max(global_norm, clip_norm)
where:
global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))
If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
otherwise they're all shrunk by the global ratio.
Any of the entries of `t_list` that are of type `None` are ignored.
This is the correct way to perform gradient clipping (for example, see
[Pascanu et al., 2012](http://arxiv.org/abs/1211.5063)
([pdf](http://arxiv.org/pdf/1211.5063.pdf))).
However, it is slower than `clip_by_norm()` because all the parameters must be
ready before the clipping operation can be performed.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
norm to use. If not provided, `global_norm()` is used to compute the norm.
name: A name for the operation (optional).
Returns:
list_clipped: A list of `Tensors` of the same type as `list_t`.
global_norm: A 0-D (scalar) `Tensor` representing the global norm.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
if use_norm is None:
use_norm = global_norm(t_list, name)
with ops.name_scope(name, "clip_by_global_norm",
t_list + [clip_norm]) as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * math_ops.minimum(
1.0 / use_norm,
constant_op.constant(1.0, dtype=use_norm.dtype) / clip_norm)
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = []
for i, v in enumerate(values):
if v is None:
values_clipped.append(None)
else:
with ops.colocate_with(v):
values_clipped.append(
array_ops.identity(v * scale, name="%s_%d" % (name, i)))
list_clipped = [
ops.IndexedSlices(c_v, t.indices, t.dense_shape)
if isinstance(t, ops.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
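# Illustrative usage sketch for gradient clipping (the `loss` tensor and
# `optimizer` below are assumptions; only the clipping call comes from this
# module):
#
#     grads_and_vars = optimizer.compute_gradients(loss)
#     grads, variables = zip(*grads_and_vars)
#     clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
#     train_op = optimizer.apply_gradients(zip(clipped_grads, variables))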
def clip_by_average_norm(t, clip_norm, name=None):
"""Clips tensor values to a maximum average L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its average L2-norm is less than or equal to
`clip_norm`. Specifically, if the average L2-norm is already less than or
equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
greater than `clip_norm`, then this operation returns a tensor of the same
type and shape as `t` with its values set to:
`t * clip_norm / l2norm_avg(t)`
In this case, the average L2-norm of the output tensor is `clip_norm`.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm per element, clip elements by ratio of clip_norm to
# L2-norm per element
n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
tclip = array_ops.identity(
t * clip_norm * math_ops.minimum(
l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
name=name)
return tclip
|
simongoffin/website_version | refs/heads/Multi_fonctionnel | addons/pad/py_etherpad/__init__.py | 505 | """Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class EtherpadLiteClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = 1 # TODO probably 1.1 sometime soon
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%d/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
    # These authors are bound to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
# SESSIONS
    # Sessions can be created between a group and an author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
    # GROUPID$PADNAME. A security manager controls access to them, and it is
    # forbidden for normal pads to include a $ in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
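# --- Illustrative usage sketch (not part of the upstream module) ---
# A minimal, hedged example of driving the client defined above; the API key,
# base URL and pad id are placeholder assumptions for illustration only.
if __name__ == '__main__':
    client = EtherpadLiteClient(apiKey='EXAMPLEAPIKEY',
                                baseUrl='http://localhost:9001/api')
    client.createPad('example-pad', text='Hello pad')    # create with seed text
    client.setText('example-pad', 'Updated pad text')    # overwrite contents
    print(client.getText('example-pad'))                 # e.g. {'text': 'Updated pad text'}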
|
damiencalloway/djtut | refs/heads/master | mysite/env/lib/python2.7/site-packages/django/contrib/gis/geoip/base.py | 105 | import os
import re
from ctypes import c_char_p
from django.core.validators import ipv4_re
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIPRecord, GeoIPTag, GeoIP_open, GeoIP_delete, GeoIP_database_info,
GeoIP_lib_version, GeoIP_record_by_addr, GeoIP_record_by_name,
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name)
from django.utils import six
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
#### GeoIP classes ####
class GeoIPException(Exception): pass
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been
# updated, reload filehandle and/or memory cache. This option
# is not thread safe.
#
# GEOIP_INDEX_CACHE - just cache the most frequently accessed index
# portion of the database, resulting in faster lookups than
# GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
# useful for larger databases such as GeoIP Organization and
# GeoIP City. Note, for GeoIP Country, Region and Netspeed
# databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
# GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
# on Windows).
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
GEOIP_MMAP_CACHE = 8
cache_options = dict((opt, None) for opt in (0, 1, 2, 4, 8))
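    # Illustrative sketch (the path is hypothetical, not part of the class):
    #
    #     g = GeoIP(path='/usr/share/GeoIP/', cache=GeoIP.GEOIP_MEMORY_CACHE)
    #     g.country('djangoproject.com')  # {'country_code': ..., 'country_name': ...}
    #     g.city('24.124.1.80')           # dict of city fields, or None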
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
        Initializes the GeoIP object. No parameters are required to use the
        default settings; keyword arguments may be passed in to customize the
        locations of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4, 8) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
settings, respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = GeoIP_open(country_db, cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = GeoIP_open(city_db, cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = GeoIP_open(path, cache)
info = GeoIP_database_info(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if self._country: GeoIP_delete(self._country)
if self._city: GeoIP_delete(self._city)
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# GeoIP only takes ASCII-encoded strings.
query = query.encode('ascii')
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller.
return query
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
query = self._check_query(query, city=True)
if ipv4_re.match(query):
# If an IP address was passed in
return GeoIP_record_by_addr(self._city, c_char_p(query))
else:
# If a FQDN was passed in.
return GeoIP_record_by_name(self._city, c_char_p(query))
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_code_by_addr(self._country, query)
else:
return GeoIP_country_code_by_name(self._country, query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_name_by_addr(self._country, query)
else:
return GeoIP_country_name_by_name(self._country, query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
        Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code' : self.country_code(query),
'country_name' : self.country_name(query),
}
#### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None: return None
else: return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
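    # Illustrative ordering note (the query is hypothetical): coords() defaults
    # to (longitude, latitude), so
    #     g.lon_lat('example.com')  ->  (lon, lat)
    #     g.lat_lon('example.com')  ->  (lat, lon)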
#### GeoIP Database Information Routines ####
@property
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = GeoIP_database_info(self._country)
return ci
@property
def city_info(self):
"Retuns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = GeoIP_database_info(self._city)
return ci
@property
def info(self):
"Returns information about the GeoIP library and databases in use."
info = ''
if GeoIP_lib_version:
info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
#### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
|
gzorin/OpenShadingLanguage | refs/heads/dev/gzorin/delegate-compile | testsuite/render-cornell/run.py | 6 | #!/usr/bin/env python
failthresh = max (failthresh, 0.005) # allow a little more LSB noise between platforms
outputs = [ "out.exr" ]
command = testrender("-r 256 256 -aa 4 cornell.xml out.exr")
|
ina-foss/ID-Fits | refs/heads/master | bin/others/convert_alignment_model_from_text_to_binary.py | 1 | import struct
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Converts a model from text format to binary format.")
parser.add_argument("input_text_file", help="Input model file in text format to convert.")
parser.add_argument("output_file", help="Output model file in binary format.")
args = parser.parse_args()
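    # Sketch of the binary layout this script writes, inferred from the
    # struct.pack() calls below (all fields little-endian; the semantics of
    # T, N, D and L are model-specific and not documented here):
    #   - header: four uint32 values (T, N, D, L)
    #   - the mean shape as a float32 array
    #   - for the nested loops below: tree nodes packed as 2 x uint32 +
    #     4 x float32, a uint32 count, and N*(2**D) float32 increment vectors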
input_file = open(args.input_text_file, "r")
output_file = open(args.output_file, "wb")
T, N, D, L = [int(x) for x in input_file.readline().split()]
output_file.write(struct.pack("<4I", T, N, D, L))
mean_shape = map(float, input_file.readline().split())
output_file.write(struct.pack("<%if" % len(mean_shape), *mean_shape))
for _ in range(T):
for _ in range(L):
for _ in range(N):
for _ in range(2**(D+1)-1):
line = input_file.readline().split()
d, p = map(int, line[:2])
indexes = map(float, line[2:])
output_file.write(struct.pack("<2I4f", d, p, *indexes))
l = int(input_file.readline())
output_file.write(struct.pack("<I", l))
for _ in range(N*(2**D)):
increment = map(float, input_file.readline().split())
output_file.write(struct.pack("<%if" % len(increment), *increment))
|
ZLLab-Mooc/edx-platform | refs/heads/named-release/dogwood.rc | lms/djangoapps/courseware/management/commands/clean_history.py | 30 | """A command to clean the StudentModuleHistory table.
When we added XBlock storage, each field modification wrote a new history row
to the db. Now that we have bulk saves to avoid that database hammering, we
need to clean out the unnecessary rows from the database.
This command does that.
"""
import datetime
import json
import logging
import optparse
import time
import traceback
from django.core.management.base import NoArgsCommand
from django.db import transaction
from django.db.models import Max
from courseware.models import StudentModuleHistory
class Command(NoArgsCommand):
"""The actual clean_history command to clean history rows."""
help = "Deletes unneeded rows from the StudentModuleHistory table."
option_list = NoArgsCommand.option_list + (
optparse.make_option(
'--batch',
type='int',
default=100,
help="Batch size, number of module_ids to examine in a transaction.",
),
optparse.make_option(
'--dry-run',
action='store_true',
default=False,
help="Don't change the database, just show what would be done.",
),
optparse.make_option(
'--sleep',
type='float',
default=0,
help="Seconds to sleep between batches.",
),
)
def handle_noargs(self, **options):
# We don't want to see the SQL output from the db layer.
logging.getLogger("django.db.backends").setLevel(logging.INFO)
smhc = StudentModuleHistoryCleaner(
dry_run=options["dry_run"],
)
smhc.main(batch_size=options["batch"], sleep=options["sleep"])
class StudentModuleHistoryCleaner(object):
"""Logic to clean rows from the StudentModuleHistory table."""
DELETE_GAP_SECS = 0.5 # Rows this close can be discarded.
STATE_FILE = "clean_history.json"
BATCH_SIZE = 100
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.next_student_module_id = 0
self.last_student_module_id = 0
def main(self, batch_size=None, sleep=0):
"""Invoked from the management command to do all the work."""
batch_size = batch_size or self.BATCH_SIZE
self.last_student_module_id = self.get_last_student_module_id()
self.load_state()
while self.next_student_module_id <= self.last_student_module_id:
with transaction.atomic():
for smid in self.module_ids_to_check(batch_size):
try:
self.clean_one_student_module(smid)
except Exception: # pylint: disable=broad-except
trace = traceback.format_exc()
self.say("Couldn't clean student_module_id {}:\n{}".format(smid, trace))
if self.dry_run:
transaction.set_rollback(True)
else:
self.say("Committing")
self.save_state()
if sleep:
time.sleep(sleep)
def say(self, message):
"""
Display a message to the user.
The message will have a trailing newline added to it.
"""
print message
def load_state(self):
"""
Load the latest state from disk.
"""
try:
state_file = open(self.STATE_FILE)
except IOError:
self.say("No stored state")
self.next_student_module_id = 0
else:
with state_file:
state = json.load(state_file)
self.say(
"Loaded stored state: {}".format(
json.dumps(state, sort_keys=True)
)
)
self.next_student_module_id = state['next_student_module_id']
def save_state(self):
"""
Save the state to disk.
"""
state = {
'next_student_module_id': self.next_student_module_id,
}
with open(self.STATE_FILE, "w") as state_file:
json.dump(state, state_file)
self.say("Saved state: {}".format(json.dumps(state, sort_keys=True)))
def get_last_student_module_id(self):
"""
Return the id of the last student_module.
"""
last = StudentModuleHistory.objects.all() \
.aggregate(Max('student_module'))['student_module__max']
self.say("Last student_module_id is {}".format(last))
return last
def module_ids_to_check(self, batch_size):
"""Produce a sequence of student module ids to check.
`batch_size` is how many module ids to produce, max.
The sequence starts with `next_student_module_id`, and goes up to
and including `last_student_module_id`.
`next_student_module_id` is updated as each id is yielded.
"""
start = self.next_student_module_id
for smid in range(start, start + batch_size):
if smid > self.last_student_module_id:
break
yield smid
self.next_student_module_id = smid + 1
def get_history_for_student_modules(self, student_module_id):
"""
Get the history rows for a student module.
```student_module_id```: the id of the student module we're
interested in.
Return a list: [(id, created), ...], all the rows of history.
"""
history = StudentModuleHistory.objects \
.filter(student_module=student_module_id) \
.order_by('created', 'id')
return [(row.id, row.created) for row in history]
def delete_history(self, ids_to_delete):
"""
Delete history rows.
```ids_to_delete```: a non-empty list (or set...) of history row ids to delete.
"""
assert ids_to_delete
StudentModuleHistory.objects.filter(id__in=ids_to_delete).delete()
def clean_one_student_module(self, student_module_id):
"""Clean one StudentModule's-worth of history.
`student_module_id`: the id of the StudentModule to process.
"""
delete_gap = datetime.timedelta(seconds=self.DELETE_GAP_SECS)
history = self.get_history_for_student_modules(student_module_id)
if not history:
self.say("No history for student_module_id {}".format(student_module_id))
return
ids_to_delete = []
next_created = None
for history_id, created in reversed(history):
if next_created is not None:
# Compare this timestamp with the next one.
if (next_created - created) < delete_gap:
# This row is followed closely by another, we can discard
# this one.
ids_to_delete.append(history_id)
next_created = created
verb = "Would have deleted" if self.dry_run else "Deleting"
self.say("{verb} {to_delete} rows of {total} for student_module_id {id}".format(
verb=verb,
to_delete=len(ids_to_delete),
total=len(history),
id=student_module_id,
))
if ids_to_delete and not self.dry_run:
self.delete_history(ids_to_delete)
|
FractalBrew/GoogleMusicChannel.bundle | refs/heads/master | Contents/Libraries/Shared/distutils/command/bdist_dumb.py | 151 | """distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id$"
import os
from sysconfig import get_python_version
from distutils.util import get_platform
from distutils.core import Command
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import DistutilsPlatformError
from distutils import log
class bdist_dumb (Command):
description = 'create a "dumb" built distribution'
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
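    # Typical invocation (for illustration only):
    #     python setup.py bdist_dumb --format=gztar --dist-dir=dist
    # which installs the project into a temporary tree and archives it relative
    # to the installation root (or using relative paths when --relative is given).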
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = None
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
samchrisinger/osf.io | refs/heads/develop | tests/test_notifications.py | 7 | import collections
import datetime
import mock
import pytz
from babel import dates, Locale
from schema import Schema, And, Use, Or
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from nose.tools import * # noqa PEP8 asserts
from framework.auth import Auth
from framework.auth.core import User
from framework.guid.model import Guid
from website.notifications.tasks import get_users_emails, send_users_email, group_by_node, remove_notifications
from website.notifications import constants
from website.notifications.model import NotificationDigest
from website.notifications.model import NotificationSubscription
from website.notifications import emails
from website.notifications import utils
from website.project.model import Node, Comment
from website import mails, settings
from website.project.signals import contributor_removed, node_deleted
from website.util import api_url_for
from website.util import web_url_for
from tests import factories
from tests.base import capture_signals
from tests.base import OsfTestCase, NotificationTestCase
class TestNotificationsModels(OsfTestCase):
def setUp(self):
super(TestNotificationsModels, self).setUp()
# Create project with component
self.user = factories.UserFactory()
self.consolidate_auth = Auth(user=self.user)
self.parent = factories.ProjectFactory(creator=self.user)
self.node = factories.NodeFactory(creator=self.user, parent=self.parent)
def test_has_permission_on_children(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
sub_component.add_contributor(contributor=non_admin_user)
sub_component.save()
sub_component2 = factories.NodeFactory(parent=node)
assert_true(
node.has_permission_on_children(non_admin_user, 'read')
)
def test_check_user_has_permission_excludes_deleted_components(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
sub_component.add_contributor(contributor=non_admin_user)
sub_component.is_deleted = True
sub_component.save()
sub_component2 = factories.NodeFactory(parent=node)
assert_false(
node.has_permission_on_children(non_admin_user,'read')
)
def test_check_user_does_not_have_permission_on_private_node_child(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
assert_false(
node.has_permission_on_children(non_admin_user,'read')
)
def test_check_user_child_node_permissions_false_if_no_children(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
assert_false(
node.has_permission_on_children(non_admin_user,'read')
)
def test_check_admin_has_permissions_on_private_component(self):
parent = factories.ProjectFactory()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
assert_true(
node.has_permission_on_children(parent.creator,'read')
)
def test_check_user_private_node_child_permissions_excludes_pointers(self):
user = factories.UserFactory()
parent = factories.ProjectFactory()
pointed = factories.ProjectFactory(contributor=user)
parent.add_pointer(pointed, Auth(parent.creator))
parent.save()
assert_false(
parent.has_permission_on_children(user,'read')
)
def test_new_project_creator_is_subscribed(self):
user = factories.UserFactory()
factories.ProjectFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
assert_equal(len(user_subscriptions), 2) # subscribed to both file_updated and comments
assert_in('file_updated', event_types)
assert_in('comments', event_types)
def test_new_node_creator_is_not_subscribed(self):
user = factories.UserFactory()
factories.NodeFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
assert_equal(len(user_subscriptions), 0)
def test_new_project_creator_is_subscribed_with_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
owner=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_digest')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
owner=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'none')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_digest')
node = factories.ProjectFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
assert_equal(len(user_subscriptions), 5) # subscribed to both node and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_mentions', event_types)
assert_equal(len(file_updated_subscription.none), 1)
assert_equal(len(file_updated_subscription.email_transactional), 0)
assert_equal(len(comments_subscription.email_digest), 1)
assert_equal(len(comments_subscription.email_transactional), 0)
def test_new_node_creator_is_not_subscribed_with_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
owner=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_digest')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
owner=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'none')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comment_replies',
owner=user,
event_name='global_comment_replies'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.NodeFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
assert_equal(len(user_subscriptions), 4) # subscribed to only user settings
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_comment_replies', event_types)
assert_in('global_mentions', event_types)
def test_new_project_creator_is_subscribed_with_default_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
owner=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
owner=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comment_replies',
owner=user,
event_name='global_comment_replies'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.ProjectFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
assert_equal(len(user_subscriptions), 6) # subscribed to both node and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_comment_replies', event_types)
assert_in('global_mentions', event_types)
assert_equal(len(file_updated_subscription.email_transactional), 1)
assert_equal(len(comments_subscription.email_transactional), 1)
def test_new_fork_creator_is_subscribed_with_default_global_settings(self):
user = factories.UserFactory()
project = factories.ProjectFactory(creator=user)
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
owner=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
owner=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.ForkFactory(project=project)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
node_file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
node_comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
project_file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', project._id + '_file_updated'))
project_comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', project._id + '_comments'))
assert_equal(len(user_subscriptions), 7) # subscribed to project, fork, and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_mentions', event_types)
assert_equal(len(node_file_updated_subscription.email_transactional), 1)
assert_equal(len(node_comments_subscription.email_transactional), 1)
assert_equal(len(project_file_updated_subscription.email_transactional), 1)
assert_equal(len(project_comments_subscription.email_transactional), 1)
def test_new_node_creator_is_not_subscribed_with_default_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
owner=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
owner=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comment_replies',
owner=user,
event_name='global_comment_replies'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.NodeFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
assert_equal(len(user_subscriptions), 4) # subscribed to only user settings
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_comment_replies', event_types)
assert_in('global_mentions', event_types)
def test_contributor_subscribed_when_added_to_project(self):
user = factories.UserFactory()
contributor = factories.UserFactory()
project = factories.ProjectFactory(creator=user)
project.add_contributor(contributor=contributor)
contributor_subscriptions = list(utils.get_all_user_subscriptions(contributor))
event_types = [sub.event_name for sub in contributor_subscriptions]
assert_equal(len(contributor_subscriptions), 2)
assert_in('file_updated', event_types)
assert_in('comments', event_types)
def test_contributor_subscribed_when_added_to_component(self):
user = factories.UserFactory()
contributor = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=contributor._id + '_' + 'global_comments',
owner=contributor,
event_name='global_comments'
).add_user_to_subscription(contributor, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=contributor._id + '_' + 'global_file_updated',
owner=contributor,
event_name='global_file_updated'
).add_user_to_subscription(contributor, 'email_transactional')
node = factories.NodeFactory(creator=user)
node.add_contributor(contributor=contributor)
contributor_subscriptions = list(utils.get_all_user_subscriptions(contributor))
event_types = [sub.event_name for sub in contributor_subscriptions]
file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
assert_equal(len(contributor_subscriptions), 4) # subscribed to both node and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_equal(len(file_updated_subscription.email_transactional), 1)
assert_equal(len(comments_subscription.email_transactional), 1)
def test_unregistered_contributor_not_subscribed_when_added_to_project(self):
user = factories.UserFactory()
unregistered_contributor = factories.UnregUserFactory()
project = factories.ProjectFactory(creator=user)
project.add_contributor(contributor=unregistered_contributor)
contributor_subscriptions = list(utils.get_all_user_subscriptions(unregistered_contributor))
assert_equal(len(contributor_subscriptions), 0)
class TestSubscriptionView(OsfTestCase):
def setUp(self):
super(TestSubscriptionView, self).setUp()
self.node = factories.NodeFactory()
self.user = self.node.creator
def test_create_new_subscription(self):
payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'email_transactional'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=self.node.creator.auth)
# check that subscription was created
event_id = self.node._id + '_' + 'comments'
s = NotificationSubscription.find_one(Q('_id', 'eq', event_id))
# check that user was added to notification_type field
assert_equal(payload['id'], s.owner._id)
assert_equal(payload['event'], s.event_name)
assert_in(self.node.creator, getattr(s, payload['notification_type']))
# change subscription
new_payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'email_digest'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, new_payload, auth=self.node.creator.auth)
s.reload()
assert_false(self.node.creator in getattr(s, payload['notification_type']))
assert_in(self.node.creator, getattr(s, new_payload['notification_type']))
def test_adopt_parent_subscription_default(self):
payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'adopt_parent'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=self.node.creator.auth)
event_id = self.node._id + '_' + 'comments'
        # confirm that no subscription was created, since the child adopts the parent's default subscription
s = NotificationSubscription.find(Q('_id', 'eq', event_id)).count()
assert_equal(0, s)
def test_change_subscription_to_adopt_parent_subscription_removes_user(self):
payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'email_transactional'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=self.node.creator.auth)
# check that subscription was created
event_id = self.node._id + '_' + 'comments'
s = NotificationSubscription.find_one(Q('_id', 'eq', event_id))
# change subscription to adopt_parent
new_payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'adopt_parent'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, new_payload, auth=self.node.creator.auth)
s.reload()
# assert that user is removed from the subscription entirely
for n in constants.NOTIFICATION_TYPES:
assert_false(self.node.creator in getattr(s, n))
def test_configure_subscription_adds_node_id_to_notifications_configured(self):
project = factories.ProjectFactory(creator=self.user)
assert_false(project._id in self.user.notifications_configured)
payload = {
'id': project._id,
'event': 'comments',
'notification_type': 'email_digest'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=project.creator.auth)
self.user.reload()
assert_true(project._id in self.user.notifications_configured)
class TestRemoveContributor(OsfTestCase):
def setUp(self):
super(OsfTestCase, self).setUp()
self.project = factories.ProjectFactory()
self.contributor = factories.UserFactory()
self.project.add_contributor(contributor=self.contributor, permissions=['read'])
self.project.save()
self.subscription = NotificationSubscription.find_one(
Q('owner', 'eq', self.project) &
Q('_id', 'eq', self.project._id + '_comments')
)
self.node = factories.NodeFactory(parent=self.project)
self.node.add_contributor(contributor=self.project.creator, permissions=['read', 'write', 'admin'])
self.node.save()
self.node_subscription = NotificationSubscription.find_one(Q(
'_id', 'eq', self.node._id + '_comments') & Q('owner', 'eq', self.node)
)
self.node_subscription.add_user_to_subscription(self.node.creator, 'email_transactional')
def test_removed_non_admin_contributor_is_removed_from_subscriptions(self):
assert_in(self.contributor, self.subscription.email_transactional)
self.project.remove_contributor(self.contributor, auth=Auth(self.project.creator))
assert_not_in(self.contributor, self.project.contributors)
self.subscription.reload()
assert_not_in(self.contributor, self.subscription.email_transactional)
def test_removed_non_parent_admin_contributor_is_removed_from_subscriptions(self):
assert_in(self.node.creator, self.node_subscription.email_transactional)
self.node.remove_contributor(self.node.creator, auth=Auth(self.node.creator))
assert_not_in(self.node.creator, self.node.contributors)
self.node_subscription.reload()
assert_not_in(self.node.creator, self.node_subscription.email_transactional)
def test_removed_contributor_admin_on_parent_not_removed_from_node_subscription(self):
# Admin on parent project is removed as a contributor on a component. Check
# that admin is not removed from component subscriptions, as the admin
# now has read-only access.
assert_in(self.project.creator, self.node_subscription.email_transactional)
self.node.remove_contributor(self.project.creator, auth=Auth(self.project.creator))
assert_not_in(self.project.creator, self.node.contributors)
assert_in(self.project.creator, self.node_subscription.email_transactional)
def test_remove_contributor_signal_called_when_contributor_is_removed(self):
with capture_signals() as mock_signals:
self.project.remove_contributor(self.contributor, auth=Auth(self.project.creator))
assert_equal(mock_signals.signals_sent(), set([contributor_removed]))
class TestRemoveNodeSignal(OsfTestCase):
def test_node_subscriptions_and_backrefs_removed_when_node_is_deleted(self):
project = factories.ProjectFactory()
s = NotificationSubscription.find(Q('email_transactional', 'eq', project.creator._id))
assert_equal(s.count(), 2)
with capture_signals() as mock_signals:
project.remove_node(auth=Auth(project.creator))
assert_true(project.is_deleted)
assert_equal(mock_signals.signals_sent(), set([node_deleted]))
s = NotificationSubscription.find(Q('email_transactional', 'eq', project.creator._id))
assert_equal(s.count(), 0)
with assert_raises(NoResultsFound):
NotificationSubscription.find_one(Q('owner', 'eq', project))
def list_or_dict(data):
# Generator only returns lists or dicts from list or dict
if isinstance(data, dict):
for key in data:
if isinstance(data[key], dict) or isinstance(data[key], list):
yield data[key]
elif isinstance(data, list):
for item in data:
if isinstance(item, dict) or isinstance(item, list):
yield item
def has(data, sub_data):
# Recursive approach to look for a subset of data in data.
# WARNING: Don't use on huge structures
# :param data: Data structure
# :param sub_data: subset being checked for
# :return: True or False
try:
(item for item in data if item == sub_data).next()
return True
except StopIteration:
lists_and_dicts = list_or_dict(data)
for item in lists_and_dicts:
if has(item, sub_data):
return True
return False
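# Illustrative example of has() (the values are made up):
#   data = [{'kind': 'node', 'children': [{'kind': 'event'}]}]
#   has(data, {'kind': 'event'})   -> True   (found inside the nested 'children' list)
#   has(data, {'kind': 'folder'})  -> False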
def subscription_schema(project, structure, level=0):
# builds a schema from a list of nodes and events
# :param project: validation type
# :param structure: list of nodes (another list) and events
# :return: schema
sub_list = []
for item in list_or_dict(structure):
sub_list.append(subscription_schema(project, item, level=level+1))
sub_list.append(event_schema(level))
node_schema = {
'node': {
'id': Use(type(project._id), error="node_id{}".format(level)),
'title': Use(type(project.title), error="node_title{}".format(level)),
'url': Use(type(project.url), error="node_{}".format(level))
},
'kind': And(str, Use(lambda s: s in ('node', 'folder'),
error="kind didn't match node or folder {}".format(level))),
'nodeType': Use(lambda s: s in ('project', 'component'), error='nodeType not project or component'),
'category': Use(lambda s: s in settings.NODE_CATEGORY_MAP, error='category not in settings.NODE_CATEGORY_MAP'),
'permissions': {
'view': Use(lambda s: s in (True, False), error='view permissions is not True/False')
},
'children': sub_list
}
if level == 0:
return Schema([node_schema])
return node_schema
def event_schema(level=None):
return {
'event': {
'title': And(Use(str, error="event_title{} not a string".format(level)),
Use(lambda s: s in constants.NOTIFICATION_TYPES,
error="event_title{} not in list".format(level))),
'description': And(Use(str, error="event_desc{} not a string".format(level)),
Use(lambda s: s in constants.NODE_SUBSCRIPTIONS_AVAILABLE,
error="event_desc{} not in list".format(level))),
'notificationType': And(str, Or('adopt_parent', lambda s: s in constants.NOTIFICATION_TYPES)),
'parent_notification_type': Or(None, 'adopt_parent', lambda s: s in constants.NOTIFICATION_TYPES)
},
'kind': 'event',
'children': And(list, lambda l: len(l) == 0)
}
class TestNotificationUtils(OsfTestCase):
def setUp(self):
super(TestNotificationUtils, self).setUp()
self.user = factories.UserFactory()
self.project = factories.ProjectFactory(creator=self.user)
self.project_subscription = NotificationSubscription.find_one(
Q('owner', 'eq', self.project) &
Q('_id', 'eq', self.project._id + '_comments') &
Q('event_name', 'eq', 'comments')
)
self.user.notifications_configured[self.project._id] = True
self.user.save()
self.node = factories.NodeFactory(parent=self.project, creator=self.user)
self.node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=self.node._id + '_' + 'comments',
owner=self.node,
event_name='comments'
)
self.node_comments_subscription.save()
self.node_comments_subscription.email_transactional.append(self.user)
self.node_comments_subscription.save()
self.node_subscription = list(NotificationSubscription.find(Q('owner', 'eq', self.node)))
self.user_subscription = [factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'comment_replies',
owner=self.user,
event_name='comment_replies'
),
factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'global_comment',
owner=self.user,
event_name='global_comment'
),
factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'global_file_updated',
owner=self.user,
event_name='global_file_updated'
)]
for x in self.user_subscription:
x.save()
for x in self.user_subscription:
x.email_transactional.append(self.user)
for x in self.user_subscription:
x.save()
def test_to_subscription_key(self):
key = utils.to_subscription_key('xyz', 'comments')
assert_equal(key, 'xyz_comments')
def test_from_subscription_key(self):
parsed_key = utils.from_subscription_key('xyz_comment_replies')
assert_equal(parsed_key, {
'uid': 'xyz',
'event': 'comment_replies'
})
def test_get_all_user_subscriptions(self):
user_subscriptions = list(utils.get_all_user_subscriptions(self.user))
assert_in(self.project_subscription, user_subscriptions)
assert_in(self.node_comments_subscription, user_subscriptions)
for x in self.user_subscription:
assert_in(x, user_subscriptions)
assert_equal(len(user_subscriptions), 6)
def test_get_all_node_subscriptions_given_user_subscriptions(self):
user_subscriptions = utils.get_all_user_subscriptions(self.user)
node_subscription_ids = [x._id for x in utils.get_all_node_subscriptions(self.user, self.node,
user_subscriptions=user_subscriptions)]
expected_node_subscription_ids = [x._id for x in self.node_subscription]
assert_items_equal(node_subscription_ids, expected_node_subscription_ids)
def test_get_all_node_subscriptions_given_user_and_node(self):
node_subscription_ids = [x._id for x in utils.get_all_node_subscriptions(self.user, self.node)]
expected_node_subscription_ids = [x._id for x in self.node_subscription]
assert_items_equal(node_subscription_ids, expected_node_subscription_ids)
def test_get_configured_project_ids_does_not_return_user_or_node_ids(self):
configured_ids = utils.get_configured_projects(self.user)
        # No duplicates!
assert_equal(len(configured_ids), 1)
assert_in(self.project._id, configured_ids)
assert_not_in(self.node._id, configured_ids)
assert_not_in(self.user._id, configured_ids)
def test_get_configured_project_ids_excludes_deleted_projects(self):
project = factories.ProjectFactory()
project.is_deleted = True
project.save()
assert_not_in(project._id, utils.get_configured_projects(self.user))
def test_get_configured_project_ids_excludes_node_with_project_category(self):
node = factories.NodeFactory(parent=self.project, category='project')
assert_not_in(node._id, utils.get_configured_projects(self.user))
def test_get_configured_project_ids_includes_top_level_private_projects_if_subscriptions_on_node(self):
private_project = factories.ProjectFactory()
node = factories.NodeFactory(parent=private_project)
node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_' + 'comments',
owner=node,
event_name='comments'
)
node_comments_subscription.save()
node_comments_subscription.email_transactional.append(node.creator)
node_comments_subscription.save()
node.creator.notifications_configured[node._id] = True
node.creator.save()
configured_project_ids = utils.get_configured_projects(node.creator)
assert_in(private_project._id, configured_project_ids)
def test_get_configured_project_ids_excludes_private_projects_if_no_subscriptions_on_node(self):
user = factories.UserFactory()
private_project = factories.ProjectFactory()
node = factories.NodeFactory(parent=private_project)
node.add_contributor(user)
utils.remove_contributor_from_subscriptions(node, user)
configured_project_ids = utils.get_configured_projects(user)
assert_not_in(private_project._id, configured_project_ids)
def test_get_parent_notification_type(self):
nt = utils.get_parent_notification_type(self.node, 'comments', self.user)
assert_equal(nt, 'email_transactional')
def test_get_parent_notification_type_no_parent_subscriptions(self):
node = factories.NodeFactory()
nt = utils.get_parent_notification_type(node._id, 'comments', self.user)
assert_equal(nt, None)
def test_get_parent_notification_type_no_parent(self):
project = factories.ProjectFactory()
nt = utils.get_parent_notification_type(project._id, 'comments', self.user)
assert_equal(nt, None)
def test_get_parent_notification_type_handles_user_id(self):
nt = utils.get_parent_notification_type(self.user._id, 'comments', self.user)
assert_equal(nt, None)
def test_format_data_project_settings(self):
data = utils.format_data(self.user, [self.project._id])
parent_event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}
child_event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': []
}
expected_new = [['event'], 'event']
schema = subscription_schema(self.project, expected_new)
assert schema.validate(data)
assert has(data, parent_event)
assert has(data, child_event)
def test_format_data_node_settings(self):
data = utils.format_data(self.user, [self.node._id])
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': []
}
schema = subscription_schema(self.project, ['event'])
assert schema.validate(data)
assert has(data, event)
def test_format_includes_admin_view_only_component_subscriptions(self):
# Test private components in which parent project admins are not contributors still appear in their
# notifications settings.
node = factories.NodeFactory(parent=self.project)
data = utils.format_data(self.user, [self.project._id])
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'adopt_parent',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event', ['event'], ['event']])
assert schema.validate(data)
assert has(data, event)
def test_format_data_excludes_pointers(self):
project = factories.ProjectFactory()
pointed = factories.ProjectFactory()
project.add_pointer(pointed, Auth(project.creator))
project.creator.notifications_configured[project._id] = True
project.creator.save()
configured_project_ids = utils.get_configured_projects(project.creator)
data = utils.format_data(project.creator, configured_project_ids)
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event'])
assert schema.validate(data)
assert has(data, event)
def test_format_data_user_subscriptions_includes_private_parent_if_configured_children(self):
private_project = factories.ProjectFactory()
node = factories.NodeFactory(parent=private_project)
node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_' + 'comments',
owner=node,
event_name='comments'
)
node_comments_subscription.save()
node_comments_subscription.email_transactional.append(node.creator)
node_comments_subscription.save()
node.creator.notifications_configured[node._id] = True
node.creator.save()
configured_project_ids = utils.get_configured_projects(node.creator)
data = utils.format_data(node.creator, configured_project_ids)
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event', ['event']])
assert schema.validate(data)
assert has(data, event)
def test_format_user_subscriptions(self):
data = utils.format_user_subscriptions(self.user)
expected = [
{
'event': {
'title': 'global_file_updated',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_file_updated'],
'notificationType': 'email_transactional',
'parent_notification_type': None,
},
'kind': 'event',
'children': []
}, {
'event': {
'title': 'global_comment_replies',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_comment_replies'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}, {
'event': {
'title': 'global_mentions',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_mentions'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}, {
'event': {
'title': 'global_comments',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
},
]
assert_items_equal(data, expected)
def test_get_global_notification_type(self):
        notification_type = utils.get_global_notification_type(self.user_subscription[1], self.user)
assert_equal('email_transactional', notification_type)
def test_check_if_all_global_subscriptions_are_none_false(self):
all_global_subscriptions_none = utils.check_if_all_global_subscriptions_are_none(self.user)
assert_false(all_global_subscriptions_none)
def test_check_if_all_global_subscriptions_are_none_true(self):
for x in self.user_subscription:
x.none.append(self.user)
x.email_transactional.remove(self.user)
for x in self.user_subscription:
x.save()
all_global_subscriptions_none = utils.check_if_all_global_subscriptions_are_none(self.user)
assert_true(all_global_subscriptions_none)
def test_format_data_user_settings(self):
data = utils.format_user_and_project_subscriptions(self.user)
expected = [
{
'node': {
'id': self.user._id,
'title': 'Default Notification Settings',
'help': 'These are default settings for new projects you create or are added to. Modifying these settings will not modify settings on existing projects.'
},
'kind': 'heading',
'children': utils.format_user_subscriptions(self.user)
},
{
'node': {
'help': 'These are settings for each of your projects. Modifying these settings will only modify the settings for the selected project.',
'id': '',
'title': 'Project Notifications'
},
'kind': 'heading',
'children': utils.format_data(self.user, utils.get_configured_projects(self.user))
}]
assert_equal(data, expected)
def test_serialize_user_level_event(self):
user_subscriptions = [x for x in utils.get_all_user_subscriptions(self.user)]
user_subscription = None
for subscription in user_subscriptions:
if 'global_comment_replies' in getattr(subscription, 'event_name'):
user_subscription = subscription
data = utils.serialize_event(self.user, event_description='global_comment_replies',
subscription=user_subscription)
expected = {
'event': {
'title': 'global_comment_replies',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_comment_replies'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}
assert_equal(data, expected)
def test_serialize_node_level_event(self):
node_subscriptions = [x for x in utils.get_all_node_subscriptions(self.user, self.node)]
data = utils.serialize_event(user=self.user, event_description='comments',
subscription=node_subscriptions[0], node=self.node)
expected = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': [],
}
assert_equal(data, expected)
def test_serialize_node_level_event_that_adopts_parent_settings(self):
user = factories.UserFactory()
self.project.add_contributor(contributor=user, permissions=['read'])
self.project.save()
self.node.add_contributor(contributor=user, permissions=['read'])
self.node.save()
# set up how it was in original test - remove existing subscriptions
utils.remove_contributor_from_subscriptions(self.node, user)
node_subscriptions = [x for x in utils.get_all_node_subscriptions(user, self.node)]
data = utils.serialize_event(user=user, event_description='comments',
subscription=node_subscriptions, node=self.node)
expected = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'adopt_parent',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': [],
}
assert_equal(data, expected)
class TestNotificationsDict(OsfTestCase):
def test_notifications_dict_add_message_returns_proper_format(self):
d = utils.NotificationsDict()
message = {
'message': 'Freddie commented on your project',
'timestamp': datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
}
message2 = {
'message': 'Mercury commented on your component',
'timestamp': datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
}
d.add_message(['project'], message)
d.add_message(['project', 'node'], message2)
expected = {
'messages': [],
'children': collections.defaultdict(
utils.NotificationsDict, {
'project': {
'messages': [message],
'children': collections.defaultdict(utils.NotificationsDict, {
'node': {
'messages': [message2],
'children': collections.defaultdict(utils.NotificationsDict, {})
}
})
}
}
)}
assert_equal(d, expected)
class TestCompileSubscriptions(NotificationTestCase):
def setUp(self):
super(TestCompileSubscriptions, self).setUp()
self.user_1 = factories.UserFactory()
self.user_2 = factories.UserFactory()
self.user_3 = factories.UserFactory()
self.user_4 = factories.UserFactory()
# Base project + 1 project shared with 3 + 1 project shared with 2
self.base_project = factories.ProjectFactory(is_public=False, creator=self.user_1)
self.shared_node = factories.NodeFactory(parent=self.base_project, is_public=False, creator=self.user_1)
self.private_node = factories.NodeFactory(parent=self.base_project, is_public=False, creator=self.user_1)
# Adding contributors
for node in [self.base_project, self.shared_node, self.private_node]:
node.add_contributor(self.user_2, permissions='admin')
self.base_project.add_contributor(self.user_3, permissions='write')
self.shared_node.add_contributor(self.user_3, permissions='write')
# Setting basic subscriptions
self.base_sub = factories.NotificationSubscriptionFactory(
_id=self.base_project._id + '_file_updated',
owner=self.base_project,
event_name='file_updated'
)
self.base_sub.save()
self.shared_sub = factories.NotificationSubscriptionFactory(
_id=self.shared_node._id + '_file_updated',
owner=self.shared_node,
event_name='file_updated'
)
self.shared_sub.save()
self.private_sub = factories.NotificationSubscriptionFactory(
_id=self.private_node._id + '_file_updated',
owner=self.private_node,
event_name='file_updated'
)
self.private_sub.save()
def test_no_subscription(self):
node = factories.NodeFactory()
result = emails.compile_subscriptions(node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
def test_no_subscribers(self):
node = factories.NodeFactory()
node_sub = factories.NotificationSubscriptionFactory(
_id=node._id + '_file_updated',
owner=node,
event_name='file_updated'
)
node_sub.save()
result = emails.compile_subscriptions(node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
def test_creator_subbed_parent(self):
# Basic sub check
self.base_sub.email_transactional.append(self.user_1)
self.base_sub.save()
result = emails.compile_subscriptions(self.base_project, 'file_updated')
assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
def test_creator_subbed_to_parent_from_child(self):
# Checks that the parent sub is the one that appears when there is no child sub
self.base_sub.email_transactional.append(self.user_1)
self.base_sub.save()
result = emails.compile_subscriptions(self.shared_node, 'file_updated')
assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
def test_creator_subbed_to_both_from_child(self):
# checks that only one sub is in the list.
self.base_sub.email_transactional.append(self.user_1)
self.base_sub.save()
self.shared_sub.email_transactional.append(self.user_1)
self.shared_sub.save()
result = emails.compile_subscriptions(self.shared_node, 'file_updated')
assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
def test_creator_diff_subs_to_both_from_child(self):
# Check that the child node sub overrides the parent node sub
self.base_sub.email_transactional.append(self.user_1)
self.base_sub.save()
self.shared_sub.none.append(self.user_1)
self.shared_sub.save()
result = emails.compile_subscriptions(self.shared_node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [self.user_1._id], 'email_digest': []}, result)
def test_user_wo_permission_on_child_node_not_listed(self):
# Tests that a user without permission does not get an email about a node they cannot see.
self.base_sub.email_transactional.append(self.user_3)
self.base_sub.save()
result = emails.compile_subscriptions(self.private_node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
def test_several_nodes_deep(self):
self.base_sub.email_transactional.append(self.user_1)
self.base_sub.save()
node2 = factories.NodeFactory(parent=self.shared_node)
node3 = factories.NodeFactory(parent=node2)
node4 = factories.NodeFactory(parent=node3)
node5 = factories.NodeFactory(parent=node4)
subs = emails.compile_subscriptions(node5, 'file_updated')
assert_equal(subs, {'email_transactional': [self.user_1._id], 'email_digest': [], 'none': []})
def test_several_nodes_deep_precedence(self):
self.base_sub.email_transactional.append(self.user_1)
self.base_sub.save()
node2 = factories.NodeFactory(parent=self.shared_node)
node3 = factories.NodeFactory(parent=node2)
node4 = factories.NodeFactory(parent=node3)
node4_subscription = factories.NotificationSubscriptionFactory(
_id=node4._id + '_file_updated',
owner=node4,
event_name='file_updated'
)
node4_subscription.save()
node4_subscription.email_digest.append(self.user_1)
node4_subscription.save()
node5 = factories.NodeFactory(parent=node4)
subs = emails.compile_subscriptions(node5, 'file_updated')
assert_equal(subs, {'email_transactional': [], 'email_digest': [self.user_1._id], 'none': []})
class TestMoveSubscription(NotificationTestCase):
def setUp(self):
super(TestMoveSubscription, self).setUp()
self.blank = {key: [] for key in constants.NOTIFICATION_TYPES} # For use where it is blank.
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.user_3 = factories.AuthUserFactory()
self.user_4 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
self.private_node = factories.NodeFactory(parent=self.project, is_public=False, creator=self.user_1)
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
owner=self.project,
event_name='file_updated'
)
self.sub.email_transactional.extend([self.user_1])
self.sub.save()
self.file_sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_xyz42_file_updated',
owner=self.project,
event_name='xyz42_file_updated'
)
self.file_sub.save()
def test_separate_users(self):
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
subbed, removed = utils.separate_users(
self.private_node, [self.user_2._id, self.user_3._id, self.user_4._id]
)
assert_equal([self.user_2._id, self.user_3._id], subbed)
assert_equal([self.user_4._id], removed)
def test_event_subs_same(self):
self.file_sub.email_transactional.extend([self.user_2, self.user_3, self.user_4])
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [], 'none': []}, results)
def test_event_nodes_same(self):
self.file_sub.email_transactional.extend([self.user_2, self.user_3, self.user_4])
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.project)
assert_equal({'email_transactional': [], 'email_digest': [], 'none': []}, results)
def test_move_sub(self):
# Tests that the old sub is replaced with the new sub.
utils.move_subscription(self.blank, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_equal('abc42_file_updated', self.file_sub.event_name)
assert_equal(self.private_node, self.file_sub.owner)
assert_equal(self.private_node._id + '_abc42_file_updated', self.file_sub._id)
def test_move_sub_with_none(self):
# Attempt to reproduce an error that is seen when moving files
self.project.add_contributor(self.user_2, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.file_sub.none.append(self.user_2)
self.file_sub.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [], 'email_digest': [], 'none': [self.user_2._id]}, results)
def test_remove_one_user(self):
# One user doesn't have permissions on the node the sub is moved to. Should be listed.
self.file_sub.email_transactional.extend([self.user_2, self.user_3, self.user_4])
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [], 'none': []}, results)
def test_remove_one_user_warn_another(self):
# Two users do not have permissions on new node, but one has a project sub. Both should be listed.
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.save()
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.sub.email_digest.append(self.user_3)
self.sub.save()
self.file_sub.email_transactional.extend([self.user_2, self.user_4])
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
utils.move_subscription(results, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [self.user_3._id], 'none': []}, results)
assert_in(self.user_3, self.sub.email_digest) # Is not removed from the project subscription.
def test_warn_user(self):
# One user with a project sub does not have permission on new node. User should be listed.
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.save()
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.sub.email_digest.append(self.user_3)
self.sub.save()
self.file_sub.email_transactional.extend([self.user_2])
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
utils.move_subscription(results, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_equal({'email_transactional': [], 'email_digest': [self.user_3._id], 'none': []}, results)
assert_in(self.user_3, self.sub.email_digest) # Is not removed from the project subscription.
def test_user_node_subbed_and_not_removed(self):
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
self.sub.email_digest.append(self.user_3)
self.sub.save()
utils.move_subscription(self.blank, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_equal([], self.file_sub.email_digest)
class TestSendEmails(NotificationTestCase):
def setUp(self):
super(TestSendEmails, self).setUp()
self.user = factories.AuthUserFactory()
self.project = factories.ProjectFactory()
self.project_subscription = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_' + 'comments',
owner=self.project,
event_name='comments'
)
self.project_subscription.save()
self.project_subscription.email_transactional.append(self.project.creator)
self.project_subscription.save()
self.node = factories.NodeFactory(parent=self.project)
self.node_subscription = factories.NotificationSubscriptionFactory(
_id=self.node._id + '_comments',
owner=self.node,
event_name='comments'
)
self.node_subscription.save()
self.user_subscription = factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'global_comment_replies',
owner=self.user,
event_name='global_comment_replies',
email_transactional=[self.user._id]
)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_no_subscription(self, mock_store):
node = factories.ProjectFactory()
user = factories.AuthUserFactory()
emails.notify('comments', user=user, node=node, timestamp=datetime.datetime.utcnow())
assert_false(mock_store.called)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_no_subscribers(self, mock_store):
node = factories.NodeFactory()
node_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_comments',
owner=node,
event_name='comments'
)
node_subscription.save()
emails.notify('comments', user=self.user, node=node, timestamp=datetime.datetime.utcnow())
assert_false(mock_store.called)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_with_correct_args(self, mock_store):
time_now = datetime.datetime.utcnow()
emails.notify('comments', user=self.user, node=self.node, timestamp=time_now)
assert_true(mock_store.called)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', self.user,
self.node, time_now)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_does_not_send_to_users_subscribed_to_none(self, mock_store):
node = factories.NodeFactory()
user = factories.UserFactory()
node_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_comments',
owner=node,
event_name='comments'
)
node_subscription.save()
node_subscription.none.append(user)
node_subscription.save()
sent = emails.notify('comments', user=user, node=node, timestamp=datetime.datetime.utcnow())
assert_false(mock_store.called)
assert_equal(sent, [])
@mock.patch('website.notifications.emails.store_emails')
def test_notify_mentions_does_not_send_to_mentioned_users_subscribed_to_none(self, mock_store):
node = factories.NodeFactory()
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'none')
time_now = datetime.datetime.utcnow()
sent = emails.notify_mentions('global_mentions', user=user, node=node, timestamp=time_now, new_mentions=[user._id])
assert_false(mock_store.called)
assert_equal(sent, [])
@mock.patch('website.notifications.emails.store_emails')
def test_notify_mentions_does_send_to_mentioned_users(self, mock_store):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_global_mentions',
owner=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.ProjectFactory(creator=user)
time_now = datetime.datetime.utcnow()
emails.notify_mentions('global_mentions', user=user, node=node, timestamp=time_now, new_mentions=[user._id])
assert_true(mock_store.called)
mock_store.assert_called_with([node.creator._id], 'email_transactional', 'mentions', user,
node, time_now, new_mentions=[node.creator._id])
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_reply_event_if_comment_is_direct_reply(self, mock_store):
time_now = datetime.datetime.utcnow()
emails.notify('comments', user=self.user, node=self.node, timestamp=time_now, target_user=self.project.creator)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comment_replies',
self.user, self.node, time_now, target_user=self.project.creator)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_reply_when_target_user_is_subscribed_via_user_settings(self, mock_store):
time_now = datetime.datetime.utcnow()
emails.notify('global_comment_replies', user=self.project.creator, node=self.node, timestamp=time_now, target_user=self.user)
mock_store.assert_called_with([self.user._id], 'email_transactional', 'comment_replies',
self.project.creator, self.node, time_now, target_user=self.user)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_event_if_comment_reply_is_not_direct_reply(self, mock_store):
user = factories.UserFactory()
time_now = datetime.datetime.utcnow()
emails.notify('comments', user=user, node=self.node, timestamp=time_now, target_user=user)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', user,
self.node, time_now, target_user=user)
@mock.patch('website.mails.send_mail')
@mock.patch('website.notifications.emails.store_emails')
def test_notify_does_not_send_comment_if_they_reply_to_their_own_comment(self, mock_store, mock_send_mail):
time_now = datetime.datetime.utcnow()
emails.notify('comments', user=self.project.creator, node=self.project, timestamp=time_now,
target_user=self.project.creator)
assert_false(mock_store.called)
assert_false(mock_send_mail.called)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_event_if_comment_reply_is_not_direct_reply_on_component(self, mock_store):
# Test that comment replies on components that are not direct replies to the subscriber use the
# "comments" email template.
user = factories.UserFactory()
time_now = datetime.datetime.utcnow()
emails.notify('comments', user, self.node, time_now, target_user=user)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', user,
self.node, time_now, target_user=user)
def test_check_node_node_none(self):
subs = emails.check_node(None, 'comments')
assert_equal(subs, {'email_transactional': [], 'email_digest': [], 'none': []})
def test_check_node_one(self):
subs = emails.check_node(self.project, 'comments')
assert_equal(subs, {'email_transactional': [self.project.creator._id], 'email_digest': [], 'none': []})
@mock.patch('website.project.views.comment.notify')
def test_check_user_comment_reply_subscription_if_email_not_sent_to_target_user(self, mock_notify):
# user subscribed to comment replies
user = factories.UserFactory()
user_subscription = factories.NotificationSubscriptionFactory(
_id=user._id + '_comments',
owner=user,
event_name='comment_replies'
)
user_subscription.email_transactional.append(user)
user_subscription.save()
# user is not subscribed to project comment notifications
project = factories.ProjectFactory()
# user comments on project
target = factories.CommentFactory(node=project, user=user)
content = 'hammer to fall'
# reply to user (note: notify is called from Comment.create)
reply = Comment.create(
auth=Auth(project.creator),
user=project.creator,
node=project,
content=content,
target=Guid.load(target._id),
root_target=Guid.load(project._id),
is_public=True,
)
assert_true(mock_notify.called)
assert_equal(mock_notify.call_count, 2)
def test_get_settings_url_for_node(self):
url = emails.get_settings_url(self.project._id, self.user)
assert_equal(url, self.project.absolute_url + 'settings/')
def test_get_settings_url_for_user(self):
url = emails.get_settings_url(self.user._id, self.user)
assert_equal(url, web_url_for('user_notifications', _absolute=True))
def test_get_node_lineage(self):
node_lineage = emails.get_node_lineage(self.node)
assert_equal(node_lineage, [self.project._id, self.node._id])
def test_localize_timestamp(self):
timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
self.user.timezone = 'America/New_York'
self.user.locale = 'en_US'
self.user.save()
tz = dates.get_timezone(self.user.timezone)
locale = Locale(self.user.locale)
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
def test_localize_timestamp_empty_timezone(self):
timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
self.user.timezone = ''
self.user.locale = 'en_US'
self.user.save()
tz = dates.get_timezone('Etc/UTC')
locale = Locale(self.user.locale)
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
def test_localize_timestamp_empty_locale(self):
timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
self.user.timezone = 'America/New_York'
self.user.locale = ''
self.user.save()
tz = dates.get_timezone(self.user.timezone)
locale = Locale('en')
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
def test_localize_timestamp_handles_unicode(self):
timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
self.user.timezone = 'Europe/Moscow'
self.user.locale = 'ru_RU'
self.user.save()
tz = dates.get_timezone(self.user.timezone)
locale = Locale(self.user.locale)
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
class TestSendDigest(OsfTestCase):
def setUp(self):
super(TestSendDigest, self).setUp()
self.user_1 = factories.UserFactory()
self.user_2 = factories.UserFactory()
self.project = factories.ProjectFactory()
self.timestamp = datetime.datetime.utcnow()
def test_group_notifications_by_user_transactional(self):
send_type = 'email_transactional'
d = factories.NotificationDigestFactory(
user_id=self.user_1._id,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d.save()
d2 = factories.NotificationDigestFactory(
user_id=self.user_2._id,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d2.save()
d3 = factories.NotificationDigestFactory(
user_id=self.user_2._id,
send_type='email_digest',
timestamp=self.timestamp,
message='Hello, but this should not appear (this is a digest)',
node_lineage=[self.project._id]
)
d3.save()
user_groups = get_users_emails(send_type)
expected = [
{
u'user_id': self.user_1._id,
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': d._id
}]
},
{
u'user_id': self.user_2._id,
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': d2._id
}]
}
]
assert_equal(len(user_groups), 2)
assert_equal(user_groups, expected)
digest_ids = [d._id, d2._id, d3._id]
remove_notifications(email_notification_ids=digest_ids)
def test_group_notifications_by_user_digest(self):
send_type = 'email_digest'
d = factories.NotificationDigestFactory(
user_id=self.user_1._id,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d.save()
d2 = factories.NotificationDigestFactory(
user_id=self.user_2._id,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d2.save()
d3 = factories.NotificationDigestFactory(
user_id=self.user_2._id,
send_type='email_transactional',
timestamp=self.timestamp,
message='Hello, but this should not appear (this is transactional)',
node_lineage=[self.project._id]
)
d3.save()
user_groups = get_users_emails(send_type)
expected = [
{
u'user_id': self.user_1._id,
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': d._id
}]
},
{
u'user_id': self.user_2._id,
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': d2._id
}]
}
]
assert_equal(len(user_groups), 2)
assert_equal(user_groups, expected)
digest_ids = [d._id, d2._id, d3._id]
remove_notifications(email_notification_ids=digest_ids)
@mock.patch('website.mails.send_mail')
def test_send_users_email_called_with_correct_args(self, mock_send_mail):
send_type = 'email_transactional'
d = factories.NotificationDigestFactory(
user_id=factories.UserFactory()._id,
send_type=send_type,
timestamp=datetime.datetime.utcnow(),
message='Hello',
node_lineage=[factories.ProjectFactory()._id]
)
d.save()
user_groups = get_users_emails(send_type)
send_users_email(send_type)
assert_true(mock_send_mail.called)
assert_equals(mock_send_mail.call_count, len(user_groups))
last_user_index = len(user_groups) - 1
user = User.load(user_groups[last_user_index]['user_id'])
email_notification_ids = [message['_id'] for message in user_groups[last_user_index]['info']]
args, kwargs = mock_send_mail.call_args
assert_equal(kwargs['to_addr'], user.username)
assert_equal(kwargs['mimetype'], 'html')
assert_equal(kwargs['mail'], mails.DIGEST)
assert_equal(kwargs['name'], user.fullname)
message = group_by_node(user_groups[last_user_index]['info'])
assert_equal(kwargs['message'], message)
assert_equal(kwargs['callback'], remove_notifications(email_notification_ids=email_notification_ids))
def test_remove_sent_digest_notifications(self):
d = factories.NotificationDigestFactory(
user_id=factories.UserFactory()._id,
timestamp=datetime.datetime.utcnow(),
message='Hello',
node_lineage=[factories.ProjectFactory()._id]
)
digest_id = d._id
remove_notifications(email_notification_ids=[digest_id])
with assert_raises(NoResultsFound):
NotificationDigest.find_one(Q('_id', 'eq', digest_id))
|
macronucleus/chromagnon | refs/heads/master | Chromagnon/Priithon/fileDropPopup.py | 1 | """
Priithon pyshell / view / view2 support file drag-and-drop
-> a popup menu presents a choice of what to do
"""
__author__ = "Sebastian Haase <haase@msg.ucsf.edu>"
__license__ = "BSD license - see LICENSE file"
import wx
NO_SPECIAL_GUI_EXCEPT = True # instead rely on Priithon's guiExceptionFrame (Y._fixGuiExceptHook())
Menu_paste = wx.NewId()
Menu_view = wx.NewId()
Menu_view2 = wx.NewId()
Menu_assign = wx.NewId()
Menu_assignFN = wx.NewId()
Menu_assignList= wx.NewId()
Menu_dir = wx.NewId()
Menu_cd = wx.NewId()
Menu_appSysPath = wx.NewId()
Menu_exec = wx.NewId()
Menu_import = wx.NewId()
Menu_importAs = wx.NewId()
Menu_editor = wx.NewId()
Menu_editor2 = wx.NewId()
Menu_assignSeq = wx.NewId()
Menu_viewSeq = wx.NewId()
#seb : File drag and drop
class FileDropTarget(wx.FileDropTarget):
def __init__(self, parent, pyshell=None):
wx.FileDropTarget.__init__(self)
self.parent = parent
if pyshell is not None:
self.pyshell = pyshell
else:
import __main__
if hasattr(__main__, 'shell'):
self.pyshell = __main__.shell
else:
self.pyshell = None
def OnDropFiles(self, x, y, filenames):
if len(filenames) == 1:
self.txt = 'r\"%s\"' % filenames[0]
else:
self.txt = '[ '
for f in filenames:
self.txt += 'r\"%s\" , ' % f
self.txt += ']'
# #wx26 n = len(txt)
# #wx26 self.pyshell.AppendText(n, txt)
# self.pyshell.AppendText(txt)
# pos = self.pyshell.GetCurrentPos() + len(txt)
# self.pyshell.SetCurrentPos( pos )
# self.pyshell.SetSelection( pos, pos )
m = wx.Menu()
import os
if len(filenames) == 1:
self.fn_or_fns = filenames[0]
## filenames = filenames[0] # danger : misleading name (plural used for a single filename)
fUPPER_ending = self.fn_or_fns[-5:].upper()
if os.path.isdir(self.fn_or_fns):
m.Append(Menu_dir, "open directory-list-viewer")
m.Append(Menu_cd, "change working directory")
m.Append(Menu_appSysPath, "append to sys.path")
m.Append(Menu_assignFN, "assign dirname to var")
elif fUPPER_ending.endswith('.PY') or \
fUPPER_ending.endswith('.PYW') or \
fUPPER_ending.endswith('.PYC'):
m.Append(Menu_exec, "execute py-file")
m.Append(Menu_import, "import")
m.Append(Menu_importAs, "import as ...")
m.Append(Menu_editor, "edit py-file")
m.Append(Menu_assignFN, "assign filename to var")
else:
m.Append(Menu_assign, "load and assign to var")
m.Append(Menu_view, "view")
m.Append(Menu_view2, "view multi-color")
m.Append(Menu_editor2, "edit text file")
m.Append(Menu_assignFN, "assign filename to var")
else:
self.fn_or_fns = filenames
m.Append(Menu_view, "view files separately")
m.Append(Menu_view2, "view files as one multi-color")
m.Append(Menu_assignSeq, "load and assign img seq into one array var")
m.Append(Menu_viewSeq, "view image sequence")
m.Append(Menu_assignList, "assign list of names to var")
m.Append(Menu_paste, "paste")
# 20180114 deprecation warning
self.parent.Bind(wx.EVT_MENU, self.onAssign, id=Menu_assign)
self.parent.Bind(wx.EVT_MENU, self.onAssignFN, id=Menu_assignFN)
self.parent.Bind(wx.EVT_MENU, self.onAssignList, id=Menu_assignList)
self.parent.Bind(wx.EVT_MENU, self.onPaste, id=Menu_paste)
self.parent.Bind(wx.EVT_MENU, self.onView, id=Menu_view)
self.parent.Bind(wx.EVT_MENU, self.onView2, id=Menu_view2)
self.parent.Bind(wx.EVT_MENU, self.onDir, id=Menu_dir)
self.parent.Bind(wx.EVT_MENU, self.onCd, id=Menu_cd)
self.parent.Bind(wx.EVT_MENU, self.onAppSysPath, id=Menu_appSysPath)
self.parent.Bind(wx.EVT_MENU, self.onAssignSeq, id=Menu_assignSeq)
self.parent.Bind(wx.EVT_MENU, self.onViewSeq, id=Menu_viewSeq)
self.parent.Bind(wx.EVT_MENU, self.onExe, id=Menu_exec)
self.parent.Bind(wx.EVT_MENU, self.onImport, id=Menu_import)
self.parent.Bind(wx.EVT_MENU, self.onImportAs, id=Menu_importAs)
self.parent.Bind(wx.EVT_MENU, self.onEditor, id=Menu_editor)
self.parent.Bind(wx.EVT_MENU, self.onEditor2, id=Menu_editor2)
if wx.version().startswith('3'):
self.parent.PopupMenuXY(m, x,y)
else:
self.parent.PopupMenu(m, x,y)
return True # 20180114 wxpython Phoenix
def onPaste(self, ev):
try:
self.pyshell.AppendText(self.txt)
except:
n = len(self.txt)
self.pyshell.AppendText(n, self.txt)
pos = self.pyshell.GetCurrentPos() + len(self.txt)
self.pyshell.SetCurrentPos( pos )
self.pyshell.SetSelection( pos, pos )
def onView(self, ev):
from Priithon.all import Y
Y.view(self.fn_or_fns) # 'list' for "view separately"
self.pyshell.addHistory("Y.view( %s )"%(self.txt,))
def onView2(self, ev):
from Priithon.all import Y
if isinstance(self.fn_or_fns, (list, tuple)) and len(self.fn_or_fns) > 1:
f = tuple( self.fn_or_fns ) # 'tuple' for "view as mock-adarray"
self.txt = '( '
for fff in self.fn_or_fns:
self.txt += 'r\"%s\" , ' % fff
self.txt += ')'
else:
f = self.fn_or_fns
Y.view2(f, colorAxis='smart')
self.pyshell.addHistory("Y.view2( %s, colorAxis='smart')"%(self.txt,))
def onDir(self, ev):
from Priithon.all import Y
Y.listFilesViewer(self.fn_or_fns)
self.pyshell.addHistory("Y.listFilesViewer(r\"%s\")"%(self.fn_or_fns,))
def onCd(self, ev):
import os
os.chdir( self.fn_or_fns )
self.pyshell.addHistory("os.chdir(r\"%s\")"%(self.fn_or_fns,))
def onAppSysPath(self, ev):
import sys
from Priithon.all import Y
sys.path.append( self.fn_or_fns )
s = "sys.path.append(r\"%s\")"% (self.fn_or_fns,)
Y.shellMessage("### %s\n"% s)
self.pyshell.addHistory(s)
def onAssign(self, ev):
fn = self.fn_or_fns
from Priithon.all import Y
a = Y.load(fn)
if a is not None:
v = Y.assignNdArrToVarname(a, "Y.load( r'%s' )"%fn)
if v is not None:
self.pyshell.addHistory("%s = Y.load( r'%s' )"%(v,fn))
def onAssignFN(self, ev):
v = wx.GetTextFromUser("assign filename to varname:", 'new variable')
if not v:
return
import __main__
try:
exec('%s = %s' % (v,self.txt), __main__.__dict__)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error when assigning filename to __main__.%s: %s - %s" %\
(v, str(e[0]), str(e[1]) ),
"Bad Varname !?",
style=wx.ICON_ERROR)
else:
from Priithon.all import Y
s = "%s = %s"% (v, self.txt)
Y.shellMessage("### %s\n"% (s,))
self.pyshell.addHistory(s)
def onAssignList(self, ev):
v = wx.GetTextFromUser("assign list to varname:", 'new variable')
if not v:
return
import __main__
try:
exec('%s = %s' % (v, self.fn_or_fns), __main__.__dict__)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error when assigning list to __main__.%s: %s - %s" %\
(v, str(e[0]), str(e[1]) ),
"Bad Varname !?",
style=wx.ICON_ERROR)
else:
from Priithon.all import Y
Y.shellMessage("### %s = <list of files>\n"% (v,))
def onAssignSeq(self, ev):
from Priithon.all import Y
v = wx.GetTextFromUser("assign image sequence to array varname:", 'new variable')
if not v:
return
import __main__
try:
exec('%s = U.loadImg_seq(%s)' % (v,self.fn_or_fns), __main__.__dict__)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error when loading and assigning img.seq. to __main__.%s: %s - %s" %\
(v, str(e[0]), str(e[1]) ),
"Bad Varname !?",
style=wx.ICON_ERROR)
else:
Y.shellMessage("### %s = U.loadImg_seq(<list of files>)\n"% (v,))
def onViewSeq(self, ev):
from Priithon.all import Y,U
try:
Y.view( U.loadImg_seq( self.fn_or_fns ) )
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error when loading image sequence: %s - %s" %\
(str(e[0]), str(e[1]) ),
"Non consistent image shapes !?",
style=wx.ICON_ERROR)
else:
s = "Y.view( U.loadImg_seq(<fileslist>) )"
Y.shellMessage("### %s\n"% s)
def onExe(self, ev):
import sys,os,__main__
p = os.path.dirname( self.fn_or_fns )
sys.path.insert(0, p)
try:
try:
self.pyshell.addHistory("execfile(r\"%s\")"%(self.fn_or_fns,))
exec(compile(open(self.fn_or_fns).read(), self.fn_or_fns, 'exec'), __main__.__dict__)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
e = sys.exc_info()
wx.MessageBox("Error on execfile: %s - %s" %\
(str(e[0]), str(e[1]) ),
"Bad Varname !?",
style=wx.ICON_ERROR)
else:
from Priithon.all import Y
Y.shellMessage("### execfile('%s')\n"%(self.fn_or_fns,))
self.pyshell.addHistory("execfile('%s')\n"%(self.fn_or_fns,))
finally:
#20090319 del sys.path[0]
sys.path.remove(p)
def onImport(self, ev):
import sys,os, __main__
p = os.path.dirname( self.fn_or_fns )
sys.path.insert(0, p)
try:
try:
mod = os.path.basename( self.fn_or_fns )
mod = os.path.splitext( mod )[0]
exec(('import %s' % mod), __main__.__dict__)
self.pyshell.addHistory("import %s"%mod)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error on import: %s - %s" %\
(str(e[0]), str(e[1]) ),
"Bad Varname !?",
style=wx.ICON_ERROR)
else:
from Priithon.all import Y
Y.shellMessage("### import %s\n"% (mod,))
finally:
if wx.MessageBox("leave '%s' in front of sys.path ?" % (p,),
"Python import search path:", style=wx.YES_NO) != wx.YES:
#20090319 del sys.path[0]
sys.path.remove(p)
def onImportAs(self, ev):
v = wx.GetTextFromUser("import module as :", 'new mod name')
if not v:
return
import sys,os, __main__
p = os.path.dirname( self.fn_or_fns )
sys.path.insert(0, p)
try:
try:
mod = os.path.basename( self.fn_or_fns )
mod = os.path.splitext( mod )[0]
s = 'import %s as %s' % (mod, v)
exec((s), __main__.__dict__)
self.pyshell.addHistory(s)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error on 'import %s as %s': %s - %s" %\
(mod, v, str(e[0]), str(e[1]) ),
"Bad Varname !?",
style=wx.ICON_ERROR)
else:
from Priithon.all import Y
Y.shellMessage("### import %s as %s\n"% (mod,v))
finally:
if wx.MessageBox("leave '%s' in front of sys.path ?" % (p,),
"Python import search path:", style=wx.YES_NO) != wx.YES:
#20090319 del sys.path[0]
sys.path.remove(p)
def onEditor2(self, ev):
self.onEditor(ev, checkPyFile=False)
def onEditor(self, ev, checkPyFile=True):
import sys,os, __main__
try:
#mod = os.path.basename( filenames )
mod = self.fn_or_fns
if checkPyFile:
mod = os.path.splitext( mod )[0]
mod += '.py'
if not os.path.isfile(mod):
r = wx.MessageBox("do you want to start editing a new .py-file ?",
"py file not found !",
style=wx.CENTER|wx.YES_NO|wx.CANCEL|wx.ICON_EXCLAMATION)
if r != wx.YES:
return
from Priithon.all import Y
Y.editor(mod)
self.pyshell.addHistory("Y.editor( %s )"%mod)
except:
if NO_SPECIAL_GUI_EXCEPT:
raise
import sys
e = sys.exc_info()
wx.MessageBox("Error on starting Y.editor: %s - %s" %\
(str(e[0]), str(e[1]) ),
"Error !?",
style=wx.ICON_ERROR)
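if __name__ == '__main__':
    # Minimal attachment sketch (the frame below is hypothetical and not part of
    # Priithon itself): dropping files onto the frame pops up the choice menu.
    # Menu entries that talk to the pyshell assume a Priithon shell is available
    # as __main__.shell; without one, those actions will fail.
    app = wx.App(False)
    frame = wx.Frame(None, title="drop files here")
    frame.SetDropTarget(FileDropTarget(frame))
    frame.Show()
    app.MainLoop()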
|
johnmwalters/ThinkStats2 | refs/heads/master | code/thinkstats2_test.py | 66 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import unittest
import random
from collections import Counter
import numpy as np
import thinkstats2
import thinkplot
class Test(unittest.TestCase):
def testOdds(self):
p = 0.75
o = thinkstats2.Odds(p)
self.assertEqual(o, 3)
p = thinkstats2.Probability(o)
self.assertEqual(p, 0.75)
p = thinkstats2.Probability2(3, 1)
self.assertEqual(p, 0.75)
def testMean(self):
t = [1, 1, 1, 3, 3, 591]
mean = thinkstats2.Mean(t)
self.assertEqual(mean, 100)
def testVar(self):
t = [1, 1, 1, 3, 3, 591]
mean = thinkstats2.Mean(t)
var1 = thinkstats2.Var(t)
var2 = thinkstats2.Var(t, mean)
self.assertAlmostEqual(mean, 100.0)
self.assertAlmostEqual(var1, 48217.0)
self.assertAlmostEqual(var2, 48217.0)
def testMeanVar(self):
t = [1, 1, 1, 3, 3, 591]
mean, var = thinkstats2.MeanVar(t)
self.assertAlmostEqual(mean, 100.0)
self.assertAlmostEqual(var, 48217.0)
def testBinomialCoef(self):
res = thinkstats2.BinomialCoef(10, 3)
self.assertEqual(round(res), 120)
res = thinkstats2.BinomialCoef(100, 4)
self.assertEqual(round(res), 3921225)
def testInterpolator(self):
xs = [1, 2, 3]
ys = [4, 5, 6]
interp = thinkstats2.Interpolator(xs, ys)
y = interp.Lookup(1)
self.assertAlmostEqual(y, 4)
y = interp.Lookup(2)
self.assertAlmostEqual(y, 5)
y = interp.Lookup(3)
self.assertAlmostEqual(y, 6)
y = interp.Lookup(1.5)
self.assertAlmostEqual(y, 4.5)
y = interp.Lookup(2.75)
self.assertAlmostEqual(y, 5.75)
x = interp.Reverse(4)
self.assertAlmostEqual(x, 1)
x = interp.Reverse(6)
self.assertAlmostEqual(x, 3)
x = interp.Reverse(4.5)
self.assertAlmostEqual(x, 1.5)
x = interp.Reverse(5.75)
self.assertAlmostEqual(x, 2.75)
def testTrim(self):
t = list(range(100))
random.shuffle(t)
trimmed = thinkstats2.Trim(t, p=0.05)
n = len(trimmed)
self.assertEqual(n, 90)
def testHist(self):
hist = thinkstats2.Hist('allen')
self.assertEqual(len(str(hist)), 38)
self.assertEqual(len(hist), 4)
self.assertEqual(hist.Freq('l'), 2)
hist = thinkstats2.Hist(Counter('allen'))
self.assertEqual(len(hist), 4)
self.assertEqual(hist.Freq('l'), 2)
hist2 = thinkstats2.Hist('nella')
self.assertEqual(hist, hist2)
def testPmf(self):
pmf = thinkstats2.Pmf('allen')
# this one might not be a robust test
self.assertEqual(len(str(pmf)), 45)
self.assertEqual(len(pmf), 4)
self.assertEqual(pmf.Prob('l'), 0.4)
self.assertEqual(pmf['l'], 0.4)
self.assertEqual(pmf.Percentile(50), 'l')
pmf = thinkstats2.Pmf(Counter('allen'))
self.assertEqual(len(pmf), 4)
self.assertEqual(pmf.Prob('l'), 0.4)
pmf = thinkstats2.Pmf(pmf)
self.assertEqual(len(pmf), 4)
self.assertEqual(pmf.Prob('l'), 0.4)
pmf2 = pmf.Copy()
self.assertEqual(pmf, pmf2)
xs, ys = pmf.Render()
self.assertEqual(tuple(xs), tuple(sorted(pmf.Values())))
def testPmfAddSub(self):
pmf = thinkstats2.Pmf([1, 2, 3, 4, 5, 6])
pmf1 = pmf + 1
self.assertAlmostEqual(pmf1.Mean(), 4.5)
pmf2 = pmf + pmf
self.assertAlmostEqual(pmf2.Mean(), 7.0)
pmf3 = pmf - 1
self.assertAlmostEqual(pmf3.Mean(), 2.5)
pmf4 = pmf - pmf
self.assertAlmostEqual(pmf4.Mean(), 0)
def testPmfMulDiv(self):
pmf = thinkstats2.Pmf([1, 2, 3, 4, 5, 6])
pmf1 = pmf * 2
self.assertAlmostEqual(pmf1.Mean(), 7)
pmf2 = pmf * pmf
self.assertAlmostEqual(pmf2.Mean(), 12.25)
pmf3 = pmf / 2
self.assertAlmostEqual(pmf3.Mean(), 1.75)
pmf4 = pmf / pmf
self.assertAlmostEqual(pmf4.Mean(), 1.4291667)
def testPmfProbLess(self):
d6 = thinkstats2.Pmf(range(1,7))
self.assertEqual(d6.ProbLess(4), 0.5)
self.assertEqual(d6.ProbGreater(3), 0.5)
two = d6 + d6
three = two + d6
self.assertAlmostEqual(two > three, 0.15200617284)
self.assertAlmostEqual(two < three, 0.778549382716049)
self.assertAlmostEqual(two.ProbGreater(three), 0.15200617284)
self.assertAlmostEqual(two.ProbLess(three), 0.778549382716049)
def testPmfMax(self):
d6 = thinkstats2.Pmf(range(1,7))
two = d6 + d6
three = two + d6
cdf = three.Max(6)
thinkplot.Cdf(cdf)
self.assertAlmostEqual(cdf[14], 0.558230962626)
def testCdf(self):
t = [1, 2, 2, 3, 5]
pmf = thinkstats2.Pmf(t)
hist = thinkstats2.Hist(t)
cdf = thinkstats2.Cdf(pmf)
self.assertEqual(len(str(cdf)), 37)
self.assertEqual(cdf[0], 0)
self.assertAlmostEqual(cdf[1], 0.2)
self.assertAlmostEqual(cdf[2], 0.6)
self.assertAlmostEqual(cdf[3], 0.8)
self.assertAlmostEqual(cdf[4], 0.8)
self.assertAlmostEqual(cdf[5], 1)
self.assertAlmostEqual(cdf[6], 1)
xs = range(7)
ps = cdf.Probs(xs)
for p1, p2 in zip(ps, [0, 0.2, 0.6, 0.8, 0.8, 1, 1]):
self.assertAlmostEqual(p1, p2)
self.assertEqual(cdf.Value(0), 1)
self.assertEqual(cdf.Value(0.1), 1)
self.assertEqual(cdf.Value(0.2), 1)
self.assertEqual(cdf.Value(0.3), 2)
self.assertEqual(cdf.Value(0.4), 2)
self.assertEqual(cdf.Value(0.5), 2)
self.assertEqual(cdf.Value(0.6), 2)
self.assertEqual(cdf.Value(0.7), 3)
self.assertEqual(cdf.Value(0.8), 3)
self.assertEqual(cdf.Value(0.9), 5)
self.assertEqual(cdf.Value(1), 5)
ps = np.linspace(0, 1, 11)
xs = cdf.ValueArray(ps)
self.assertTrue((xs == [1, 1, 1, 2, 2, 2, 2, 3, 3, 5, 5]).all())
np.random.seed(17)
xs = cdf.Sample(7)
self.assertListEqual(xs.tolist(), [2, 2, 1, 1, 3, 3, 3])
# when you make a Cdf from a Pdf, you might get some floating
# point representation error
self.assertEqual(len(cdf), 4)
self.assertAlmostEqual(cdf.Prob(2), 0.6)
self.assertAlmostEqual(cdf[2], 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.MakeCdfFromPmf(pmf)
self.assertEqual(len(cdf), 4)
self.assertAlmostEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.MakeCdfFromItems(pmf.Items())
self.assertEqual(len(cdf), 4)
self.assertAlmostEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.Cdf(pmf.d)
self.assertEqual(len(cdf), 4)
self.assertAlmostEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.MakeCdfFromDict(pmf.d)
self.assertEqual(len(cdf), 4)
self.assertAlmostEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.Cdf(hist)
self.assertEqual(len(cdf), 4)
self.assertEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.MakeCdfFromHist(hist)
self.assertEqual(len(cdf), 4)
self.assertEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.Cdf(t)
self.assertEqual(len(cdf), 4)
self.assertEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.MakeCdfFromList(t)
self.assertEqual(len(cdf), 4)
self.assertEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf = thinkstats2.Cdf(Counter(t))
self.assertEqual(len(cdf), 4)
self.assertEqual(cdf.Prob(2), 0.6)
self.assertEqual(cdf.Value(0.6), 2)
cdf2 = cdf.Copy()
self.assertEqual(cdf2.Prob(2), 0.6)
self.assertEqual(cdf2.Value(0.6), 2)
def testShift(self):
t = [1, 2, 2, 3, 5]
cdf = thinkstats2.Cdf(t)
cdf2 = cdf.Shift(1)
self.assertEqual(cdf[1], cdf2[2])
def testScale(self):
t = [1, 2, 2, 3, 5]
cdf = thinkstats2.Cdf(t)
cdf2 = cdf.Scale(2)
self.assertEqual(cdf[2], cdf2[4])
def testCdfRender(self):
t = [1, 2, 2, 3, 5]
cdf = thinkstats2.Cdf(t)
xs, ps = cdf.Render()
self.assertEqual(xs[0], 1)
self.assertEqual(ps[2], 0.2)
self.assertEqual(sum(xs), 22)
self.assertEqual(sum(ps), 4.2)
def testPmfFromCdf(self):
t = [1, 2, 2, 3, 5]
pmf = thinkstats2.Pmf(t)
cdf = thinkstats2.Cdf(pmf)
pmf2 = thinkstats2.Pmf(cdf)
for x in pmf.Values():
self.assertAlmostEqual(pmf[x], pmf2[x])
pmf3 = cdf.MakePmf()
for x in pmf.Values():
self.assertAlmostEqual(pmf[x], pmf3[x])
def testNormalPdf(self):
pdf = thinkstats2.NormalPdf(mu=1, sigma=2)
self.assertEqual(len(str(pdf)), 29)
self.assertAlmostEqual(pdf.Density(3), 0.12098536226)
pmf = pdf.MakePmf()
self.assertAlmostEqual(pmf[1.0], 0.0239951295619)
xs, ps = pdf.Render()
self.assertEqual(xs[0], -5.0)
self.assertAlmostEqual(ps[0], 0.0022159242059690038)
pmf = thinkstats2.Pmf(pdf)
self.assertAlmostEqual(pmf[1.0], 0.0239951295619)
xs, ps = pmf.Render()
self.assertEqual(xs[0], -5.0)
self.assertAlmostEqual(ps[0], 0.00026656181123)
cdf = thinkstats2.Cdf(pdf)
self.assertAlmostEqual(cdf[1.0], 0.51199756478094904)
xs, ps = cdf.Render()
self.assertEqual(xs[0], -5.0)
self.assertAlmostEqual(ps[0], 0.0)
def testExponentialPdf(self):
pdf = thinkstats2.ExponentialPdf(lam=0.5)
self.assertEqual(len(str(pdf)), 24)
self.assertAlmostEqual(pdf.Density(3), 0.11156508007421491)
pmf = pdf.MakePmf()
self.assertAlmostEqual(pmf[1.0], 0.02977166586593202)
xs, ps = pdf.Render()
self.assertEqual(xs[0], 0.0)
self.assertAlmostEqual(ps[0], 0.5)
def testEstimatedPdf(self):
pdf = thinkstats2.EstimatedPdf([1, 2, 2, 3, 5])
self.assertEqual(len(str(pdf)), 30)
self.assertAlmostEqual(pdf.Density(3)[0], 0.19629968)
pmf = pdf.MakePmf()
self.assertAlmostEqual(pmf[1.0], 0.010172282816895044)
pmf = pdf.MakePmf(low=0, high=6)
self.assertAlmostEqual(pmf[0.0], 0.0050742294053582942)
def testEvalNormalCdf(self):
p = thinkstats2.EvalNormalCdf(0)
self.assertAlmostEqual(p, 0.5)
p = thinkstats2.EvalNormalCdf(2, 2, 3)
self.assertAlmostEqual(p, 0.5)
p = thinkstats2.EvalNormalCdf(1000, 0, 1)
self.assertAlmostEqual(p, 1.0)
p = thinkstats2.EvalNormalCdf(-1000, 0, 1)
self.assertAlmostEqual(p, 0.0)
x = thinkstats2.EvalNormalCdfInverse(0.95, 0, 1)
self.assertAlmostEqual(x, 1.64485362695)
x = thinkstats2.EvalNormalCdfInverse(0.05, 0, 1)
self.assertAlmostEqual(x, -1.64485362695)
def testEvalPoissonPmf(self):
p = thinkstats2.EvalPoissonPmf(2, 1)
self.assertAlmostEqual(p, 0.1839397205)
def testCov(self):
t = [0, 4, 7, 3, 8, 1, 6, 2, 9, 5]
a = np.array(t)
t2 = [5, 4, 3, 0, 8, 9, 7, 6, 2, 1]
self.assertAlmostEqual(thinkstats2.Cov(t, a), 8.25)
self.assertAlmostEqual(thinkstats2.Cov(t, -a), -8.25)
self.assertAlmostEqual(thinkstats2.Corr(t, a), 1)
self.assertAlmostEqual(thinkstats2.Corr(t, -a), -1)
self.assertAlmostEqual(thinkstats2.Corr(t, t2), -0.1878787878)
self.assertAlmostEqual(thinkstats2.SpearmanCorr(t, -a), -1)
self.assertAlmostEqual(thinkstats2.SpearmanCorr(t, t2), -0.1878787878)
def testReadStataDct(self):
dct = thinkstats2.ReadStataDct('2002FemPreg.dct')
self.assertEqual(len(dct.variables), 243)
self.assertEqual(len(dct.colspecs), 243)
self.assertEqual(len(dct.names), 243)
self.assertEqual(dct.colspecs[-1][1], -1)
if __name__ == "__main__":
unittest.main()
|
jtrobec/pants | refs/heads/master | src/python/pants/option/option_value_container.py | 4 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
from pants.option.ranked_value import RankedValue
class OptionValueContainer(object):
"""A container for option values.
Implements "value ranking":
Attribute values can be ranked, so that a given attribute's value can only be changed if
the new value has at least as high a rank as the old value. This allows an option value in
an outer scope to override that option's value in an inner scope, when the outer scope's
value comes from a higher ranked source (e.g., the outer value comes from an env var and
the inner one from config).
See ranked_value.py for more details.
"""
def __init__(self):
self._value_map = {} # key -> either raw value or RankedValue wrapping the raw value.
def get_rank(self, key):
"""Returns the rank of the value at the specified key.
Returns one of the constants in RankedValue.
"""
return self._value_map.get(key).rank
def is_flagged(self, key):
"""Returns `True` if the value for the specified key was supplied via a flag.
A convenience equivalent to `get_rank(key) == RankedValue.FLAG`.
This check can be useful to determine whether or not a user explicitly set an option for this
run. Although a user might also set an option explicitly via an environment variable, ie via:
`ENV_VAR=value ./pants ...`, this is an ambiguous case since the environment variable could also
be permanently set in the user's environment.
:param string key: The name of the option to check.
:returns: `True` if the option was explicitly flagged by the user from the command line.
:rtype: bool
"""
return self.get_rank(key) == RankedValue.FLAG
def is_default(self, key):
"""Returns `True` if the value for the specified key was not supplied by the user.
I.e. the option was NOT specified in config files, on the CLI, or in environment variables.
:param string key: The name of the option to check.
:returns: `True` if the user did not set the value for this option.
:rtype: bool
"""
return self.get_rank(key) in (RankedValue.NONE, RankedValue.HARDCODED)
def update(self, attrs):
"""Set attr values on this object from the data in the attrs dict."""
for k, v in attrs.items():
self._set(k, v)
def get(self, key, default=None):
# Support dict-like dynamic access. See also __getitem__ below.
if key in self._value_map:
return self._get_underlying_value(key)
else:
return default
def _get_underlying_value(self, key):
# Note that the key may exist with a value of None, so we can't just
# test self._value_map.get() for None.
if key not in self._value_map:
raise AttributeError(key)
val = self._value_map[key]
if isinstance(val, RankedValue):
return val.value
else:
return val
def _set(self, key, value):
if key in self._value_map:
existing_value = self._value_map[key]
existing_rank = existing_value.rank
else:
existing_rank = RankedValue.NONE
if isinstance(value, RankedValue):
new_rank = value.rank
else:
raise AttributeError('Value must be of type RankedValue: {}'.format(value))
if new_rank >= existing_rank:
# We set values from outer scopes before values from inner scopes, so
# in case of equal rank we overwrite. That way the inner scope value wins.
self._value_map[key] = value
# Support natural dynamic access, e.g., opts[foo] is more idiomatic than getattr(opts, 'foo').
def __getitem__(self, key):
return getattr(self, key)
# Support attribute setting, e.g., opts.foo = 42.
def __setattr__(self, key, value):
if key == '_value_map':
return super(OptionValueContainer, self).__setattr__(key, value)
self._set(key, value)
# Support attribute getting, e.g., foo = opts.foo.
# Note: Called only if regular attribute lookup fails,
# so method and member access will be handled the normal way.
def __getattr__(self, key):
if key == '_value_map':
# In case we get called in copy/deepcopy, which don't invoke the ctor.
raise AttributeError(key)
return self._get_underlying_value(key)
def __iter__(self):
"""Returns an iterator over all option names, in lexicographical order."""
for name in sorted(self._value_map.keys()):
yield name
def __copy__(self):
"""Ensure that a shallow copy has its own value map."""
ret = type(self)()
ret._value_map = copy.copy(self._value_map)
return ret
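if __name__ == '__main__':
  # Minimal sketch of the ranking behavior described above, assuming
  # RankedValue(rank, value) construction and the CONFIG/FLAG rank constants;
  # 'level' is a hypothetical option name used purely for illustration.
  opts = OptionValueContainer()
  opts.update({'level': RankedValue(RankedValue.CONFIG, 'info')})
  opts.update({'level': RankedValue(RankedValue.FLAG, 'debug')})   # higher rank overrides
  opts.update({'level': RankedValue(RankedValue.CONFIG, 'warn')})  # lower rank is ignored
  print(opts.level, opts.is_flagged('level'))  # expected: debug True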
|
zzqcn/wireshark | refs/heads/zzqcn | test/suite_decryption.py | 1 | #
# Wireshark tests
# By Gerald Combs <gerald@wireshark.org>
#
# Ported from a set of Bash scripts which were copyright 2005 Ulf Lamping
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''Decryption tests'''
import os.path
import shutil
import subprocess
import subprocesstest
import sys
import sysconfig
import types
import unittest
import fixtures
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_80211(subprocesstest.SubprocessTestCase):
def test_80211_wep(self, cmd_tshark, capture_file):
'''IEEE 802.11 WEP'''
# Included in git sources test/captures/wep.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wep.pcapng.gz'),
))
self.assertTrue(self.grepOutput('Who has 192.168.5.1'))
self.assertTrue(self.grepOutput(r'Echo \(ping\) request'))
def test_80211_wpa_psk(self, cmd_tshark, capture_file):
'''IEEE 802.11 WPA PSK'''
# https://gitlab.com/wireshark/wireshark/-/wikis/SampleCaptures?action=AttachFile&do=view&target=wpa-Induction.pcap
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-Tfields',
'-e', 'http.request.uri',
'-r', capture_file('wpa-Induction.pcap.gz'),
'-Y', 'http',
))
self.assertTrue(self.grepOutput('favicon.ico'))
def test_80211_wpa_eap(self, cmd_tshark, capture_file):
'''IEEE 802.11 WPA EAP (EAPOL Rekey)'''
# Included in git sources test/captures/wpa-eap-tls.pcap.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-eap-tls.pcap.gz'),
'-Y', 'wlan.analysis.tk==7d9987daf5876249b6c773bf454a0da7',
))
self.assertTrue(self.grepOutput('Group Message'))
def test_80211_wpa_eapol_incomplete_rekeys(self, cmd_tshark, capture_file):
'''WPA decode with message1+2 only and secure bit set on message 2'''
# Included in git sources test/captures/wpa-test-decode.pcap.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-test-decode.pcap.gz'),
'-Y', 'icmp.resp_to == 4263',
))
self.assertTrue(self.grepOutput('Echo'))
def test_80211_wpa_psk_mfp(self, cmd_tshark, capture_file):
'''WPA decode management frames with MFP enabled (802.11w)'''
# Included in git sources test/captures/wpa-test-decode-mgmt.pcap.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-test-decode-mgmt.pcap.gz'),
'-Y', 'wlan.fixed.reason_code == 2 || wlan.fixed.category_code == 3',
))
self.assertEqual(self.countOutput('802.11.*SN=.*FN=.*Flags='), 3)
def test_80211_wpa2_psk_mfp(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode WPA2 PSK with MFP enabled (802.11w)'''
# Included in git sources test/captures/wpa2-psk-mfp.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa2-psk-mfp.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 4e30e8c019bea43ea5262b10853b818d || wlan.analysis.gtk == 70cdbf2e5bc0ca22e53930818a5d80e4',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK is correct
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK is correct
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK is correct
def test_80211_wpa_tdls(self, cmd_tshark, capture_file, features):
'''WPA decode traffic in a TDLS (Tunneled Direct-Link Setup) session (802.11z)'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
# Included in git sources test/captures/wpa-test-decode-tdls.pcap.gz
self.assertRun((cmd_tshark,
#'-ouat:80211_keys:"wpa-pwd","12345678"',
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-test-decode-tdls.pcap.gz'),
'-Y', 'icmp',
))
self.assertEqual(self.countOutput('ICMP.*Echo .ping'), 2)
def test_80211_wpa3_personal(self, cmd_tshark, capture_file):
'''IEEE 802.11 decode WPA3 personal / SAE'''
# Included in git sources test/captures/wpa3-sae.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa3-sae.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 20a2e28f4329208044f4d7edca9e20a6 || wlan.analysis.gtk == 1fc82f8813160031d6bf87bca22b6354',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.18'))
self.assertTrue(self.grepOutput('DHCP ACK'))
def test_80211_owe(self, cmd_tshark, capture_file):
'''IEEE 802.11 decode OWE'''
# Included in git sources test/captures/owe.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('owe.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 10f3deccc00d5c8f629fba7a0fff34aa || wlan.analysis.gtk == 016b04ae9e6050bcc1f940dda9ffff2b',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.2'))
self.assertTrue(self.grepOutput('DHCP ACK'))
def test_80211_wpa3_suite_b_192(self, cmd_tshark, capture_file):
'''IEEE 802.11 decode WPA3 Suite B 192-bit'''
# Included in git sources test/captures/wpa3-suiteb-192.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa3-suiteb-192.pcapng.gz'),
'-Tfields',
'-e' 'wlan.rsn.ie.gtk.key',
'-e' 'wlan.analysis.kck',
'-e' 'wlan.analysis.kek',
))
# Verify that correct PTKs (KCK, KEK) are derived and GTK correctly dissected
self.assertEqual(self.countOutput('^29f92526ccda5a5dfa0ffa44c26f576ee2d45bae7c5f63369103b1edcab206ea\t' \
'f49ac1a15121f1a597a60a469870450a588ef1f73a1017b1\t' \
'0289b022b4f54262048d3493834ae591e811870c4520ee1395dd215a6092fbfb$'), 1)
self.assertEqual(self.countOutput('^29f92526ccda5a5dfa0ffa44c26f576ee2d45bae7c5f63369103b1edcab206ea\t' \
'1027c8d5b155ff574158bc50083e28f02e9636a2ac694901\t' \
'd4814a364419fa881a8593083f51497fe9e30556a91cc5d0b11cd2b3226038e1$'), 1)
self.assertEqual(self.countOutput('^29f92526ccda5a5dfa0ffa44c26f576ee2d45bae7c5f63369103b1edcab206ea\t' \
'35db5e208c9caff2a4e00a54c5346085abaa6f422ef6df81\t' \
'a14d0d683c01bc631bf142e82dc4995d87364eeacfab75d74cf470683bd10c51$'), 1)
def test_80211_wpa1_gtk_rekey(self, cmd_tshark, capture_file):
'''Decode WPA1 with multiple GTK rekeys'''
# Included in git sources test/captures/wpa1-gtk-rekey.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa1-gtk-rekey.pcapng.gz'),
'-Y', 'wlan.analysis.tk == "d0e57d224c1bb8806089d8c23154074c" || wlan.analysis.gtk == "6eaf63f4ad7997ced353723de3029f4d" || wlan.analysis.gtk == "fb42811bcb59b7845376246454fbdab7"',
))
self.assertTrue(self.grepOutput('DHCP Discover'))
self.assertEqual(self.countOutput('ICMP.*Echo .ping'), 8)
def test_80211_wpa_extended_key_id_rekey(self, cmd_tshark, capture_file):
'''WPA decode for Extended Key ID'''
# Included in git sources test/captures/wpa_ptk_extended_key_id.pcap.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa_ptk_extended_key_id.pcap.gz'),
'-Tfields',
'-e', 'wlan.fc.type_subtype',
'-e', 'wlan.ra',
'-e', 'wlan.analysis.tk',
'-e', 'wlan.analysis.gtk',
'-e', 'wlan.rsn.ie.ptk.keyid',
))
# Verify frames are decoded with the correct key
self.assertEqual(self.countOutput('^32\t33:33:00:00:00:16\t\t234a9a6ddcca3cb728751cea49d01bb0\t$'), 5)
self.assertEqual(self.countOutput('^32\t33:33:ff:00:00:00\t\t234a9a6ddcca3cb728751cea49d01bb0\t$'), 1)
self.assertEqual(self.countOutput('^32\t33:33:ff:00:03:00\t\t234a9a6ddcca3cb728751cea49d01bb0\t$'), 1)
self.assertEqual(self.countOutput('^32\tff:ff:ff:ff:ff:ff\t\t234a9a6ddcca3cb728751cea49d01bb0\t$'), 4)
self.assertEqual(self.countOutput('^40\t02:00:00:00:03:00\t618b4d1829e2a496d7fd8c034a6d024d\t\t$'), 2)
self.assertEqual(self.countOutput('^40\t02:00:00:00:00:00\t618b4d1829e2a496d7fd8c034a6d024d\t\t$'), 1)
# Verify RSN PTK KeyID parsing
self.assertEqual(self.countOutput('^40\t02:00:00:00:00:00\t\t\t1$'), 1)
self.assertEqual(self.countOutput('^40\t02:00:00:00:00:00\tf31ecff5452f4c286cf66ef50d10dabe\t\t0$'), 1)
self.assertEqual(self.countOutput('^40\t02:00:00:00:00:00\t28dd851decf3f1c2a35df8bcc22fa1d2\t\t1$'), 1)
def test_80211_wpa_ccmp_256(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode CCMP-256'''
# Included in git sources test/captures/wpa-ccmp-256.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-ccmp-256.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 4e6abbcf9dc0943936700b6825952218f58a47dfdf51dbb8ce9b02fd7d2d9e40 || wlan.analysis.gtk == 502085ca205e668f7e7c61cdf4f731336bb31e4f5b28ec91860174192e9b2190',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK is correct
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK is correct
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK is correct
def test_80211_wpa_gcmp(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode GCMP'''
# Included in git sources test/captures/wpa-gcmp.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-gcmp.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 755a9c1c9e605d5ff62849e4a17a935c || wlan.analysis.gtk == 7ff30f7a8dd67950eaaf2f20a869a62d',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK is correct
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK is correct
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK is correct
def test_80211_wpa_gcmp_256(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode GCMP-256'''
# Included in git sources test/captures/wpa-gcmp-256.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-gcmp-256.pcapng.gz'),
'-Y', 'wlan.analysis.tk == b3dc2ff2d88d0d34c1ddc421cea17f304af3c46acbbe7b6d808b6ebf1b98ec38 || wlan.analysis.gtk == a745ee2313f86515a155c4cb044bc148ae234b9c72707f772b69c2fede3e4016',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK is correct
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK is correct
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK is correct
@fixtures.mark_usefixtures('test_env_80211_user_tk')
@fixtures.uses_fixtures
class case_decrypt_80211_user_tk(subprocesstest.SubprocessTestCase):
def test_80211_user_tk_tkip(self, cmd_tshark, capture_file):
'''IEEE 802.11 decode TKIP using user TK'''
# Included in git sources test/captures/wpa1-gtk-rekey.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa1-gtk-rekey.pcapng.gz'),
'-Y', 'wlan.analysis.tk == "d0e57d224c1bb8806089d8c23154074c" || wlan.analysis.gtk == "6eaf63f4ad7997ced353723de3029f4d" || wlan.analysis.gtk == "fb42811bcb59b7845376246454fbdab7"',
))
self.assertTrue(self.grepOutput('DHCP Discover'))
self.assertEqual(self.countOutput('ICMP.*Echo .ping'), 8)
def test_80211_user_tk_ccmp(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode CCMP-128 using user TK'''
# Included in git sources test/captures/wpa2-psk-mfp.pcapng.gz
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa2-psk-mfp.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 4e30e8c019bea43ea5262b10853b818d || wlan.analysis.gtk == 70cdbf2e5bc0ca22e53930818a5d80e4',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK decryption
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK decryption
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK decryption
def test_80211_user_tk_ccmp_256(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode CCMP-256 using user TK'''
# Included in git sources test/captures/wpa-ccmp-256.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-ccmp-256.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 4e6abbcf9dc0943936700b6825952218f58a47dfdf51dbb8ce9b02fd7d2d9e40 || wlan.analysis.gtk == 502085ca205e668f7e7c61cdf4f731336bb31e4f5b28ec91860174192e9b2190',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK decryption
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK decryption
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK decryption
def test_80211_user_tk_gcmp(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode GCMP using user TK'''
# Included in git sources test/captures/wpa-gcmp.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-gcmp.pcapng.gz'),
'-Y', 'wlan.analysis.tk == 755a9c1c9e605d5ff62849e4a17a935c || wlan.analysis.gtk == 7ff30f7a8dd67950eaaf2f20a869a62d',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK decryption
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK decryption
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK decryption
def test_80211_wpa_gcmp_256(self, cmd_tshark, capture_file, features):
'''IEEE 802.11 decode GCMP-256 using user TK'''
# Included in git sources test/captures/wpa-gcmp-256.pcapng.gz
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.assertRun((cmd_tshark,
'-o', 'wlan.enable_decryption: TRUE',
'-r', capture_file('wpa-gcmp-256.pcapng.gz'),
'-Y', 'wlan.analysis.tk == b3dc2ff2d88d0d34c1ddc421cea17f304af3c46acbbe7b6d808b6ebf1b98ec38 || wlan.analysis.gtk == a745ee2313f86515a155c4cb044bc148ae234b9c72707f772b69c2fede3e4016',
))
self.assertTrue(self.grepOutput('Who has 192.168.5.5')) # Verifies GTK decryption
self.assertTrue(self.grepOutput('DHCP Request')) # Verifies TK decryption
self.assertTrue(self.grepOutput(r'Echo \(ping\) request')) # Verifies TK decryption
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_dtls(subprocesstest.SubprocessTestCase):
def test_dtls_rsa(self, cmd_tshark, capture_file, features):
'''DTLS'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
# https://gitlab.com/wireshark/wireshark/-/wikis/SampleCaptures?action=AttachFile&do=view&target=snakeoil.tgz
self.assertRun((cmd_tshark,
'-r', capture_file('snakeoil-dtls.pcap'),
'-Tfields',
'-e', 'data.data',
'-Y', 'data',
))
self.assertTrue(self.grepOutput('697420776f726b20210a'))
def test_dtls_psk_aes128ccm8(self, cmd_tshark, capture_file):
'''DTLS 1.2 with PSK, AES-128-CCM-8'''
self.assertRun((cmd_tshark,
'-r', capture_file('dtls12-aes128ccm8.pcap'),
'-o', 'dtls.psk:ca19e028a8a372ad2d325f950fcaceed',
'-x'
))
dt_count = self.countOutput('Decrypted DTLS')
wfm_count = self.countOutput('Works for me!.')
self.assertTrue(dt_count == 7 and wfm_count == 2)
def test_dtls_dsb_aes128ccm8(self, cmd_tshark, capture_file):
'''DTLS 1.2 with master secrets in a pcapng Decryption Secrets Block.'''
self.assertRun((cmd_tshark,
'-r', capture_file('dtls12-aes128ccm8-dsb.pcapng'),
'-x'
))
dt_count = self.countOutput('Decrypted DTLS')
wfm_count = self.countOutput('Works for me!.')
self.assertTrue(dt_count == 7 and wfm_count == 2)
def test_dtls_udt(self, cmd_tshark, dirs, capture_file, features):
'''UDT over DTLS 1.2 with RSA key'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
key_file = os.path.join(dirs.key_dir, 'udt-dtls.key')
self.assertRun((cmd_tshark,
'-r', capture_file('udt-dtls.pcapng.gz'),
'-o', 'dtls.keys_list:0.0.0.0,0,data,{}'.format(key_file),
'-Y', 'dtls && udt.type==ack',
))
self.assertTrue(self.grepOutput('UDT'))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_tls(subprocesstest.SubprocessTestCase):
def test_tls_rsa(self, cmd_tshark, capture_file, features):
'''TLS using the server's private RSA key.'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
# https://gitlab.com/wireshark/wireshark/-/wikis/SampleCaptures?action=AttachFile&do=view&target=snakeoil2_070531.tgz
self.assertRun((cmd_tshark,
'-r', capture_file('rsasnakeoil2.pcap'),
'-Tfields',
'-e', 'http.request.uri',
'-Y', 'http',
))
self.assertTrue(self.grepOutput('favicon.ico'))
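# tls.keys_list (and dtls.keys_list above) rows are: IP address, port,
# protocol, private key file path.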
def test_tls_rsa_pq(self, cmd_tshark, dirs, capture_file, features):
'''TLS using the server's private key with p < q
(test whether libgcrypt is correctly called)'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
key_file = os.path.join(dirs.key_dir, 'rsa-p-lt-q.key')
self.assertRun((cmd_tshark,
'-r', capture_file('rsa-p-lt-q.pcap'),
'-o', 'tls.keys_list:0.0.0.0,443,http,{}'.format(key_file),
'-Tfields',
'-e', 'http.request.uri',
'-Y', 'http',
))
self.assertTrue(self.grepOutput('/'))
def test_tls_rsa_privkeys_uat(self, cmd_tshark, dirs, capture_file, features):
'''Check TLS decryption works using the rsa_keys UAT.'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
key_file = os.path.join(dirs.key_dir, 'rsa-p-lt-q.key')
proc = self.assertRun((cmd_tshark,
'-r', capture_file('rsa-p-lt-q.pcap'),
'-o', 'uat:rsa_keys:"{}",""'.format(key_file.replace('\\', '\\x5c')),
'-Tfields',
'-e', 'http.request.uri',
'-Y', 'http',
))
self.assertIn('/', proc.stdout_str)
def test_tls_rsa_with_password(self, cmd_tshark, capture_file, features):
'''TLS using the server's private key with password'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
self.assertRun((cmd_tshark,
'-r', capture_file('dmgr.pcapng'),
'-Tfields',
'-e', 'http.request.uri',
'-Y', 'http',
))
self.assertTrue(self.grepOutput('unsecureLogon.jsp'))
def test_tls_master_secret(self, cmd_tshark, dirs, capture_file):
'''TLS using the master secret and ssl.keylog_file preference aliasing'''
key_file = os.path.join(dirs.key_dir, 'dhe1_keylog.dat')
self.assertRun((cmd_tshark,
'-r', capture_file('dhe1.pcapng.gz'),
'-o', 'ssl.keylog_file: {}'.format(key_file),
'-o', 'tls.desegment_ssl_application_data: FALSE',
'-o', 'http.tls.port: 443',
'-Tfields',
'-e', 'http.request.method',
'-e', 'http.request.uri',
'-e', 'http.request.version',
'-Y', 'http',
))
self.assertTrue(self.grepOutput(r'GET\s+/test\s+HTTP/1.0'))
def test_tls12_renegotiation(self, cmd_tshark, dirs, capture_file, features):
'''TLS 1.2 with renegotiation'''
if not features.have_gnutls:
self.skipTest('Requires GnuTLS.')
key_file = os.path.join(dirs.key_dir, 'rsasnakeoil2.key')
# Test protocol alias while at it (ssl -> tls)
self.assertRun((cmd_tshark,
'-r', capture_file('tls-renegotiation.pcap'),
'-o', 'tls.keys_list:0.0.0.0,4433,http,{}'.format(key_file),
'-d', 'tcp.port==4433,ssl',
'-Tfields',
'-e', 'http.content_length',
'-Y', 'http',
))
count_0 = self.countOutput('^0$')
count_2151 = self.countOutput('^2151$')
self.assertTrue(count_0 == 1 and count_2151 == 1)
def test_tls12_psk_aes128ccm(self, cmd_tshark, capture_file):
'''TLS 1.2 with PSK, AES-128-CCM'''
self.assertRun((cmd_tshark,
'-r', capture_file('tls12-aes128ccm.pcap'),
'-o', 'tls.psk:ca19e028a8a372ad2d325f950fcaceed',
'-q',
'-z', 'follow,tls,ascii,0',
))
self.assertTrue(self.grepOutput('http://www.gnu.org/software/gnutls'))
def test_tls12_psk_aes256gcm(self, cmd_tshark, capture_file):
'''TLS 1.2 with PSK, AES-256-GCM'''
self.assertRun((cmd_tshark,
'-r', capture_file('tls12-aes256gcm.pcap'),
'-o', 'tls.psk:ca19e028a8a372ad2d325f950fcaceed',
'-q',
'-z', 'follow,tls,ascii,0',
))
self.assertTrue(self.grepOutput('http://www.gnu.org/software/gnutls'))
def test_tls12_chacha20poly1305(self, cmd_tshark, dirs, features, capture_file):
'''TLS 1.2 with ChaCha20-Poly1305'''
if not features.have_libgcrypt17:
self.skipTest('Requires GCrypt 1.7 or later.')
key_file = os.path.join(dirs.key_dir, 'tls12-chacha20poly1305.keys')
ciphers=[
'ECDHE-ECDSA-CHACHA20-POLY1305',
'ECDHE-RSA-CHACHA20-POLY1305',
'DHE-RSA-CHACHA20-POLY1305',
'RSA-PSK-CHACHA20-POLY1305',
'DHE-PSK-CHACHA20-POLY1305',
'ECDHE-PSK-CHACHA20-POLY1305',
'PSK-CHACHA20-POLY1305',
]
stream = 0
for cipher in ciphers:
self.assertRun((cmd_tshark,
'-r', capture_file('tls12-chacha20poly1305.pcap'),
'-o', 'tls.keylog_file: {}'.format(key_file),
'-q',
'-z', 'follow,tls,ascii,{}'.format(stream),
))
stream += 1
self.assertTrue(self.grepOutput('Cipher is {}'.format(cipher)))
def test_tls13_chacha20poly1305(self, cmd_tshark, dirs, features, capture_file):
'''TLS 1.3 with ChaCha20-Poly1305'''
if not features.have_libgcrypt17:
self.skipTest('Requires GCrypt 1.7 or later.')
key_file = os.path.join(dirs.key_dir, 'tls13-20-chacha20poly1305.keys')
self.assertRun((cmd_tshark,
'-r', capture_file('tls13-20-chacha20poly1305.pcap'),
'-o', 'tls.keylog_file: {}'.format(key_file),
'-q',
'-z', 'follow,tls,ascii,0',
))
self.assertTrue(self.grepOutput('TLS13-CHACHA20-POLY1305-SHA256'))
def test_tls13_rfc8446(self, cmd_tshark, dirs, features, capture_file):
'''TLS 1.3 (normal session, then early data followed by normal data).'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
key_file = os.path.join(dirs.key_dir, 'tls13-rfc8446.keys')
proc = self.assertRun((cmd_tshark,
'-r', capture_file('tls13-rfc8446.pcap'),
'-otls.keylog_file:{}'.format(key_file),
'-Y', 'http',
'-Tfields',
'-e', 'frame.number',
'-e', 'http.request.uri',
'-e', 'http.file_data',
'-E', 'separator=|',
))
self.assertEqual([
r'5|/first|',
r'6||Request for /first, version TLSv1.3, Early data: no\n',
r'8|/early|',
r'10||Request for /early, version TLSv1.3, Early data: yes\n',
r'12|/second|',
r'13||Request for /second, version TLSv1.3, Early data: yes\n',
], proc.stdout_str.splitlines())
def test_tls13_rfc8446_noearly(self, cmd_tshark, dirs, features, capture_file):
'''TLS 1.3 (with undecryptable early data).'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
key_file = os.path.join(dirs.key_dir, 'tls13-rfc8446-noearly.keys')
proc = self.assertRun((cmd_tshark,
'-r', capture_file('tls13-rfc8446.pcap'),
'-otls.keylog_file:{}'.format(key_file),
'-Y', 'http',
'-Tfields',
'-e', 'frame.number',
'-e', 'http.request.uri',
'-e', 'http.file_data',
'-E', 'separator=|',
))
self.assertEqual([
r'5|/first|',
r'6||Request for /first, version TLSv1.3, Early data: no\n',
r'10||Request for /early, version TLSv1.3, Early data: yes\n',
r'12|/second|',
r'13||Request for /second, version TLSv1.3, Early data: yes\n',
], proc.stdout_str.splitlines())
def test_tls12_dsb(self, cmd_tshark, capture_file):
'''TLS 1.2 with master secrets in pcapng Decryption Secrets Blocks.'''
output = self.assertRun((cmd_tshark,
'-r', capture_file('tls12-dsb.pcapng'),
'-Tfields',
'-e', 'http.host',
'-e', 'http.response.code',
'-Y', 'http',
)).stdout_str
self.assertEqual('example.com\t\n\t200\nexample.net\t\n\t200\n', output)
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_zigbee(subprocesstest.SubprocessTestCase):
def test_zigbee(self, cmd_tshark, capture_file):
'''ZigBee'''
# https://gitlab.com/wireshark/wireshark/-/issues/7022
self.assertRun((cmd_tshark,
'-r', capture_file('sample_control4_2012-03-24.pcap'),
'-Tfields',
'-e', 'data.data',
'-Y', 'zbee_aps',
))
self.assertTrue(self.grepOutput('3067636338652063342e646d2e747620'))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_ansi_c1222(subprocesstest.SubprocessTestCase):
def test_ansi_c1222(self, cmd_tshark, capture_file):
'''ANSI C12.22'''
# https://gitlab.com/wireshark/wireshark/-/issues/9196
self.assertRun((cmd_tshark,
'-r', capture_file('c1222_std_example8.pcap'),
'-o', 'c1222.decrypt: TRUE',
'-o', 'c1222.baseoid: 2.16.124.113620.1.22.0',
'-Tfields',
'-e', 'c1222.data',
))
self.assertTrue(self.grepOutput('00104d414e55464143545552455220534e2092'))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_dvb_ci(subprocesstest.SubprocessTestCase):
def test_dvb_ci(self, cmd_tshark, capture_file):
'''DVB-CI'''
# simplified version of the sample capture in
# https://gitlab.com/wireshark/wireshark/-/issues/6700
self.assertRun((cmd_tshark,
'-r', capture_file('dvb-ci_UV1_0000.pcap'),
'-o', 'dvb-ci.sek: 00000000000000000000000000000000',
'-o', 'dvb-ci.siv: 00000000000000000000000000000000',
'-Tfields',
'-e', 'dvb-ci.cc.sac.padding',
))
self.assertTrue(self.grepOutput('800000000000000000000000'))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_ipsec(subprocesstest.SubprocessTestCase):
def test_ipsec_esp(self, cmd_tshark, capture_file):
'''IPsec ESP'''
# https://gitlab.com/wireshark/wireshark/-/issues/12671
self.assertRun((cmd_tshark,
'-r', capture_file('esp-bug-12671.pcapng.gz'),
'-o', 'esp.enable_encryption_decode: TRUE',
'-Tfields',
'-e', 'data.data',
))
self.assertTrue(self.grepOutput('08090a0b0c0d0e0f1011121314151617'))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_ike_isakmp(subprocesstest.SubprocessTestCase):
def test_ikev1_certs(self, cmd_tshark, capture_file):
'''IKEv1 (ISAKMP) with certificates'''
# https://gitlab.com/wireshark/wireshark/-/issues/7951
self.assertRun((cmd_tshark,
'-r', capture_file('ikev1-certs.pcap'),
'-Tfields',
'-e', 'x509sat.printableString',
))
self.assertTrue(self.grepOutput('OpenSwan'))
def test_ikev1_simultaneous(self, cmd_tshark, capture_file):
'''IKEv1 (ISAKMP) simultaneous exchanges'''
# https://gitlab.com/wireshark/wireshark/-/issues/12610
self.assertRun((cmd_tshark,
'-r', capture_file('ikev1-bug-12610.pcapng.gz'),
'-Tfields',
'-e', 'isakmp.hash',
))
self.assertTrue(self.grepOutput('b52521f774967402c9f6cee95fd17e5b'))
def test_ikev1_unencrypted(self, cmd_tshark, capture_file):
'''IKEv1 (ISAKMP) unencrypted phase 1'''
# https://gitlab.com/wireshark/wireshark/-/issues/12620
self.assertRun((cmd_tshark,
'-r', capture_file('ikev1-bug-12620.pcapng.gz'),
'-Tfields',
'-e', 'isakmp.hash',
))
self.assertTrue(self.grepOutput('40043b640f4373250d5ac3a1fb63153c'))
def test_ikev2_3des_sha160(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (3DES-CBC/SHA1_160)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-3des-sha1_160.pcap'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('02f7a0d5f1fdc8ea81039818c65bb9bd09af9b8917319b887ff9ba3046c344c7'))
def test_ikev2_aes128_ccm12(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-128-CCM-12) - with CBC-MAC verification'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes128ccm12.pcap'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('c2104394299e1ffe7908ea720ad5d13717a0d454e4fa0a2128ea689411f479c4'))
def test_ikev2_aes128_ccm12_2(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-128-CCM-12 using CTR mode, without checksum)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes128ccm12-2.pcap'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('aaa281c87b4a19046c57271d557488ca413b57228cb951f5fa9640992a0285b9'))
def test_ikev2_aes192ctr_sha512(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-192-CTR/SHA2-512)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes192ctr.pcap'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('3ec23dcf9348485638407c754547aeb3085290082c49f583fdbae59263a20b4a'))
def test_ikev2_aes256cbc_sha256(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-256-CBC/SHA2-256)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes256cbc.pcapng'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('e1a8d550064201a7ec024a85758d0673c61c5c510ac13bcd225d6327f50da3d3'))
def test_ikev2_aes256ccm16(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-256-CCM-16)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes256ccm16.pcapng'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('fa2e74bdc01e30fb0b3ddc9723c9449095969da51f69e560209d2c2b7940210a'))
def test_ikev2_aes256gcm16(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-256-GCM-16)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes256gcm16.pcap'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('9ab71f14ab553cad873a1aa70b99df155dee77cdcf3694b3b7527acbb9712ded'))
def test_ikev2_aes256gcm8(self, cmd_tshark, capture_file):
'''IKEv2 decryption test (AES-256-GCM-8)'''
self.assertRun((cmd_tshark,
'-r', capture_file('ikev2-decrypt-aes256gcm8.pcap'),
'-Tfields',
'-e', 'isakmp.auth.data',
))
self.assertTrue(self.grepOutput('4a66d822d0afbc22ad9a92a2cf4287c920ad8ac3b069a4a7e75fe0a5d499f914'))
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_http2(subprocesstest.SubprocessTestCase):
def test_http2(self, cmd_tshark, capture_file, features):
'''HTTP2 (HPACK)'''
if not features.have_nghttp2:
self.skipTest('Requires nghttp2.')
self.assertRun((cmd_tshark,
'-r', capture_file('packet-h2-14_headers.pcapng'),
'-Tfields',
'-e', 'http2.header.value',
'-d', 'tcp.port==3000,http2',
))
test_passed = self.grepOutput('nghttp2')
if not test_passed:
self.log_fd.write('\n\n-- Verbose output --\n\n')
self.assertRun((cmd_tshark,
'-r', capture_file('packet-h2-14_headers.pcapng'),
'-V',
'-d', 'tcp.port==3000,http2',
))
self.assertTrue(test_passed)
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_kerberos(subprocesstest.SubprocessTestCase):
def test_kerberos(self, cmd_tshark, dirs, features, capture_file):
'''Kerberos'''
# Files are from krb-816.zip on the SampleCaptures page.
if not features.have_kerberos:
self.skipTest('Requires kerberos.')
keytab_file = os.path.join(dirs.key_dir, 'krb-816.keytab')
self.assertRun((cmd_tshark,
'-r', capture_file('krb-816.pcap.gz'),
'-o', 'kerberos.decrypt: TRUE',
'-o', 'kerberos.file: {}'.format(keytab_file),
'-Tfields',
'-e', 'kerberos.keyvalue',
))
# keyvalue: ccda7d48219f73c3b28311c4ba7242b3
self.assertTrue(self.grepOutput('ccda7d48219f73c3b28311c4ba7242b3'))
@fixtures.fixture(scope='session')
def run_wireguard_test(cmd_tshark, capture_file, features):
if not features.have_libgcrypt18:
fixtures.skip('Requires Gcrypt 1.8 or later')
def runOne(self, args, keylog=None, pcap_file='wireguard-ping-tcp.pcap'):
if keylog:
keylog_file = self.filename_from_id('wireguard.keys')
args += ['-owg.keylog_file:%s' % keylog_file]
with open(keylog_file, 'w') as f:
f.write("\n".join(keylog))
proc = self.assertRun([cmd_tshark, '-r', capture_file(pcap_file)] + args)
lines = proc.stdout_str.splitlines()
return lines
return runOne
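# Static keys are configured below via the wg_keys UAT as ("Public"|"Private",
# base64 key) pairs; keylog entries passed to runOne() use the form
# NAME=<base64 value> with NAME one of LOCAL_STATIC_PRIVATE_KEY,
# REMOTE_STATIC_PUBLIC_KEY, LOCAL_EPHEMERAL_PRIVATE_KEY or PRESHARED_KEY
# (entries with spaces around '=' are also exercised).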
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_wireguard(subprocesstest.SubprocessTestCase):
# The "foo_alt" keys are similar as "foo" except that some bits are changed.
# The crypto library should be able to handle this and internally the
# dissector uses MSB to recognize whether a private key is set.
key_Spriv_i = 'AKeZaHwBxjiKLFnkY2unvEdOTtg4AL+M9dQXfopFVFk='
key_Spriv_i_alt = 'B6eZaHwBxjiKLFnkY2unvEdOTtg4AL+M9dQXfopFVJk='
key_Spub_i = 'Igge9KzRytKNwrgkzDE/8hrLu6Ly0OqVdvOPWhA5KR4='
key_Spriv_r = 'cFIxTUyBs1Qil414hBwEgvasEax8CKJ5IS5ZougplWs='
key_Spub_r = 'YDCttCs9e1J52/g9vEnwJJa+2x6RqaayAYMpSVQfGEY='
key_Epriv_i0 = 'sLGLJSOQfyz7JNJ5ZDzFf3Uz1rkiCMMjbWerNYcPFFU='
key_Epriv_i0_alt = 't7GLJSOQfyz7JNJ5ZDzFf3Uz1rkiCMMjbWerNYcPFJU='
key_Epriv_r0 = 'QC4/FZKhFf0b/eXEcCecmZNt6V6PXmRa4EWG1PIYTU4='
key_Epriv_i1 = 'ULv83D+y3vA0t2mgmTmWz++lpVsrP7i4wNaUEK2oX0E='
key_Epriv_r1 = 'sBv1dhsm63cbvWMv/XML+bvynBp9PTdY9Vvptu3HQlg='
# Ephemeral keys and PSK for wireguard-psk.pcap
key_Epriv_i2 = 'iCv2VTi/BC/q0egU931KXrrQ4TSwXaezMgrhh7uCbXs='
key_Epriv_r2 = '8G1N3LnEqYC7+NW/b6mqceVUIGBMAZSm+IpwG1U0j0w='
key_psk2 = '//////////////////////////////////////////8='
key_Epriv_i3 = '+MHo9sfkjPsjCx7lbVhRLDvMxYvTirOQFDSdzAW6kUQ='
key_Epriv_r3 = '0G6t5j1B/We65MXVEBIGuRGYadwB2ITdvJovtAuATmc='
key_psk3 = 'iIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIg='
# dummy key that should not work with anything.
key_dummy = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx='
def test_mac1_public(self, run_wireguard_test):
"""Check that MAC1 identification using public keys work."""
lines = run_wireguard_test(self, [
'-ouat:wg_keys:"Public","%s"' % self.key_Spub_i,
'-ouat:wg_keys:"Public","%s"' % self.key_Spub_r,
'-Y', 'wg.receiver_pubkey',
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.receiver_pubkey',
'-e', 'wg.receiver_pubkey.known_privkey',
])
self.assertEqual(4, len(lines))
self.assertIn('1\t%s\t0' % self.key_Spub_r, lines)
self.assertIn('2\t%s\t0' % self.key_Spub_i, lines)
self.assertIn('13\t%s\t0' % self.key_Spub_r, lines)
self.assertIn('14\t%s\t0' % self.key_Spub_i, lines)
def test_mac1_private(self, run_wireguard_test):
"""Check that MAC1 identification using private keys work."""
lines = run_wireguard_test(self, [
'-ouat:wg_keys:"Private","%s"' % self.key_Spriv_i,
'-ouat:wg_keys:"Private","%s"' % self.key_Spriv_r,
'-Y', 'wg.receiver_pubkey',
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.receiver_pubkey',
'-e', 'wg.receiver_pubkey.known_privkey',
])
self.assertEqual(4, len(lines))
self.assertIn('1\t%s\t1' % self.key_Spub_r, lines)
self.assertIn('2\t%s\t1' % self.key_Spub_i, lines)
self.assertIn('13\t%s\t1' % self.key_Spub_r, lines)
self.assertIn('14\t%s\t1' % self.key_Spub_i, lines)
def test_decrypt_initiation_sprivr(self, run_wireguard_test):
"""Check for partial decryption using Spriv_r."""
lines = run_wireguard_test(self, [
'-ouat:wg_keys:"Private","%s"' % self.key_Spriv_r,
'-Y', 'wg.type==1',
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.static',
'-e', 'wg.static.known_pubkey',
'-e', 'wg.static.known_privkey',
'-e', 'wg.timestamp.nanoseconds',
])
# static pubkey is unknown because Spub_i is not added to wg_keys.
self.assertIn('1\t%s\t0\t0\t%s' % (self.key_Spub_i, '356537872'), lines)
self.assertIn('13\t%s\t0\t0\t%s' % (self.key_Spub_i, '490514356'), lines)
def test_decrypt_initiation_ephemeral_only(self, run_wireguard_test):
"""Check for partial decryption using Epriv_i."""
lines = run_wireguard_test(self, [
'-ouat:wg_keys:"Public","%s"' % self.key_Spub_r,
'-Y', 'wg.type==1',
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.ephemeral.known_privkey',
'-e', 'wg.static',
'-e', 'wg.timestamp.nanoseconds',
], keylog=[
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_i0,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_i1,
])
# The current implementation tries to write as much decrypted data as
# possible, even if the full handshake cannot be derived.
self.assertIn('1\t1\t%s\t%s' % (self.key_Spub_i, ''), lines)
self.assertIn('13\t1\t%s\t%s' % (self.key_Spub_i, ''), lines)
def test_decrypt_full_initiator(self, run_wireguard_test):
"""
Check for full handshake decryption using Spriv_r + Epriv_i.
The public key Spub_r is provided via the key log as well.
"""
lines = run_wireguard_test(self, [
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.ephemeral.known_privkey',
'-e', 'wg.static',
'-e', 'wg.timestamp.nanoseconds',
'-e', 'wg.handshake_ok',
'-e', 'icmp.type',
'-e', 'tcp.dstport',
], keylog=[
' REMOTE_STATIC_PUBLIC_KEY = %s' % self.key_Spub_r,
' LOCAL_STATIC_PRIVATE_KEY = %s' % self.key_Spriv_i_alt,
' LOCAL_EPHEMERAL_PRIVATE_KEY = %s' % self.key_Epriv_i0_alt,
' LOCAL_EPHEMERAL_PRIVATE_KEY = %s' % self.key_Epriv_i1,
])
self.assertIn('1\t1\t%s\t%s\t\t\t' % (self.key_Spub_i, '356537872'), lines)
self.assertIn('2\t0\t\t\t1\t\t', lines)
self.assertIn('3\t\t\t\t\t8\t', lines)
self.assertIn('4\t\t\t\t\t0\t', lines)
self.assertIn('13\t1\t%s\t%s\t\t\t' % (self.key_Spub_i, '490514356'), lines)
self.assertIn('14\t0\t\t\t1\t\t', lines)
self.assertIn('17\t\t\t\t\t\t443', lines)
self.assertIn('18\t\t\t\t\t\t49472', lines)
def test_decrypt_wg_full_initiator_dsb(self, run_wireguard_test):
"""
Similar to test_decrypt_full_initiator, but using decryption keys
embedded in the pcapng file. The embedded secrets contain neither leading
spaces nor spaces around the '=' character.
"""
lines = run_wireguard_test(self, [
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.ephemeral.known_privkey',
'-e', 'wg.static',
'-e', 'wg.timestamp.nanoseconds',
'-e', 'wg.handshake_ok',
'-e', 'icmp.type',
'-e', 'tcp.dstport',
], pcap_file='wireguard-ping-tcp-dsb.pcapng')
self.assertIn('1\t1\t%s\t%s\t\t\t' % (self.key_Spub_i, '356537872'), lines)
self.assertIn('2\t0\t\t\t1\t\t', lines)
self.assertIn('3\t\t\t\t\t8\t', lines)
self.assertIn('4\t\t\t\t\t0\t', lines)
self.assertIn('13\t1\t%s\t%s\t\t\t' % (self.key_Spub_i, '490514356'), lines)
self.assertIn('14\t0\t\t\t1\t\t', lines)
self.assertIn('17\t\t\t\t\t\t443', lines)
self.assertIn('18\t\t\t\t\t\t49472', lines)
def test_decrypt_full_responder(self, run_wireguard_test):
"""Check for full handshake decryption using responder secrets."""
lines = run_wireguard_test(self, [
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.ephemeral.known_privkey',
'-e', 'wg.static',
'-e', 'wg.timestamp.nanoseconds',
'-e', 'wg.handshake_ok',
'-e', 'icmp.type',
'-e', 'tcp.dstport',
], keylog=[
'REMOTE_STATIC_PUBLIC_KEY=%s' % self.key_Spub_i,
'LOCAL_STATIC_PRIVATE_KEY=%s' % self.key_Spriv_r,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_r0,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_r1,
])
self.assertIn('1\t0\t%s\t%s\t\t\t' % (self.key_Spub_i, '356537872'), lines)
self.assertIn('2\t1\t\t\t1\t\t', lines)
self.assertIn('3\t\t\t\t\t8\t', lines)
self.assertIn('4\t\t\t\t\t0\t', lines)
self.assertIn('13\t0\t%s\t%s\t\t\t' % (self.key_Spub_i, '490514356'), lines)
self.assertIn('14\t1\t\t\t1\t\t', lines)
self.assertIn('17\t\t\t\t\t\t443', lines)
self.assertIn('18\t\t\t\t\t\t49472', lines)
def test_decrypt_psk_initiator(self, run_wireguard_test):
"""Check whether PSKs enable decryption for initiation keys."""
lines = run_wireguard_test(self, [
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.handshake_ok',
], keylog=[
'REMOTE_STATIC_PUBLIC_KEY = %s' % self.key_Spub_r,
'LOCAL_STATIC_PRIVATE_KEY = %s' % self.key_Spriv_i,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_i2,
'PRESHARED_KEY=%s' % self.key_psk2,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_r3,
'PRESHARED_KEY=%s' % self.key_psk3,
], pcap_file='wireguard-psk.pcap')
self.assertIn('2\t1', lines)
self.assertIn('4\t1', lines)
def test_decrypt_psk_responder(self, run_wireguard_test):
"""Check whether PSKs enable decryption for responder keys."""
lines = run_wireguard_test(self, [
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.handshake_ok',
], keylog=[
'REMOTE_STATIC_PUBLIC_KEY=%s' % self.key_Spub_i,
'LOCAL_STATIC_PRIVATE_KEY=%s' % self.key_Spriv_r,
# Epriv_r2 needs psk2. This tests handling of duplicate ephemeral
# keys with multiple PSKs. It should not have adverse effects.
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_r2,
'PRESHARED_KEY=%s' % self.key_dummy,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_r2,
'PRESHARED_KEY=%s' % self.key_psk2,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_i3,
'PRESHARED_KEY=%s' % self.key_psk3,
# Epriv_i3 needs psk3; this tests that additional keys again have no
# bad side-effects.
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_i3,
'PRESHARED_KEY=%s' % self.key_dummy,
], pcap_file='wireguard-psk.pcap')
self.assertIn('2\t1', lines)
self.assertIn('4\t1', lines)
def test_decrypt_psk_wrong_order(self, run_wireguard_test):
"""Check that the wrong order of keylog lines indeed fails decryption."""
lines = run_wireguard_test(self, [
'-Tfields',
'-e', 'frame.number',
'-e', 'wg.handshake_ok',
], keylog=[
'REMOTE_STATIC_PUBLIC_KEY=%s' % self.key_Spub_i,
'LOCAL_STATIC_PRIVATE_KEY=%s' % self.key_Spriv_r,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_r2,
'LOCAL_EPHEMERAL_PRIVATE_KEY=%s' % self.key_Epriv_i3,
'PRESHARED_KEY=%s' % self.key_psk2, # note: swapped with previous line
'PRESHARED_KEY=%s' % self.key_psk3,
], pcap_file='wireguard-psk.pcap')
self.assertIn('2\t0', lines)
self.assertIn('4\t0', lines)
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_knxip(subprocesstest.SubprocessTestCase):
# Capture files for these tests contain single telegrams.
# For realistic (live captured) KNX/IP telegram sequences, see:
# https://gitlab.com/wireshark/wireshark/-/issues/14825
def test_knxip_data_security_decryption_ok(self, cmd_tshark, capture_file):
'''KNX/IP: Data Security decryption OK'''
# capture_file('knxip_DataSec.pcap') contains KNX/IP ConfigReq DataSec PropExtValueWriteCon telegram
self.assertRun((cmd_tshark,
'-r', capture_file('knxip_DataSec.pcap'),
'-o', 'kip.key_1:00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F',
))
self.assertTrue(self.grepOutput(' DataSec '))
self.assertTrue(self.grepOutput(' PropExtValueWriteCon '))
def test_knxip_data_security_decryption_fails(self, cmd_tshark, capture_file):
'''KNX/IP: Data Security decryption fails'''
# capture_file('knxip_DataSec.pcap') contains KNX/IP ConfigReq DataSec PropExtValueWriteCon telegram
self.assertRun((cmd_tshark,
'-r', capture_file('knxip_DataSec.pcap'),
'-o', 'kip.key_1:""', # "" is really necessary, otherwise test fails
))
self.assertTrue(self.grepOutput(' DataSec '))
self.assertFalse(self.grepOutput(' PropExtValueWriteCon '))
def test_knxip_secure_wrapper_decryption_ok(self, cmd_tshark, capture_file):
'''KNX/IP: SecureWrapper decryption OK'''
# capture_file('knxip_SecureWrapper.pcap') contains KNX/IP SecureWrapper RoutingInd telegram
self.assertRun((cmd_tshark,
'-r', capture_file('knxip_SecureWrapper.pcap'),
'-o', 'kip.key_1:00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F',
))
self.assertTrue(self.grepOutput(' SecureWrapper '))
self.assertTrue(self.grepOutput(' RoutingInd '))
def test_knxip_secure_wrapper_decryption_fails(self, cmd_tshark, capture_file):
'''KNX/IP: SecureWrapper decryption fails'''
# capture_file('knxip_SecureWrapper.pcap') contains KNX/IP SecureWrapper RoutingInd telegram
self.assertRun((cmd_tshark,
'-r', capture_file('knxip_SecureWrapper.pcap'),
'-o', 'kip.key_1:""', # "" is really necessary, otherwise test fails
))
self.assertTrue(self.grepOutput(' SecureWrapper '))
self.assertFalse(self.grepOutput(' RoutingInd '))
def test_knxip_timer_notify_authentication_ok(self, cmd_tshark, capture_file):
'''KNX/IP: TimerNotify authentication OK'''
# capture_file('knxip_TimerNotify.pcap') contains KNX/IP TimerNotify telegram
self.assertRun((cmd_tshark,
'-r', capture_file('knxip_TimerNotify.pcap'),
'-o', 'kip.key_1:00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F',
))
self.assertTrue(self.grepOutput(' TimerNotify '))
self.assertTrue(self.grepOutput(' OK$'))
def test_knxip_timer_notify_authentication_fails(self, cmd_tshark, capture_file):
'''KNX/IP: TimerNotify authentication fails'''
# capture_file('knxip_TimerNotify.pcap') contains KNX/IP TimerNotify telegram
self.assertRun((cmd_tshark,
'-r', capture_file('knxip_TimerNotify.pcap'),
'-o', 'kip.key_1:""', # "" is really necessary, otherwise test fails
))
self.assertTrue(self.grepOutput(' TimerNotify '))
self.assertFalse(self.grepOutput(' OK$'))
def test_knxip_keyring_xml_import(self, cmd_tshark, dirs, capture_file):
'''KNX/IP: keyring.xml import'''
# key_file "keyring.xml" contains KNX decryption keys
key_file = os.path.join(dirs.key_dir, 'knx_keyring.xml')
# capture_file('empty.pcap') is empty
# Write extracted key info to stdout
self.assertRun((cmd_tshark,
'-o', 'kip.key_file:' + key_file,
'-o', 'kip.key_info_file:-',
'-r', capture_file('empty.pcap'),
))
self.assertTrue(self.grepOutput('^MCA 224[.]0[.]23[.]12 key A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF$'))
self.assertTrue(self.grepOutput('^GA 1/7/131 sender 1[.]1[.]1$'))
self.assertTrue(self.grepOutput('^GA 1/7/131 sender 1[.]1[.]3$'))
self.assertTrue(self.grepOutput('^GA 1/7/131 sender 1[.]1[.]4$'))
self.assertTrue(self.grepOutput('^GA 1/7/132 sender 1[.]1[.]2$'))
self.assertTrue(self.grepOutput('^GA 1/7/132 sender 1[.]1[.]4$'))
self.assertTrue(self.grepOutput('^GA 6/7/191 sender 1[.]1[.]1$'))
self.assertTrue(self.grepOutput('^GA 0/1/0 sender 1[.]1[.]1$'))
self.assertTrue(self.grepOutput('^GA 0/1/0 sender 1[.]1[.]3$'))
self.assertTrue(self.grepOutput('^GA 0/1/0 sender 1[.]1[.]4$'))
self.assertTrue(self.grepOutput('^GA 0/1/0 key A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF$'))
self.assertTrue(self.grepOutput('^GA 1/7/131 key A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF$'))
self.assertTrue(self.grepOutput('^GA 1/7/132 key A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF$'))
self.assertTrue(self.grepOutput('^GA 6/7/191 key A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]1 key B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]1 SeqNr 45678$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]2 key B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]2 SeqNr 34567$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]3 key B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]3 SeqNr 23456$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]4 key B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF$'))
self.assertTrue(self.grepOutput('^IA 1[.]1[.]4 SeqNr 12345$'))
self.assertTrue(self.grepOutput('^IA 2[.]1[.]0 key B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF$'))
self.assertTrue(self.grepOutput('^IA 2[.]1[.]0 SeqNr 1234$'))
@fixtures.fixture(scope='session')
def softhsm_paths(features):
if sys.platform == 'win32':
search_path = os.getenv('PATH') + r';C:\SoftHSM2\bin'
else:
search_path = None
softhsm_tool = shutil.which('softhsm2-util', path=search_path)
if not softhsm_tool:
# Note: do not fall back to SoftHSMv1. While available on Ubuntu 14.04
# (and 16.04), it is built with botan < 1.11.10 which causes a crash due
# to a conflict with the GMP library that is also used by GnuTLS/nettle.
# See https://github.com/randombit/botan/issues/1090
fixtures.skip('SoftHSM is not found')
# Find provider library path.
bindir = os.path.dirname(softhsm_tool)
libdir = os.path.join(os.path.dirname(bindir), 'lib')
if sys.platform == 'win32':
libdirs = [libdir, bindir]
if features.have_x64:
name = 'softhsm2-x64.dll'
else:
name = 'softhsm2.dll'
else:
# Debian/Ubuntu-specific paths
madir = sysconfig.get_config_var('multiarchsubdir')
libdir64_sub = os.path.join(libdir + '64', 'softhsm')
libdir_sub = os.path.join(libdir, 'softhsm')
libdirs = [os.path.join(libdir + madir, 'softhsm')] if madir else []
libdirs += [libdir_sub, libdir64_sub]
name = 'libsofthsm2.so'
for libdir in libdirs:
provider = os.path.join(libdir, name)
if os.path.exists(provider):
break
else:
# Even if p11-kit can automatically locate it, do not rely on it.
fixtures.skip('SoftHSM provider library not detected')
# Now check whether the import tool is usable. SoftHSM < 2.3.0 did not
# set CKA_DECRYPT when using softhsm2-util --import and therefore cannot be
# used to import keys for decryption. Use GnuTLS p11tool as workaround.
softhsm_version = subprocess.check_output([softhsm_tool, '--version'],
universal_newlines=True).strip()
use_p11tool = softhsm_version in ('2.0.0', '2.1.0', '2.2.0')
if use_p11tool and not shutil.which('p11tool'):
fixtures.skip('SoftHSM available, but GnuTLS p11tool is unavailable')
return use_p11tool, softhsm_tool, provider
@fixtures.fixture
def softhsm(softhsm_paths, home_path, base_env):
'''Creates a temporary SoftHSM token store (and sets it in the environment),
returns a function to populate that token store and the path to the PKCS #11
provider library.'''
use_p11tool, softhsm_tool, provider = softhsm_paths
conf_path = os.path.join(home_path, 'softhsm-test.conf')
db_path = os.path.join(home_path, 'softhsm-test-tokens')
os.makedirs(db_path)
with open(conf_path, 'w') as f:
f.write('directories.tokendir = %s\n' % db_path)
f.write('objectstore.backend = file\n')
# Avoid syslog spam
f.write('log.level = ERROR\n')
base_env['SOFTHSM2_CONF'] = conf_path
tool_env = base_env.copy()
if sys.platform == 'win32':
# Ensure that softhsm2-util can find the library.
tool_env['PATH'] += ';%s' % os.path.dirname(provider)
# Initialize tokens store.
token_name = 'Wireshark-Test-Tokens'
pin = 'Secret'
subprocess.check_call([softhsm_tool, '--init-token', '--slot', '0',
'--label', token_name, '--so-pin', 'Supersecret', '--pin', pin],
env=tool_env)
if use_p11tool:
tool_env['GNUTLS_PIN'] = pin
# Arbitrary IDs and labels.
ids = iter(range(0xab12, 0xffff))
def import_key(keyfile):
'''Returns a PKCS #11 URI to identify the imported key.'''
label = os.path.basename(keyfile)
obj_id = '%x' % next(ids)
if not use_p11tool:
tool_args = [softhsm_tool, '--import', keyfile, '--label', label,
'--id', obj_id, '--pin', pin, '--token', token_name]
else:
# Fallback for SoftHSM < 2.3.0
tool_args = ['p11tool', '--provider', provider, '--batch',
'--login', '--write', 'pkcs11:token=%s' % token_name,
'--load-privkey', keyfile, '--label', label, '--id', obj_id]
subprocess.check_call(tool_args, env=tool_env)
id_str = '%{}{}%{}{}'.format(*obj_id)
return 'pkcs11:token=%s;id=%s;type=private' % (token_name, id_str)
return types.SimpleNamespace(import_key=import_key, provider=provider, pin=pin)
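# import_key() returns a PKCS #11 URI such as
# pkcs11:token=Wireshark-Test-Tokens;id=%ab%12;type=private (the id shown is
# illustrative); the provider and pin attributes feed the pkcs11_libs and
# rsa_keys UATs in the test below.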
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_pkcs11(subprocesstest.SubprocessTestCase):
def test_tls_pkcs11(self, cmd_tshark, dirs, capture_file, features, softhsm):
'''Check that a RSA key in a PKCS #11 token enables decryption.'''
if not features.have_pkcs11:
self.skipTest('Requires GnuTLS with PKCS #11 support.')
key_file = os.path.join(dirs.key_dir, 'rsa-p-lt-q.p8')
key_uri = softhsm.import_key(key_file)
proc = self.assertRun((cmd_tshark,
'-r', capture_file('rsa-p-lt-q.pcap'),
'-o', 'uat:pkcs11_libs:"{}"'.format(softhsm.provider.replace('\\', '\\x5c')),
'-o', 'uat:rsa_keys:"{}","{}"'.format(key_uri, softhsm.pin),
'-Tfields',
'-e', 'http.request.uri',
'-Y', 'http',
))
self.assertIn('/', proc.stdout_str)
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_decrypt_smb2(subprocesstest.SubprocessTestCase):
BAD_KEY = 'ffffffffffffffffffffffffffffffff'
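# Rows in the smb2_seskey_list UAT are: session id, session key,
# server-to-client key, client-to-server key (hex strings; '""' leaves a
# field empty). The *_seskey tests supply only the session key, while the
# *_deckey tests supply the decryption keys directly.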
def check_bad_key(self, cmd_tshark, cap, disp_filter, sesid, seskey, s2ckey, c2skey):
proc = self.assertRun((cmd_tshark,
'-r', cap,
'-o', 'uat:smb2_seskey_list:{},{},{},{}'.format(sesid, seskey, s2ckey, c2skey),
'-Y', disp_filter,
))
self.assertIn('Encrypted SMB', proc.stdout_str)
#
# SMB3.0 CCM bad keys tests
#
def test_smb300_bad_seskey(self, features, cmd_tshark, capture_file):
'''Check that a bad session key doesn't crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
'frame.number == 7', '1900009c003c0000', self.BAD_KEY, '""', '""')
def test_smb300_bad_s2ckey(self, features, cmd_tshark, capture_file):
'''Check that a bad s2c key doesn't crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
'frame.number == 7', '1900009c003c0000', '""', self.BAD_KEY, '""')
def test_smb300_bad_c2skey(self, features, cmd_tshark, capture_file):
'''Check that a bad c2s key doesn't crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
'frame.number == 7', '1900009c003c0000', '""', '""', self.BAD_KEY)
def test_smb300_bad_deckey(self, features, cmd_tshark, capture_file):
'''Check that bad decryption keys don't cause a crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
'frame.number == 7', '1900009c003c0000', '""', self.BAD_KEY, self.BAD_KEY)
def test_smb300_bad_allkey(self, features, cmd_tshark, capture_file):
'''Check that supplying all bad keys doesn't cause a crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
'frame.number == 7', '1900009c003c0000', self.BAD_KEY, self.BAD_KEY, self.BAD_KEY)
#
# SMB3.1.1 CCM bad key tests
#
def test_smb311_bad_seskey(self, features, cmd_tshark, capture_file):
'''Check that a bad session key doesn't crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
'frame.number == 7', '2900009c003c0000', self.BAD_KEY, '""', '""')
def test_smb311_bad_s2ckey(self, features, cmd_tshark, capture_file):
'''Check that a bad s2c key doesn't crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
'frame.number == 7', '2900009c003c0000', '""', self.BAD_KEY, '""')
def test_smb311_bad_c2skey(self, features, cmd_tshark, capture_file):
'''Check that a bad c2s key doesn't crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
'frame.number == 7', '2900009c003c0000', '""', '""', self.BAD_KEY)
def test_smb311_bad_deckey(self, features, cmd_tshark, capture_file):
'''Check that bad decryption keys don't cause a crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
'frame.number == 7', '2900009c003c0000', '""', self.BAD_KEY, self.BAD_KEY)
def test_smb311_bad_allkey(self, features, cmd_tshark, capture_file):
'''Check that supplying all bad keys doesn't cause a crash'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_bad_key(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
'frame.number == 7', '2900009c003c0000', self.BAD_KEY, self.BAD_KEY, self.BAD_KEY)
#
# Decryption tests
#
def check_tree(self, cmd_tshark, cap, tree, sesid, seskey, s2ckey, c2skey):
proc = self.assertRun((cmd_tshark,
'-r', cap,
'-o', 'uat:smb2_seskey_list:{},{},{},{}'.format(sesid, seskey, s2ckey, c2skey),
'-Tfields',
'-e', 'smb2.tree',
'-Y', 'smb2.tree == "{}"'.format(tree.replace('\\', '\\\\')),
))
self.assertEqual(tree, proc.stdout_str.strip())
# SMB3.0 CCM
def test_smb300_aes128ccm_seskey(self, features, cmd_tshark, capture_file):
'''Check SMB 3.0 AES128CCM decryption with session key.'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_tree(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
r'\\dfsroot1.foo.test\IPC$', '1900009c003c0000',
'9a9ea16a0cdbeb6064772318073f172f', '""', '""')
def test_smb300_aes128ccm_deckey(self, features, cmd_tshark, capture_file):
'''Check SMB 3.0 AES128CCM decryption with decryption keys.'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_tree(cmd_tshark, capture_file('smb300-aes-128-ccm.pcap.gz'),
r'\\dfsroot1.foo.test\IPC$', '1900009c003c0000',
'""', '8be6cc53d4beba29387e69aef035d497','bff985870e81784d533fdc09497b8eab')
# SMB3.1.1 CCM
def test_smb311_aes128ccm_seskey(self, features, cmd_tshark, capture_file):
'''Check SMB 3.1.1 AES128CCM decryption with session key.'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_tree(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
r'\\dfsroot1.foo.test\IPC$', '2900009c003c0000',
'f1fa528d3cd182cca67bd4596dabd885', '""', '""')
def test_smb311_aes128ccm_deckey(self, features, cmd_tshark, capture_file):
'''Check SMB 3.1.1 AES128CCM decryption with decryption keys.'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_tree(cmd_tshark, capture_file('smb311-aes-128-ccm.pcap.gz'),
r'\\dfsroot1.foo.test\IPC$', '2900009c003c0000',
'""', '763d5552dbc9650b700869467a5857e4', '35e69833c6578e438c8701cb40bf483e')
# SMB3.1.1 GCM
def test_smb311_aes128gcm_seskey(self, features, cmd_tshark, capture_file):
'''Check SMB 3.1.1 AES128GCM decryption with session key.'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_tree(cmd_tshark, capture_file('smb311-aes-128-gcm.pcap.gz'),
r'\\dfsroot1.foo.test\IPC$', '3900000000400000',
'e79161ded03bda1449b2c8e58f753953', '""', '""')
def test_smb311_aes128gcm_deckey(self, features, cmd_tshark, capture_file):
'''Check SMB 3.1.1 AES128GCM decryption with decryption keys.'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_tree(cmd_tshark, capture_file('smb311-aes-128-gcm.pcap.gz'),
r'\\dfsroot1.foo.test\IPC$', '3900000000400000',
'""', 'b02f5de25e0562075c3dc329fa2aa396', '7201623a31754e6581864581209dd3d2')
def check_partial(self, home_path, cmd_tshark, full_cap, pkt_skip, tree, sesid, s2ckey, c2skey):
# generate a trace without NegProt and SessionSetup
partial_cap = os.path.join(home_path, 'short.pcap')
self.assertRun((cmd_tshark,
'-r', full_cap,
'-Y', 'frame.number >= %d'%pkt_skip,
'-w', partial_cap,
))
self.check_tree(cmd_tshark, partial_cap, tree, sesid, '""', s2ckey, c2skey)
def test_smb311_aes128gcm_partial(self, features, home_path, cmd_tshark, capture_file):
'''Check SMB 3.1.1 AES128GCM decryption in capture missing session setup'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_partial(home_path, cmd_tshark,
capture_file('smb311-aes-128-gcm.pcap.gz'), 7,
r'\\dfsroot1.foo.test\IPC$', '3900000000400000',
'b02f5de25e0562075c3dc329fa2aa396', '7201623a31754e6581864581209dd3d2')
def test_smb311_aes128gcm_partial_keyswap(self, features, home_path, cmd_tshark, capture_file):
'''Check SMB 3.1.1 AES128GCM decryption in capture missing session setup with keys in wrong order'''
if not features.have_libgcrypt16:
self.skipTest('Requires GCrypt 1.6 or later.')
self.check_partial(home_path, cmd_tshark,
capture_file('smb311-aes-128-gcm.pcap.gz'), 7,
r'\\dfsroot1.foo.test\IPC$', '3900000000400000',
'7201623a31754e6581864581209dd3d2', 'b02f5de25e0562075c3dc329fa2aa396')
|
hkchenhongyi/django | refs/heads/master | django/contrib/gis/geos/libgeos.py | 345 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (which get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
logger = logging.getLogger('django.contrib.gis')
def load_geos():
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
_lgeos = CDLL(lib_path)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
_lgeos.initGEOS_r.restype = CONTEXT_PTR
_lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
return _lgeos
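# Example settings override read by load_geos() above (the path is purely
# illustrative and platform-dependent):
#   GEOS_LIBRARY_PATH = '/usr/local/lib/libgeos_c.so'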
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory(object):
"""
Lazy loading of GEOS functions.
"""
argtypes = None
restype = None
errcheck = None
def __init__(self, func_name, *args, **kwargs):
self.func_name = func_name
self.restype = kwargs.pop('restype', self.restype)
self.errcheck = kwargs.pop('errcheck', self.errcheck)
self.argtypes = kwargs.pop('argtypes', self.argtypes)
self.args = args
self.kwargs = kwargs
self.func = None
def __call__(self, *args, **kwargs):
if self.func is None:
self.func = self.get_func(*self.args, **self.kwargs)
return self.func(*args, **kwargs)
def get_func(self, *args, **kwargs):
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
func = GEOSFunc(self.func_name)
func.argtypes = self.argtypes or []
func.restype = self.restype
if self.errcheck:
func.errcheck = self.errcheck
return func
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p)
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return {key: m.group(key) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
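# For illustration, a version string such as '3.4.0dev-CAPI-1.8.0' would parse
# (per the regex above; values inferred from the pattern, not a live GEOS) to:
#   {'version': '3.4.0', 'major': '3', 'minor': '4', 'subminor': '0',
#    'release_candidate': None, 'capi_version': '1.8.0'}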
|
cjbe/artiqDrivers | refs/heads/master | artiqDrivers/frontend/coherentDds_controller.py | 1 | #!/usr/bin/env python3.5
import argparse
import sys
from artiqDrivers.devices.coherentDds.driver import CoherentDds, CoherentDdsSim
from sipyco.pc_rpc import simple_server_loop
from sipyco.common_args import simple_network_args, init_logger_from_args
from oxart.tools import add_common_args
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--device", default=None,
help="serial device. See documentation for how to "
"specify a USB Serial Number.")
parser.add_argument("--simulation", action="store_true",
help="Put the driver in simulation mode, even if "
"--device is used.")
parser.add_argument("--clockfreq", default=1e9, type=float,
help="clock frequency provided to DDS")
parser.add_argument("--internal-clock", action="store_true")
parser.add_argument("--disable-coherence", action="append",
help="disable coherent switching (=no phase glitches) "
"for a given channel")
simple_network_args(parser, 4000)
add_common_args(parser)
return parser
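# Example invocations (illustrative only; the serial device path is an
# assumption, not a value taken from this repository):
#   python3 coherentDds_controller.py -d /dev/ttyUSB0 --clockfreq 1e9
#   python3 coherentDds_controller.py --simulation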
def main():
args = get_argparser().parse_args()
init_logger_from_args(args)
incoherent_channels = [False]*4
if args.disable_coherence:
for arg in args.disable_coherence:
ch = int(arg)
if ch < 0 or ch > 3:
raise ValueError("channel must be in 0-3")
incoherent_channels[ch] = True
if not args.simulation and args.device is None:
print("You need to specify either --simulation or -d/--device "
"argument. Use --help for more information.")
sys.exit(1)
if args.simulation:
dev = CoherentDdsSim()
else:
dev = CoherentDds(addr=args.device, clockFreq=args.clockfreq,
internal_clock=args.internal_clock,
incoherent_channels=incoherent_channels)
simple_server_loop({"coherentDds": dev}, args.bind, args.port)
if __name__ == "__main__":
main()
|
zhang-shengping/racoon | refs/heads/master | racoon/__init__.py | 1 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'racoon').version_string()
|
DistrictDataLabs/yellowbrick | refs/heads/develop | tests/test_features/test_pcoords.py | 1 | # tests.test_features.test_pcoords
# Testing for the parallel coordinates feature visualizers
#
# Author: Benjamin Bengfort
# Author: @thekylesaurus
# Created: Thu Oct 06 11:21:27 2016 -0400
#
# Copyright (C) 2017 The scikit-yb developers.
# For license information, see LICENSE.txt
#
# ID: test_pcoords.py [1d407ab] benjamin@bengfort.com $
"""
Testing for the parallel coordinates feature visualizers
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
from yellowbrick.datasets import load_occupancy
from yellowbrick.features.pcoords import *
from tests.base import VisualTestCase
from ..fixtures import Dataset
from sklearn.datasets import make_classification
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope="class")
def dataset(request):
"""
Creates a random multiclass classification dataset fixture
"""
X, y = make_classification(
n_samples=200,
n_features=5,
n_informative=4,
n_redundant=0,
n_classes=3,
n_clusters_per_class=1,
random_state=451,
flip_y=0,
class_sep=3,
scale=np.array([1.0, 2.0, 100.0, 20.0, 1.0]),
)
dataset = Dataset(X, y)
request.cls.dataset = dataset
##########################################################################
## Parallel Coordinates Tests
##########################################################################
@pytest.mark.usefixtures("dataset")
class TestParallelCoordinates(VisualTestCase):
"""
Test the ParallelCoordinates visualizer
"""
def test_parallel_coords(self):
"""
Test images closeness on random 3 class dataset
"""
visualizer = ParallelCoordinates()
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_parallel_coords_fast(self):
"""
Test images closeness on random 3 class dataset in fast mode
"""
visualizer = ParallelCoordinates(fast=True)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_alpha(self):
"""
Test image closeness on opaque alpha for random 3 class dataset
"""
visualizer = ParallelCoordinates(alpha=1.0)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_alpha_fast(self):
"""
Test image closeness on opaque alpha for random 3 class dataset in fast mode
"""
visualizer = ParallelCoordinates(alpha=1.0, fast=True)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_labels(self):
"""
Test image closeness when class and feature labels are supplied
"""
visualizer = ParallelCoordinates(
classes=["a", "b", "c"], features=["f1", "f2", "f3", "f4", "f5"]
)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer)
def test_labels_fast(self):
"""
Test image closeness when class and feature labels are supplied in fast mode
"""
visualizer = ParallelCoordinates(
classes=["a", "b", "c"], features=["f1", "f2", "f3", "f4", "f5"], fast=True
)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer)
def test_normalized_l2(self):
"""
Test image closeness on l2 normalized 3 class dataset
"""
visualizer = ParallelCoordinates(normalize="l2")
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_normalized_l2_fast(self):
"""
Test image closeness on l2 normalized 3 class dataset in fast mode
"""
visualizer = ParallelCoordinates(normalize="l2", fast=True)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_normalized_minmax(self):
"""
Test image closeness on minmax normalized 3 class dataset
"""
visualizer = ParallelCoordinates(normalize="minmax")
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_normalized_minmax_fast(self):
"""
Test image closeness on minmax normalized 3 class dataset in fast mode
"""
visualizer = ParallelCoordinates(normalize="minmax", fast=True)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.25)
def test_parallel_coordinates_quickmethod(self):
"""
Test the quick method producing a valid visualization
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
# Compare the images
# Use only the first 100 samples so the test will run faster
visualizer = parallel_coordinates(X, y, sample=100, show=False)
self.assert_images_similar(visualizer)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration_sampled(self):
"""
Test on a real dataset with pandas DataFrame and Series sampled for speed
"""
data = load_occupancy(return_dataset=True)
X, y = data.to_pandas()
classes = [
k for k, _ in sorted(data.meta["labels"].items(), key=lambda i: i[1])
]
assert isinstance(X, pd.DataFrame)
assert isinstance(y, pd.Series)
oz = ParallelCoordinates(
sample=0.05, shuffle=True, random_state=4291, classes=classes
)
oz.fit_transform(X, y)
oz.finalize()
self.assert_images_similar(oz, tol=0.1)
def test_numpy_integration_sampled(self):
"""
Ensure visualizer works in default case with numpy arrays and sampling
"""
data = load_occupancy(return_dataset=True)
X, y = data.to_numpy()
classes = [
k for k, _ in sorted(data.meta["labels"].items(), key=lambda i: i[1])
]
assert isinstance(X, np.ndarray)
assert isinstance(y, np.ndarray)
oz = ParallelCoordinates(
sample=0.05, shuffle=True, random_state=4291, classes=classes
)
oz.fit_transform(X, y)
oz.finalize()
self.assert_images_similar(oz, tol=0.1)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration_fast(self):
"""
Test on a real dataset with pandas DataFrame and Series in fast mode
"""
data = load_occupancy(return_dataset=True)
X, y = data.to_pandas()
classes = [
k for k, _ in sorted(data.meta["labels"].items(), key=lambda i: i[1])
]
assert isinstance(X, pd.DataFrame)
assert isinstance(y, pd.Series)
oz = ParallelCoordinates(fast=True, classes=classes)
oz.fit_transform(X, y)
oz.finalize()
self.assert_images_similar(oz, tol=0.1)
def test_numpy_integration_fast(self):
"""
Ensure visualizer works in default case with numpy arrays and fast mode
"""
data = load_occupancy(return_dataset=True)
X, y = data.to_numpy()
classes = [
k for k, _ in sorted(data.meta["labels"].items(), key=lambda i: i[1])
]
assert isinstance(X, np.ndarray)
assert isinstance(y, np.ndarray)
oz = ParallelCoordinates(fast=True, classes=classes)
oz.fit_transform(X, y)
oz.finalize()
self.assert_images_similar(oz, tol=0.1)
def test_normalized_invalid_arg(self):
"""
Invalid argument to 'normalize' should raise
"""
with pytest.raises(YellowbrickValueError):
ParallelCoordinates(normalize="foo")
def test_sample_int(self):
"""
Assert no errors occur using integer 'sample' argument
"""
visualizer = ParallelCoordinates(sample=10)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
def test_sample_int_shuffle(self):
"""
Assert no errors occur using integer 'sample' argument and shuffle, with different random_state args
"""
visualizer = ParallelCoordinates(sample=3, shuffle=True)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(sample=3, shuffle=True, random_state=444)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(
sample=3, shuffle=True, random_state=np.random.RandomState()
)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
def test_sample_int_shuffle_false(self):
"""
Assert no errors occur using integer 'sample' argument and shuffle, with different random_state args
"""
visualizer = ParallelCoordinates(sample=3, shuffle=False)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(sample=3, shuffle=False, random_state=444)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(
sample=3, shuffle=False, random_state=np.random.RandomState()
)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
def test_sample_int_invalid(self):
"""
Negative int values should raise exception
"""
with pytest.raises(YellowbrickValueError):
ParallelCoordinates(sample=-1)
def test_sample_float(self):
"""
Assert no errors occur using float 'sample' argument
"""
visualizer = ParallelCoordinates(sample=0.5)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
def test_sample_float_shuffle(self):
"""
Assert no errors occur using float 'sample' argument and shuffle, with different random_state args
"""
visualizer = ParallelCoordinates(sample=0.5, shuffle=True)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(sample=0.5, shuffle=True, random_state=444)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(
sample=0.5, shuffle=True, random_state=np.random.RandomState()
)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
def test_sample_float_shuffle_false(self):
"""
Assert no errors occur using float 'sample' argument and shuffle, with different random_state args
"""
visualizer = ParallelCoordinates(sample=0.5, shuffle=False)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(sample=0.5, shuffle=False, random_state=444)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
visualizer = ParallelCoordinates(
sample=0.5, shuffle=False, random_state=np.random.RandomState()
)
visualizer.fit_transform(self.dataset.X, self.dataset.y)
def test_sample_float_invalid(self):
"""
Float values for 'sample' argument outside [0,1] should raise.
"""
with pytest.raises(YellowbrickValueError):
ParallelCoordinates(sample=-0.2)
with pytest.raises(YellowbrickValueError):
ParallelCoordinates(sample=1.1)
def test_sample_invalid_type(self):
"""
Non-numeric values for 'sample' argument should raise.
"""
with pytest.raises(YellowbrickTypeError):
ParallelCoordinates(sample="foo")
@staticmethod
def test_static_subsample():
"""
Assert output of subsampling method against expectations
"""
ntotal = 100
ncols = 50
y = np.arange(ntotal)
X = np.ones((ntotal, ncols)) * y.reshape(ntotal, 1)
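        # Each row of X is a constant copy of the matching y value, so the
        # shuffled yprime below can double as a row index into X.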
visualizer = ParallelCoordinates(sample=1.0, random_state=None, shuffle=False)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X)
assert np.array_equal(yprime, y)
visualizer = ParallelCoordinates(sample=200, random_state=None, shuffle=False)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X)
assert np.array_equal(yprime, y)
sample = 50
visualizer = ParallelCoordinates(
sample=sample, random_state=None, shuffle=False
)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X[:sample, :])
assert np.array_equal(yprime, y[:sample])
sample = 50
visualizer = ParallelCoordinates(sample=sample, random_state=None, shuffle=True)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X[yprime.flatten(), :])
assert len(Xprime) == sample
assert len(yprime) == sample
visualizer = ParallelCoordinates(sample=0.5, random_state=None, shuffle=False)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X[: int(ntotal / 2), :])
assert np.array_equal(yprime, y[: int(ntotal / 2)])
sample = 0.5
visualizer = ParallelCoordinates(sample=sample, random_state=None, shuffle=True)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X[yprime.flatten(), :])
assert len(Xprime) == ntotal * sample
assert len(yprime) == ntotal * sample
sample = 0.25
visualizer = ParallelCoordinates(sample=sample, random_state=444, shuffle=True)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X[yprime.flatten(), :])
assert len(Xprime) == ntotal * sample
assert len(yprime) == ntotal * sample
sample = 0.99
visualizer = ParallelCoordinates(
sample=sample, random_state=np.random.RandomState(), shuffle=True
)
Xprime, yprime = visualizer._subsample(X, y)
assert np.array_equal(Xprime, X[yprime.flatten(), :])
assert len(Xprime) == ntotal * sample
assert len(yprime) == ntotal * sample
|
haad/ansible-modules-extras | refs/heads/devel | windows/win_acl_inheritance.py | 59 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_acl_inheritance
version_added: "2.1"
short_description: Change ACL inheritance
description:
    - Change ACL (Access Control List) inheritance and optionally copy inherited ACEs (Access Control Entries) to dedicated ACEs, or vice versa.
options:
path:
description:
- Path to be used for changing inheritance
required: true
state:
description:
- Specify whether to enable I(present) or disable I(absent) ACL inheritance
required: false
choices:
- present
- absent
default: absent
reorganize:
description:
      - For P(state) = I(absent), indicates if the inherited ACEs should be copied from the parent directory. This is necessary (in combination with removal) to get a simple ACL, instead of using multiple ACE deny entries.
      - For P(state) = I(present), indicates if the inherited ACEs should be deduplicated compared to the parent directory. This reduces the complexity of the ACL structure.
required: false
choices:
- no
- yes
default: no
author: Hans-Joachim Kliemeck (@h0nIg)
'''
EXAMPLES = '''
# Playbook example
---
- name: Disable inherited ACEs
win_acl_inheritance:
path: 'C:\\apache\\'
state: absent
- name: Disable and copy inherited ACEs
win_acl_inheritance:
path: 'C:\\apache\\'
state: absent
reorganize: yes
- name: Enable and remove dedicated ACEs
win_acl_inheritance:
path: 'C:\\apache\\'
state: present
reorganize: yes
'''
RETURN = '''
''' |
ity/pants | refs/heads/master | tests/python/pants_test/goal/test_artifact_cache_stats.py | 17 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import contextmanager
import requests
from pants.cache.artifact import ArtifactError
from pants.cache.artifact_cache import NonfatalArtifactCacheError, UnreadableArtifact
from pants.goal.artifact_cache_stats import ArtifactCacheStats
from pants.util.contextutil import temporary_dir
from pants_test.base_test import BaseTest
class ArtifactCacheStatsTest(BaseTest):
TEST_CACHE_NAME_1 = 'ZincCompile'
TEST_CACHE_NAME_2 = 'Checkstyle_test_checkstyle'
TEST_LOCAL_ERROR = UnreadableArtifact('foo', ArtifactError('CRC check failed'))
TEST_REMOTE_ERROR = UnreadableArtifact(
'bar',
NonfatalArtifactCacheError(requests.exceptions.ConnectionError('Read time out'))
)
TEST_SPEC_A = 'src/scala/a'
TEST_SPEC_B = 'src/scala/b'
TEST_SPEC_C = 'src/java/c'
def setUp(self):
super(ArtifactCacheStatsTest, self).setUp()
self.target_a = self.make_target(spec=self.TEST_SPEC_A)
self.target_b = self.make_target(spec=self.TEST_SPEC_B)
self.target_c = self.make_target(spec=self.TEST_SPEC_C)
def test_add_hits(self):
expected_stats = [
{
'cache_name': self.TEST_CACHE_NAME_2,
'num_hits': 0,
'num_misses': 1,
'hits': [],
'misses': [(self.TEST_SPEC_A, str(self.TEST_LOCAL_ERROR.err))]
},
{
'cache_name': self.TEST_CACHE_NAME_1,
'num_hits': 1,
'num_misses': 1,
'hits': [(self.TEST_SPEC_B, '')],
'misses': [(self.TEST_SPEC_C, str(self.TEST_REMOTE_ERROR.err))]
},
]
expected_hit_or_miss_files = {
'{}.misses'.format(self.TEST_CACHE_NAME_2):
'{} {}\n'.format(self.TEST_SPEC_A, str(self.TEST_LOCAL_ERROR.err)),
'{}.hits'.format(self.TEST_CACHE_NAME_1):
'{}\n'.format(self.TEST_SPEC_B),
'{}.misses'.format(self.TEST_CACHE_NAME_1):
'{} {}\n'.format(self.TEST_SPEC_C, str(self.TEST_REMOTE_ERROR.err)),
}
with self.mock_artifact_cache_stats(expected_stats,
expected_hit_or_miss_files=expected_hit_or_miss_files)\
as artifact_cache_stats:
artifact_cache_stats.add_hits(self.TEST_CACHE_NAME_1, [self.target_b])
artifact_cache_stats.add_misses(self.TEST_CACHE_NAME_1, [self.target_c],
[self.TEST_REMOTE_ERROR])
artifact_cache_stats.add_misses(self.TEST_CACHE_NAME_2, [self.target_a],
[self.TEST_LOCAL_ERROR])
@contextmanager
def mock_artifact_cache_stats(self,
expected_stats,
expected_hit_or_miss_files=None):
with temporary_dir() as tmp_dir:
artifact_cache_stats = ArtifactCacheStats(tmp_dir)
yield artifact_cache_stats
self.assertEquals(expected_stats, artifact_cache_stats.get_all())
self.assertEquals(sorted(list(expected_hit_or_miss_files.keys())),
sorted(os.listdir(tmp_dir)))
for hit_or_miss_file in expected_hit_or_miss_files.keys():
with open(os.path.join(tmp_dir, hit_or_miss_file)) as hit_or_miss_saved:
self.assertEquals(expected_hit_or_miss_files[hit_or_miss_file], hit_or_miss_saved.read())
|
neudesk/neucloud | refs/heads/master | openstack_dashboard/dashboards/router/nexus1000v/tests.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used and when not.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
class Nexus1000vTest(test.BaseAdminViewTests):
@test.create_stubs({api.neutron: ('profile_list',
'profile_bindings_list'),
api.keystone: ('tenant_list',)})
def test_index(self):
tenants = self.tenants.list()
net_profiles = self.net_profiles.list()
policy_profiles = self.policy_profiles.list()
net_profile_binding = self.network_profile_binding.list()
policy_profile_binding = self.policy_profile_binding.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.profile_list(IsA(http.HttpRequest),
'policy').AndReturn(policy_profiles)
api.neutron.profile_bindings_list(
IsA(http.HttpRequest),
'network').AndReturn(net_profile_binding)
api.neutron.profile_bindings_list(
IsA(http.HttpRequest),
'policy').AndReturn(policy_profile_binding)
api.keystone.tenant_list(
IsA(http.HttpRequest)).AndReturn([tenants, False])
api.keystone.tenant_list(
IsA(http.HttpRequest)).AndReturn([tenants, False])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:router:nexus1000v:index'))
self.assertTemplateUsed(res, 'router/nexus1000v/index.html')
|
wilvk/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/elb_application_lb_facts.py | 46 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_application_lb_facts
short_description: Gather facts about application ELBs in AWS
description:
- Gather facts about application ELBs in AWS
version_added: "2.4"
requirements: [ boto3 ]
author: Rob White (@wimnat)
options:
load_balancer_arns:
description:
- The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.
required: false
names:
description:
- The names of the load balancers.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all application ELBs
- elb_application_lb_facts:
# Gather facts about a particular application ELB
- elb_application_lb_facts:
load_balancer_arns:
- "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about application ELBs named 'elb1' and 'elb2'
- elb_application_lb_facts:
names:
- elb1
- elb2
'''
RETURN = '''
load_balancers:
description: a list of load balancers
returned: always
type: complex
contains:
access_logs_s3_bucket:
description: The name of the S3 bucket for the access logs.
returned: when status is present
type: string
sample: mys3bucket
access_logs_s3_enabled:
description: Indicates whether access logs stored in Amazon S3 are enabled.
returned: when status is present
type: string
sample: true
access_logs_s3_prefix:
description: The prefix for the location in the S3 bucket.
returned: when status is present
type: string
sample: /my/logs
availability_zones:
description: The Availability Zones for the load balancer.
returned: when status is present
type: list
sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
canonical_hosted_zone_id:
description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
returned: when status is present
type: string
sample: ABCDEF12345678
created_time:
description: The date and time the load balancer was created.
returned: when status is present
type: string
sample: "2015-02-12T02:14:02+00:00"
deletion_protection_enabled:
description: Indicates whether deletion protection is enabled.
returned: when status is present
type: string
sample: true
dns_name:
description: The public DNS name of the load balancer.
returned: when status is present
type: string
sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
idle_timeout_timeout_seconds:
description: The idle timeout value, in seconds.
returned: when status is present
type: string
sample: 60
ip_address_type:
description: The type of IP addresses used by the subnets for the load balancer.
returned: when status is present
type: string
sample: ipv4
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when status is present
type: string
sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
load_balancer_name:
description: The name of the load balancer.
returned: when status is present
type: string
sample: my-elb
scheme:
description: Internet-facing or internal load balancer.
returned: when status is present
type: string
sample: internal
security_groups:
description: The IDs of the security groups for the load balancer.
returned: when status is present
type: list
sample: ['sg-0011223344']
state:
description: The state of the load balancer.
returned: when status is present
type: dict
sample: "{'code': 'active'}"
tags:
description: The tags attached to the load balancer.
returned: when status is present
type: dict
sample: "{
'Tag': 'Example'
}"
type:
description: The type of load balancer.
returned: when status is present
type: string
sample: application
vpc_id:
description: The ID of the VPC for the load balancer.
returned: when status is present
type: string
sample: vpc-0011223344
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_elb_listeners(connection, module, elb_arn):
try:
return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_listener_rules(connection, module, listener_arn):
try:
return connection.describe_rules(ListenerArn=listener_arn)['Rules']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_load_balancer_attributes(connection, module, load_balancer_arn):
try:
load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
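    # e.g. 'idle_timeout.timeout_seconds' becomes 'idle_timeout_timeout_seconds'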
for k, v in list(load_balancer_attributes.items()):
load_balancer_attributes[k.replace('.', '_')] = v
del load_balancer_attributes[k]
return load_balancer_attributes
def get_load_balancer_tags(connection, module, load_balancer_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_load_balancers(connection, module):
load_balancer_arns = module.params.get("load_balancer_arns")
names = module.params.get("names")
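    # load_balancer_arns and names are declared mutually exclusive in main(),
    # so at most one of the filtered paginate() calls below will run.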
try:
load_balancer_paginator = connection.get_paginator('describe_load_balancers')
if not load_balancer_arns and not names:
load_balancers = load_balancer_paginator.paginate().build_full_result()
if load_balancer_arns:
load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
if names:
load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'LoadBalancerNotFound':
module.exit_json(load_balancers=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
for load_balancer in load_balancers['LoadBalancers']:
# Get the attributes for each elb
load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
# Get the listeners for each elb
load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
# For each listener, get listener rules
for listener in load_balancer['listeners']:
listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
# Get tags for each load balancer
for snaked_load_balancer in snaked_load_balancers:
snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
module.exit_json(load_balancers=snaked_load_balancers)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['load_balancer_arns', 'names']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_load_balancers(connection, module)
if __name__ == '__main__':
main()
|
dmrschmidt/the_beet | refs/heads/master | main.py | 1 | #! /usr/bin/python3
from diode.blinker import Blinker
def main():
blinker = Blinker()
blinker.blink()
if __name__ == "__main__":
main()
|
cryptobanana/ansible | refs/heads/devel | test/units/modules/cloud/amazon/test_ec2_utils.py | 231 | import unittest
from ansible.module_utils.ec2 import map_complex_type
class Ec2Utils(unittest.TestCase):
def test_map_complex_type_over_dict(self):
complex_type = {'minimum_healthy_percent': "75", 'maximum_percent': "150"}
type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'}
complex_type_mapped = map_complex_type(complex_type, type_map)
complex_type_expected = {'minimum_healthy_percent': 75, 'maximum_percent': 150}
self.assertEqual(complex_type_mapped, complex_type_expected)
|
bohanapp/gaoyuan.org | refs/heads/master | node_modules/hexo-renderer-scss/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
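  """Quotes command-line arguments for the Win32 cmd shell: embedded double
  quotes are doubled, and any argument containing quotes or whitespace is
  wrapped in double quotes."""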
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
    Args:
      config_name: name of the configuration these debug settings apply to.
      command: command line to run. First element in the list is the
        executable. All elements of the command will be quoted if
        necessary.
      environment: dict of environment variables to set for the debug run.
        (optional)
      working_directory: working directory in which to run the command.
        (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
Nitaco/ansible | refs/heads/devel | contrib/inventory/softlayer.py | 5 | #!/usr/bin/env python
"""
SoftLayer external inventory script.
The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
You have a few different options for configuring your username and api_key. You can pass
environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to
~/.softlayer or /etc/softlayer.conf. For more information see the SL API at:
- https://softlayer-python.readthedocs.io/en/latest/config_file.html
The SoftLayer Python client has a built in command for saving this configuration file
via the command `sl config setup`.
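A minimal ~/.softlayer file looks roughly like this (the values shown are
placeholders, not real credentials):
    [softlayer]
    username = my_sl_username
    api_key = my_sl_api_key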
"""
# Copyright (C) 2014 AJ Bourg <aj@ajbourg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# I found the structure of the ec2.py script very helpful as an example
# as I put this together. Thanks to whoever wrote that script!
#
import SoftLayer
import re
import argparse
import itertools
try:
import json
except ImportError:
import simplejson as json
class SoftLayerInventory(object):
common_items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'datacenter',
'tagReferences.tag.name',
'userData.value',
]
vs_items = [
'lastKnownPowerState.name',
'powerState',
'maxCpu',
'maxMemory',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
hw_items = [
'hardwareStatusId',
'processorPhysicalCoreAmount',
'memoryCapacity',
]
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
'''Main path'''
self.inventory = self._empty_inventory()
self.parse_options()
if self.args.list:
self.get_all_servers()
print(self.json_format_dict(self.inventory, True))
elif self.args.host:
self.get_virtual_servers()
print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
def push(self, my_dict, key, element):
'''Push an element onto an array that may not have been defined in the dict'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def parse_options(self):
'''Parse all the arguments from the CLI'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
parser.add_argument('--list', action='store_true', default=False,
help='List instances (default: False)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
'''Converts a dict to a JSON object and dumps it as a formatted string'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def process_instance(self, instance, instance_type="virtual"):
'''Populate the inventory dictionary with any instance information'''
# only want active instances
if 'status' in instance and instance['status']['name'] != 'Active':
return
# and powered on instances
if 'powerState' in instance and instance['powerState']['name'] != 'Running':
return
# 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
return
# if there's no IP address, we can't reach it
if 'primaryIpAddress' not in instance:
return
instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
dest = instance['primaryIpAddress']
self.inventory["_meta"]["hostvars"][dest] = instance
# Inventory: group by memory
if 'maxMemory' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
elif 'memoryCapacity' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
# Inventory: group by cpu count
if 'maxCpu' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
elif 'processorPhysicalCoreAmount' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
# Inventory: group by datacenter
self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
# Inventory: group by hostname
self.push(self.inventory, self.to_safe(instance['hostname']), dest)
# Inventory: group by FQDN
self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
# Inventory: group by domain
self.push(self.inventory, self.to_safe(instance['domain']), dest)
# Inventory: group by type (hardware/virtual)
self.push(self.inventory, instance_type, dest)
# Inventory: group by tag
for tag in instance['tagReferences']:
self.push(self.inventory, tag['tag']['name'], dest)
def get_virtual_servers(self):
'''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask)
for instance in instances:
self.process_instance(instance)
def get_physical_servers(self):
'''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask)
for instance in instances:
self.process_instance(instance, 'hardware')
def get_all_servers(self):
self.client = SoftLayer.Client()
self.get_virtual_servers()
self.get_physical_servers()
SoftLayerInventory()
|
mjirayu/sit_academy | refs/heads/master | lms/djangoapps/instructor_analytics/tests/test_basic.py | 32 | """
Tests for instructor.basic
"""
import json
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from django.core.urlresolvers import reverse
from mock import patch
from student.roles import CourseSalesAdminRole
from student.tests.factories import UserFactory, CourseModeFactory
from shoppingcart.models import (
CourseRegistrationCode, RegistrationCodeRedemption, Order,
Invoice, Coupon, CourseRegCodeItem, CouponRedemption, CourseRegistrationCodeInvoiceItem
)
from course_modes.models import CourseMode
from instructor_analytics.basic import (
sale_record_features, sale_order_record_features, enrolled_students_features,
course_registration_features, coupon_codes_features, list_may_enroll,
AVAILABLE_FEATURES, STUDENT_FEATURES, PROFILE_FEATURES
)
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from courseware.tests.factories import InstructorFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
import datetime
from django.db.models import Q
import pytz
class TestAnalyticsBasic(ModuleStoreTestCase):
""" Test basic analytics functions. """
def setUp(self):
super(TestAnalyticsBasic, self).setUp()
self.course_key = self.store.make_course_key('robot', 'course', 'id')
self.users = tuple(UserFactory() for _ in xrange(30))
self.ces = tuple(CourseEnrollment.enroll(user, self.course_key)
for user in self.users)
self.instructor = InstructorFactory(course_key=self.course_key)
for user in self.users:
user.profile.meta = json.dumps({
"position": "edX expert {}".format(user.id),
"company": "Open edX Inc {}".format(user.id),
})
user.profile.save()
self.students_who_may_enroll = list(self.users) + [UserFactory() for _ in range(5)]
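        # All enrolled users plus five extra users are allowed to enroll; only
        # the five extras are expected to show up in list_may_enroll.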
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course_key
)
def test_enrolled_students_features_username(self):
self.assertIn('username', AVAILABLE_FEATURES)
userreports = enrolled_students_features(self.course_key, ['username'])
self.assertEqual(len(userreports), len(self.users))
for userreport in userreports:
self.assertEqual(userreport.keys(), ['username'])
self.assertIn(userreport['username'], [user.username for user in self.users])
def test_enrolled_students_features_keys(self):
query_features = ('username', 'name', 'email')
for feature in query_features:
self.assertIn(feature, AVAILABLE_FEATURES)
with self.assertNumQueries(1):
userreports = enrolled_students_features(self.course_key, query_features)
self.assertEqual(len(userreports), len(self.users))
for userreport in userreports:
self.assertEqual(set(userreport.keys()), set(query_features))
self.assertIn(userreport['username'], [user.username for user in self.users])
self.assertIn(userreport['email'], [user.email for user in self.users])
self.assertIn(userreport['name'], [user.profile.name for user in self.users])
def test_enrolled_students_meta_features_keys(self):
"""
Assert that we can query individual fields in the 'meta' field in the UserProfile
"""
query_features = ('meta.position', 'meta.company')
with self.assertNumQueries(1):
userreports = enrolled_students_features(self.course_key, query_features)
self.assertEqual(len(userreports), len(self.users))
for userreport in userreports:
self.assertEqual(set(userreport.keys()), set(query_features))
self.assertIn(userreport['meta.position'], ["edX expert {}".format(user.id) for user in self.users])
self.assertIn(userreport['meta.company'], ["Open edX Inc {}".format(user.id) for user in self.users])
def test_enrolled_students_features_keys_cohorted(self):
course = CourseFactory.create(org="test", course="course1", display_name="run1")
course.cohort_config = {'cohorted': True, 'auto_cohort': True, 'auto_cohort_groups': ['cohort']}
self.store.update_item(course, self.instructor.id)
cohort = CohortFactory.create(name='cohort', course_id=course.id)
cohorted_students = [UserFactory.create() for _ in xrange(10)]
cohorted_usernames = [student.username for student in cohorted_students]
non_cohorted_student = UserFactory.create()
for student in cohorted_students:
cohort.users.add(student)
CourseEnrollment.enroll(student, course.id)
CourseEnrollment.enroll(non_cohorted_student, course.id)
instructor = InstructorFactory(course_key=course.id)
self.client.login(username=instructor.username, password='test')
query_features = ('username', 'cohort')
# There should be a constant of 2 SQL queries when calling
# enrolled_students_features. The first query comes from the call to
# User.objects.filter(...), and the second comes from
# prefetch_related('course_groups').
with self.assertNumQueries(2):
userreports = enrolled_students_features(course.id, query_features)
self.assertEqual(len([r for r in userreports if r['username'] in cohorted_usernames]), len(cohorted_students))
self.assertEqual(len([r for r in userreports if r['username'] == non_cohorted_student.username]), 1)
for report in userreports:
self.assertEqual(set(report.keys()), set(query_features))
if report['username'] in cohorted_usernames:
self.assertEqual(report['cohort'], cohort.name)
else:
self.assertEqual(report['cohort'], '[unassigned]')
def test_available_features(self):
self.assertEqual(len(AVAILABLE_FEATURES), len(STUDENT_FEATURES + PROFILE_FEATURES))
self.assertEqual(set(AVAILABLE_FEATURES), set(STUDENT_FEATURES + PROFILE_FEATURES))
def test_list_may_enroll(self):
may_enroll = list_may_enroll(self.course_key, ['email'])
self.assertEqual(len(may_enroll), len(self.students_who_may_enroll) - len(self.users))
        email_addresses = [student.email for student in self.students_who_may_enroll]
for student in may_enroll:
self.assertEqual(student.keys(), ['email'])
            self.assertIn(student['email'], email_addresses)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestCourseSaleRecordsAnalyticsBasic(ModuleStoreTestCase):
""" Test basic course sale records analytics functions. """
def setUp(self):
"""
Fixtures.
"""
super(TestCourseSaleRecordsAnalyticsBasic, self).setUp()
self.course = CourseFactory.create()
self.cost = 40
self.course_mode = CourseMode(
course_id=self.course.id, mode_slug="honor",
mode_display_name="honor cert", min_price=self.cost
)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_course_sale_features(self):
query_features = [
'company_name', 'company_contact_name', 'company_contact_email', 'total_codes', 'total_used_codes',
'total_amount', 'created_at', 'customer_reference_number', 'recipient_name', 'recipient_email',
'created_by', 'internal_reference', 'invoice_number', 'codes', 'course_id'
]
#create invoice
sale_invoice = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='test@company.com', recipient_name='Testw_1', recipient_email='test2@test.com',
customer_reference_number='2Fwe23S', internal_reference="ABC", course_id=self.course.id
)
invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=sale_invoice,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
course_code = CourseRegistrationCode(
code="test_code{}".format(i), course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor, invoice=sale_invoice, invoice_item=invoice_item, mode_slug='honor'
)
course_code.save()
course_sale_records_list = sale_record_features(self.course.id, query_features)
for sale_record in course_sale_records_list:
self.assertEqual(sale_record['total_amount'], sale_invoice.total_amount)
self.assertEqual(sale_record['recipient_email'], sale_invoice.recipient_email)
self.assertEqual(sale_record['recipient_name'], sale_invoice.recipient_name)
self.assertEqual(sale_record['company_name'], sale_invoice.company_name)
self.assertEqual(sale_record['company_contact_name'], sale_invoice.company_contact_name)
self.assertEqual(sale_record['company_contact_email'], sale_invoice.company_contact_email)
self.assertEqual(sale_record['internal_reference'], sale_invoice.internal_reference)
self.assertEqual(sale_record['customer_reference_number'], sale_invoice.customer_reference_number)
self.assertEqual(sale_record['invoice_number'], sale_invoice.id)
self.assertEqual(sale_record['created_by'], self.instructor)
self.assertEqual(sale_record['total_used_codes'], 0)
self.assertEqual(sale_record['total_codes'], 5)
def test_sale_order_features_with_discount(self):
"""
Test Order Sales Report CSV
"""
query_features = [
('id', 'Order Id'),
('company_name', 'Company Name'),
('company_contact_name', 'Company Contact Name'),
('company_contact_email', 'Company Contact Email'),
('total_amount', 'Total Amount'),
('total_codes', 'Total Codes'),
('total_used_codes', 'Total Used Codes'),
('logged_in_username', 'Login Username'),
('logged_in_email', 'Login User Email'),
('purchase_time', 'Date of Sale'),
('customer_reference_number', 'Customer Reference Number'),
('recipient_name', 'Recipient Name'),
('recipient_email', 'Recipient Email'),
('bill_to_street1', 'Street 1'),
('bill_to_street2', 'Street 2'),
('bill_to_city', 'City'),
('bill_to_state', 'State'),
('bill_to_postalcode', 'Postal Code'),
('bill_to_country', 'Country'),
('order_type', 'Order Type'),
('status', 'Order Item Status'),
('coupon_code', 'Coupon Code'),
('unit_cost', 'Unit Price'),
('list_price', 'List Price'),
('codes', 'Registration Codes'),
('course_id', 'Course Id')
]
# add the coupon code for the course
coupon = Coupon(
code='test_code',
description='test_description',
course_id=self.course.id,
percentage_discount='10',
created_by=self.instructor,
is_active=True
)
coupon.save()
order = Order.get_cart_for_user(self.instructor)
order.order_type = 'business'
order.save()
order.add_billing_details(
company_name='Test Company',
company_contact_name='Test',
company_contact_email='test@123',
recipient_name='R1', recipient_email='',
customer_reference_number='PO#23'
)
CourseRegCodeItem.add_to_order(order, self.course.id, 4)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': coupon.code})
self.assertEqual(resp.status_code, 200)
order.purchase()
# get the updated item
item = order.orderitem_set.all().select_subclasses()[0]
# get the redeemed coupon information
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=order)
db_columns = [x[0] for x in query_features]
sale_order_records_list = sale_order_record_features(self.course.id, db_columns)
for sale_order_record in sale_order_records_list:
self.assertEqual(sale_order_record['recipient_email'], order.recipient_email)
self.assertEqual(sale_order_record['recipient_name'], order.recipient_name)
self.assertEqual(sale_order_record['company_name'], order.company_name)
self.assertEqual(sale_order_record['company_contact_name'], order.company_contact_name)
self.assertEqual(sale_order_record['company_contact_email'], order.company_contact_email)
self.assertEqual(sale_order_record['customer_reference_number'], order.customer_reference_number)
self.assertEqual(sale_order_record['unit_cost'], item.unit_cost)
self.assertEqual(sale_order_record['list_price'], item.list_price)
self.assertEqual(sale_order_record['status'], item.status)
self.assertEqual(sale_order_record['coupon_code'], coupon_redemption[0].coupon.code)
def test_sale_order_features_without_discount(self):
"""
Test Order Sales Report CSV
"""
query_features = [
('id', 'Order Id'),
('company_name', 'Company Name'),
('company_contact_name', 'Company Contact Name'),
('company_contact_email', 'Company Contact Email'),
('total_amount', 'Total Amount'),
('total_codes', 'Total Codes'),
('total_used_codes', 'Total Used Codes'),
('logged_in_username', 'Login Username'),
('logged_in_email', 'Login User Email'),
('purchase_time', 'Date of Sale'),
('customer_reference_number', 'Customer Reference Number'),
('recipient_name', 'Recipient Name'),
('recipient_email', 'Recipient Email'),
('bill_to_street1', 'Street 1'),
('bill_to_street2', 'Street 2'),
('bill_to_city', 'City'),
('bill_to_state', 'State'),
('bill_to_postalcode', 'Postal Code'),
('bill_to_country', 'Country'),
('order_type', 'Order Type'),
('status', 'Order Item Status'),
('coupon_code', 'Coupon Code'),
('unit_cost', 'Unit Price'),
('list_price', 'List Price'),
('codes', 'Registration Codes'),
('course_id', 'Course Id'),
('quantity', 'Quantity'),
('total_discount', 'Total Discount'),
('total_amount', 'Total Amount Paid'),
]
# add the coupon code for the course
order = Order.get_cart_for_user(self.instructor)
order.order_type = 'business'
order.save()
order.add_billing_details(
company_name='Test Company',
company_contact_name='Test',
company_contact_email='test@123',
recipient_name='R1', recipient_email='',
customer_reference_number='PO#23'
)
CourseRegCodeItem.add_to_order(order, self.course.id, 4)
order.purchase()
# get the updated item
item = order.orderitem_set.all().select_subclasses()[0]
db_columns = [x[0] for x in query_features]
sale_order_records_list = sale_order_record_features(self.course.id, db_columns)
for sale_order_record in sale_order_records_list:
self.assertEqual(sale_order_record['recipient_email'], order.recipient_email)
self.assertEqual(sale_order_record['recipient_name'], order.recipient_name)
self.assertEqual(sale_order_record['company_name'], order.company_name)
self.assertEqual(sale_order_record['company_contact_name'], order.company_contact_name)
self.assertEqual(sale_order_record['company_contact_email'], order.company_contact_email)
self.assertEqual(sale_order_record['customer_reference_number'], order.customer_reference_number)
self.assertEqual(sale_order_record['unit_cost'], item.unit_cost)
# Make sure list price is not None and matches the unit price since no discount was applied.
self.assertIsNotNone(sale_order_record['list_price'])
self.assertEqual(sale_order_record['list_price'], item.unit_cost)
self.assertEqual(sale_order_record['status'], item.status)
self.assertEqual(sale_order_record['coupon_code'], 'N/A')
self.assertEqual(sale_order_record['total_amount'], item.unit_cost * item.qty)
self.assertEqual(sale_order_record['total_discount'], 0)
self.assertEqual(sale_order_record['quantity'], item.qty)
class TestCourseRegistrationCodeAnalyticsBasic(ModuleStoreTestCase):
""" Test basic course registration codes analytics functions. """
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodeAnalyticsBasic, self).setUp()
self.course = CourseFactory.create()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
# Create a paid course mode.
mode = CourseModeFactory.create()
mode.course_id = self.course.id
mode.min_price = 1
mode.save()
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'unit_price': 122.45,
'company_contact_name': 'TestName', 'company_contact_email': 'test@company.com', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
def test_course_registration_features(self):
query_features = [
'code', 'redeem_code_url', 'course_id', 'company_name', 'created_by',
'redeemed_by', 'invoice_id', 'purchaser', 'customer_reference_number', 'internal_reference'
]
order = Order(user=self.instructor, status='purchased')
order.save()
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=1, redeemed_by=self.instructor
)
registration_code_redemption.save()
registration_codes = CourseRegistrationCode.objects.all()
course_registration_list = course_registration_features(query_features, registration_codes, csv_type='download')
self.assertEqual(len(course_registration_list), len(registration_codes))
for course_registration in course_registration_list:
self.assertEqual(set(course_registration.keys()), set(query_features))
self.assertIn(course_registration['code'], [registration_code.code for registration_code in registration_codes])
self.assertIn(
course_registration['course_id'],
[registration_code.course_id.to_deprecated_string() for registration_code in registration_codes]
)
self.assertIn(
course_registration['company_name'],
[
getattr(registration_code.invoice_item.invoice, 'company_name')
for registration_code in registration_codes
]
)
self.assertIn(
course_registration['invoice_id'],
[
registration_code.invoice_item.invoice_id
for registration_code in registration_codes
]
)
def test_coupon_codes_features(self):
query_features = [
'course_id', 'percentage_discount', 'code_redeemed_count', 'description', 'expiration_date',
'total_discounted_amount', 'total_discounted_seats'
]
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i),
description='test_description',
course_id=self.course.id, percentage_discount='{0}'.format(i),
created_by=self.instructor,
is_active=True
)
coupon.save()
        # now create coupons with the expiration dates
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
)
coupon.save()
active_coupons = Coupon.objects.filter(
Q(course_id=self.course.id),
Q(is_active=True),
Q(expiration_date__gt=datetime.datetime.now(pytz.UTC)) |
Q(expiration_date__isnull=True)
)
active_coupons_list = coupon_codes_features(query_features, active_coupons, self.course.id)
self.assertEqual(len(active_coupons_list), len(active_coupons))
for active_coupon in active_coupons_list:
self.assertEqual(set(active_coupon.keys()), set(query_features))
self.assertIn(active_coupon['percentage_discount'], [coupon.percentage_discount for coupon in active_coupons])
self.assertIn(active_coupon['description'], [coupon.description for coupon in active_coupons])
if active_coupon['expiration_date']:
self.assertIn(active_coupon['expiration_date'], [coupon.display_expiry_date for coupon in active_coupons])
self.assertIn(
active_coupon['course_id'],
[coupon.course_id.to_deprecated_string() for coupon in active_coupons]
)
|
abhiQmar/servo | refs/heads/master | tests/wpt/web-platform-tests/referrer-policy/generic/tools/spec_validator.py | 326 | #!/usr/bin/env python
import json, os, sys
from common_paths import *
def assert_non_empty_string(obj, field):
assert field in obj, 'Missing field "%s"' % field
assert isinstance(obj[field], basestring), \
'Field "%s" must be a string' % field
assert len(obj[field]) > 0, 'Field "%s" must not be empty' % field
def assert_non_empty_list(obj, field):
assert isinstance(obj[field], list), \
'%s must be a list' % field
assert len(obj[field]) > 0, \
'%s list must not be empty' % field
def assert_non_empty_dict(obj, field):
assert isinstance(obj[field], dict), \
'%s must be a dict' % field
assert len(obj[field]) > 0, \
'%s dict must not be empty' % field
def assert_contains(obj, field):
assert field in obj, 'Must contain field "%s"' % field
def assert_value_from(obj, field, items):
assert obj[field] in items, \
'Field "%s" must be from: %s' % (field, str(items))
def assert_atom_or_list_items_from(obj, field, items):
if isinstance(obj[field], basestring) or isinstance(obj[field], int):
assert_value_from(obj, field, items)
return
assert_non_empty_list(obj, field)
for allowed_value in obj[field]:
assert allowed_value != '*', "Wildcard is not supported for lists!"
assert allowed_value in items, \
'Field "%s" must be from: %s' % (field, str(items))
def assert_contains_only_fields(obj, expected_fields):
for expected_field in expected_fields:
assert_contains(obj, expected_field)
for actual_field in obj:
assert actual_field in expected_fields, \
'Unexpected field "%s".' % actual_field
def assert_value_unique_in(value, used_values):
assert value not in used_values, 'Duplicate value "%s"!' % str(value)
used_values[value] = True
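# A minimal sketch (illustrative only, not the real spec.src.json) of the
# structure validate() expects, derived from the assertions below:
#
#   {
#     "specification": [{"name": ..., "title": ..., "description": ...,
#                        "referrer_policy": ..., "specification_url": ...,
#                        "test_expansion": [...]}],
#     "referrer_policy_schema": [...],
#     "test_expansion_schema": {"expansion": [...], "delivery_method": [...],
#                               "redirection": [...], "origin": [...],
#                               "source_protocol": [...], "target_protocol": [...],
#                               "subresource": [...], "referrer_url": [...]},
#     "subresource_path": {<subresource>: <relative path>, ...},
#     "excluded_tests": [...]
#   }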
def validate(spec_json, details):
""" Validates the json specification for generating tests. """
details['object'] = spec_json
assert_contains_only_fields(spec_json, ["specification",
"referrer_policy_schema",
"test_expansion_schema",
"subresource_path",
"excluded_tests"])
assert_non_empty_list(spec_json, "specification")
assert_non_empty_list(spec_json, "referrer_policy_schema")
assert_non_empty_dict(spec_json, "test_expansion_schema")
assert_non_empty_list(spec_json, "excluded_tests")
specification = spec_json['specification']
referrer_policy_schema = spec_json['referrer_policy_schema']
test_expansion_schema = spec_json['test_expansion_schema']
excluded_tests = spec_json['excluded_tests']
subresource_path = spec_json['subresource_path']
valid_test_expansion_fields = ['name'] + test_expansion_schema.keys()
# Validate each single spec.
for spec in specification:
details['object'] = spec
# Validate required fields for a single spec.
assert_contains_only_fields(spec, ['name',
'title',
'description',
'referrer_policy',
'specification_url',
'test_expansion'])
assert_non_empty_string(spec, 'name')
assert_non_empty_string(spec, 'title')
assert_non_empty_string(spec, 'description')
assert_non_empty_string(spec, 'specification_url')
assert_value_from(spec, 'referrer_policy', referrer_policy_schema)
assert_non_empty_list(spec, 'test_expansion')
# Validate spec's test expansion.
used_spec_names = {}
for spec_exp in spec['test_expansion']:
details['object'] = spec_exp
assert_non_empty_string(spec_exp, 'name')
# The name is unique in same expansion group.
assert_value_unique_in((spec_exp['expansion'], spec_exp['name']),
used_spec_names)
assert_contains_only_fields(spec_exp, valid_test_expansion_fields)
for artifact in test_expansion_schema:
details['test_expansion_field'] = artifact
assert_atom_or_list_items_from(
spec_exp, artifact, ['*'] + test_expansion_schema[artifact])
del details['test_expansion_field']
# Validate the test_expansion schema members.
details['object'] = test_expansion_schema
assert_contains_only_fields(test_expansion_schema, ['expansion',
'delivery_method',
'redirection',
'origin',
'source_protocol',
'target_protocol',
'subresource',
'referrer_url'])
# Validate excluded tests.
details['object'] = excluded_tests
for excluded_test_expansion in excluded_tests:
assert_contains_only_fields(excluded_test_expansion,
valid_test_expansion_fields)
details['object'] = excluded_test_expansion
for artifact in test_expansion_schema:
details['test_expansion_field'] = artifact
assert_atom_or_list_items_from(
excluded_test_expansion,
artifact,
['*'] + test_expansion_schema[artifact])
del details['test_expansion_field']
# Validate subresource paths.
details['object'] = subresource_path
assert_contains_only_fields(subresource_path,
                                test_expansion_schema['subresource'])
for subresource in subresource_path:
local_rel_path = "." + subresource_path[subresource]
full_path = os.path.join(test_root_directory, local_rel_path)
        assert os.path.isfile(full_path), "%s is not an existing file" % full_path
del details['object']
def assert_valid_spec_json(spec_json):
error_details = {}
try:
validate(spec_json, error_details)
except AssertionError, err:
print 'ERROR:', err.message
print json.dumps(error_details, indent=4)
sys.exit(1)
def main():
    spec_json = load_spec_json()
assert_valid_spec_json(spec_json)
print "Spec JSON is valid."
if __name__ == '__main__':
main()
|
cstipkovic/spidermonkey-research | refs/heads/master | python/mozbuild/mozbuild/mach_commands.py | 1 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import errno
import itertools
import json
import logging
import operator
import os
import subprocess
import sys
import mozpack.path as mozpath
from mach.decorators import (
CommandArgument,
CommandArgumentGroup,
CommandProvider,
Command,
SubCommand,
)
from mach.mixin.logging import LoggingMixin
from mozbuild.base import (
BuildEnvironmentNotFoundException,
MachCommandBase,
MachCommandConditions as conditions,
MozbuildObject,
MozconfigFindException,
MozconfigLoadException,
ObjdirMismatchException,
)
from mozpack.manifests import (
InstallManifest,
)
from mozbuild.backend import backends
from mozbuild.shellutil import quote as shell_quote
BUILD_WHAT_HELP = '''
What to build. Can be a top-level make target or a relative directory. If
multiple options are provided, they will be built serially. Takes dependency
information from `topsrcdir/build/dumbmake-dependencies` to build additional
targets as needed. BUILDING ONLY PARTS OF THE TREE CAN RESULT IN BAD TREE
STATE. USE AT YOUR OWN RISK.
'''.strip()
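# Illustrative invocations: |mach build|, |mach build binaries|,
# |mach build faster|, or |mach build <dir>/<target>| (see the build command's
# docstring below for what "binaries" and "faster" cover).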
FINDER_SLOW_MESSAGE = '''
===================
PERFORMANCE WARNING
The OS X Finder application (file indexing used by Spotlight) used a lot of CPU
during the build - an average of %f%% (100%% is 1 core). This made your build
slower.
Consider adding ".noindex" to the end of your object directory name to have
Finder ignore it. Or, add an indexing exclusion through the Spotlight System
Preferences.
===================
'''.strip()
EXCESSIVE_SWAP_MESSAGE = '''
===================
PERFORMANCE WARNING
Your machine experienced a lot of swap activity during the build. This is
possibly a sign that your machine doesn't have enough physical memory or
not enough available memory to perform the build. It's also possible some
other system activity during the build is to blame.
If you feel this message is not appropriate for your machine configuration,
please file a Core :: Build Config bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Core&component=Build%20Config
and tell us about your machine and build configuration so we can adjust the
warning heuristic.
===================
'''
class TerminalLoggingHandler(logging.Handler):
"""Custom logging handler that works with terminal window dressing.
This class should probably live elsewhere, like the mach core. Consider
this a proving ground for its usefulness.
"""
def __init__(self):
logging.Handler.__init__(self)
self.fh = sys.stdout
self.footer = None
def flush(self):
self.acquire()
try:
self.fh.flush()
finally:
self.release()
def emit(self, record):
msg = self.format(record)
self.acquire()
try:
if self.footer:
self.footer.clear()
self.fh.write(msg)
self.fh.write('\n')
if self.footer:
self.footer.draw()
# If we don't flush, the footer may not get drawn.
self.fh.flush()
finally:
self.release()
class BuildProgressFooter(object):
"""Handles display of a build progress indicator in a terminal.
When mach builds inside a blessings-supported terminal, it will render
progress information collected from a BuildMonitor. This class converts the
state of BuildMonitor into terminal output.
"""
def __init__(self, terminal, monitor):
# terminal is a blessings.Terminal.
self._t = terminal
self._fh = sys.stdout
self.tiers = monitor.tiers.tier_status.viewitems()
def clear(self):
"""Removes the footer from the current terminal."""
self._fh.write(self._t.move_x(0))
self._fh.write(self._t.clear_eos())
def draw(self):
"""Draws this footer in the terminal."""
if not self.tiers:
return
# The drawn terminal looks something like:
# TIER: base nspr nss js platform app SUBTIER: static export libs tools DIRECTORIES: 06/09 (memory)
        # This is a list of 2-tuples of (encoding function, input). None means
        # no encoding. For a full explanation of why we do things this way,
        # read the big comment below.
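        # For example, parts might end up as (tier names illustrative only):
        #   [('bold', 'TIER:'), ('green', 'pre-export'),
        #    ('underline_yellow', 'export'), 'libs']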
parts = [('bold', 'TIER:')]
append = parts.append
for tier, status in self.tiers:
if status is None:
append(tier)
elif status == 'finished':
append(('green', tier))
else:
append(('underline_yellow', tier))
# We don't want to write more characters than the current width of the
# terminal otherwise wrapping may result in weird behavior. We can't
# simply truncate the line at terminal width characters because a)
# non-viewable escape characters count towards the limit and b) we
# don't want to truncate in the middle of an escape sequence because
# subsequent output would inherit the escape sequence.
max_width = self._t.width
written = 0
write_pieces = []
for part in parts:
try:
func, part = part
encoded = getattr(self._t, func)(part)
except ValueError:
encoded = part
len_part = len(part)
len_spaces = len(write_pieces)
if written + len_part + len_spaces > max_width:
write_pieces.append(part[0:max_width - written - len_spaces])
written += len_part
break
write_pieces.append(encoded)
written += len_part
with self._t.location():
self._t.move(self._t.height-1,0)
self._fh.write(' '.join(write_pieces))
class BuildOutputManager(LoggingMixin):
"""Handles writing build output to a terminal, to logs, etc."""
def __init__(self, log_manager, monitor):
self.populate_logger()
self.monitor = monitor
self.footer = None
terminal = log_manager.terminal
# TODO convert terminal footer to config file setting.
if not terminal or os.environ.get('MACH_NO_TERMINAL_FOOTER', None):
return
self.t = terminal
self.footer = BuildProgressFooter(terminal, monitor)
self._handler = TerminalLoggingHandler()
self._handler.setFormatter(log_manager.terminal_formatter)
self._handler.footer = self.footer
old = log_manager.replace_terminal_handler(self._handler)
self._handler.level = old.level
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.footer:
self.footer.clear()
# Prevents the footer from being redrawn if logging occurs.
self._handler.footer = None
def write_line(self, line):
if self.footer:
self.footer.clear()
print(line)
if self.footer:
self.footer.draw()
def refresh(self):
if not self.footer:
return
self.footer.clear()
self.footer.draw()
def on_line(self, line):
warning, state_changed, relevant = self.monitor.on_line(line)
if warning:
self.log(logging.INFO, 'compiler_warning', warning,
'Warning: {flag} in {filename}: {message}')
if relevant:
self.log(logging.INFO, 'build_output', {'line': line}, '{line}')
elif state_changed:
have_handler = hasattr(self, 'handler')
if have_handler:
self.handler.acquire()
try:
self.refresh()
finally:
if have_handler:
self.handler.release()
@CommandProvider
class Build(MachCommandBase):
"""Interface to build the tree."""
@Command('build', category='build', description='Build the tree.')
@CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
help='Number of concurrent jobs to run. Default is the number of CPUs.')
@CommandArgument('-C', '--directory', default=None,
help='Change to a subdirectory of the build directory first.')
@CommandArgument('what', default=None, nargs='*', help=BUILD_WHAT_HELP)
@CommandArgument('-X', '--disable-extra-make-dependencies',
default=False, action='store_true',
help='Do not add extra make dependencies.')
@CommandArgument('-v', '--verbose', action='store_true',
help='Verbose output for what commands the build is running.')
def build(self, what=None, disable_extra_make_dependencies=None, jobs=0,
directory=None, verbose=False):
"""Build the source tree.
With no arguments, this will perform a full build.
Positional arguments define targets to build. These can be make targets
or patterns like "<dir>/<target>" to indicate a make target within a
directory.
There are a few special targets that can be used to perform a partial
build faster than what `mach build` would perform:
* binaries - compiles and links all C/C++ sources and produces shared
libraries and executables (binaries).
* faster - builds JavaScript, XUL, CSS, etc files.
"binaries" and "faster" almost fully complement each other. However,
there are build actions not captured by either. If things don't appear to
be rebuilding, perform a vanilla `mach build` to rebuild the world.
"""
import which
from mozbuild.controller.building import BuildMonitor
from mozbuild.util import (
mkdir,
resolve_target_to_make,
)
self.log_manager.register_structured_logger(logging.getLogger('mozbuild'))
warnings_path = self._get_state_filename('warnings.json')
monitor = self._spawn(BuildMonitor)
monitor.init(warnings_path)
ccache_start = monitor.ccache_stats()
# Disable indexing in objdir because it is not necessary and can slow
# down builds.
mkdir(self.topobjdir, not_indexed=True)
with BuildOutputManager(self.log_manager, monitor) as output:
monitor.start()
if directory is not None and not what:
print('Can only use -C/--directory with an explicit target '
'name.')
return 1
if directory is not None:
                disable_extra_make_dependencies = True
directory = mozpath.normsep(directory)
if directory.startswith('/'):
directory = directory[1:]
status = None
monitor.start_resource_recording()
if what:
top_make = os.path.join(self.topobjdir, 'Makefile')
if not os.path.exists(top_make):
print('Your tree has not been configured yet. Please run '
'|mach build| with no arguments.')
return 1
# Collect target pairs.
target_pairs = []
for target in what:
path_arg = self._wrap_path_argument(target)
if directory is not None:
make_dir = os.path.join(self.topobjdir, directory)
make_target = target
else:
make_dir, make_target = \
resolve_target_to_make(self.topobjdir,
path_arg.relpath())
if make_dir is None and make_target is None:
return 1
# See bug 886162 - we don't want to "accidentally" build
# the entire tree (if that's really the intent, it's
# unlikely they would have specified a directory.)
if not make_dir and not make_target:
print("The specified directory doesn't contain a "
"Makefile and the first parent with one is the "
"root of the tree. Please specify a directory "
"with a Makefile or run |mach build| if you "
"want to build the entire tree.")
return 1
target_pairs.append((make_dir, make_target))
                # Possibly add extra make dependencies using dumbmake.
if not disable_extra_make_dependencies:
from dumbmake.dumbmake import (dependency_map,
add_extra_dependencies)
depfile = os.path.join(self.topsrcdir, 'build',
'dumbmake-dependencies')
with open(depfile) as f:
dm = dependency_map(f.readlines())
new_pairs = list(add_extra_dependencies(target_pairs, dm))
self.log(logging.DEBUG, 'dumbmake',
{'target_pairs': target_pairs,
'new_pairs': new_pairs},
'Added extra dependencies: will build {new_pairs} ' +
'instead of {target_pairs}.')
target_pairs = new_pairs
# Ensure build backend is up to date. The alternative is to
# have rules in the invoked Makefile to rebuild the build
# backend. But that involves make reinvoking itself and there
# are undesired side-effects of this. See bug 877308 for a
# comprehensive history lesson.
self._run_make(directory=self.topobjdir, target='backend',
line_handler=output.on_line, log=False,
print_directory=False)
# Build target pairs.
for make_dir, make_target in target_pairs:
# We don't display build status messages during partial
# tree builds because they aren't reliable there. This
# could potentially be fixed if the build monitor were more
# intelligent about encountering undefined state.
status = self._run_make(directory=make_dir, target=make_target,
line_handler=output.on_line, log=False, print_directory=False,
ensure_exit_code=False, num_jobs=jobs, silent=not verbose,
append_env={b'NO_BUILDSTATUS_MESSAGES': b'1'})
if status != 0:
break
else:
status = self._run_make(srcdir=True, filename='client.mk',
line_handler=output.on_line, log=False, print_directory=False,
allow_parallel=False, ensure_exit_code=False, num_jobs=jobs,
silent=not verbose)
self.log(logging.WARNING, 'warning_summary',
{'count': len(monitor.warnings_database)},
'{count} compiler warnings present.')
monitor.finish(record_usage=status==0)
high_finder, finder_percent = monitor.have_high_finder_usage()
if high_finder:
print(FINDER_SLOW_MESSAGE % finder_percent)
ccache_end = monitor.ccache_stats()
ccache_diff = None
if ccache_start and ccache_end:
ccache_diff = ccache_end - ccache_start
if ccache_diff:
self.log(logging.INFO, 'ccache',
{'msg': ccache_diff.hit_rate_message()}, "{msg}")
notify_minimum_time = 300
try:
notify_minimum_time = int(os.environ.get('MACH_NOTIFY_MINTIME', '300'))
except ValueError:
# Just stick with the default
pass
if monitor.elapsed > notify_minimum_time:
# Display a notification when the build completes.
self.notify('Build complete' if not status else 'Build failed')
if status:
return status
long_build = monitor.elapsed > 600
if long_build:
output.on_line('We know it took a while, but your build finally finished successfully!')
else:
output.on_line('Your build was successful!')
if monitor.have_resource_usage:
excessive, swap_in, swap_out = monitor.have_excessive_swapping()
# if excessive:
# print(EXCESSIVE_SWAP_MESSAGE)
print('To view resource usage of the build, run |mach '
'resource-usage|.')
telemetry_handler = getattr(self._mach_context,
'telemetry_handler', None)
telemetry_data = monitor.get_resource_usage()
# Record build configuration data. For now, we cherry pick
# items we need rather than grabbing everything, in order
# to avoid accidentally disclosing PII.
telemetry_data['substs'] = {}
try:
for key in ['MOZ_ARTIFACT_BUILDS', 'MOZ_USING_CCACHE']:
value = self.substs.get(key, False)
telemetry_data['substs'][key] = value
except BuildEnvironmentNotFoundException:
pass
# Grab ccache stats if available. We need to be careful not
# to capture information that can potentially identify the
# user (such as the cache location)
if ccache_diff:
telemetry_data['ccache'] = {}
for key in [key[0] for key in ccache_diff.STATS_KEYS]:
try:
telemetry_data['ccache'][key] = ccache_diff._values[key]
except KeyError:
pass
telemetry_handler(self._mach_context, telemetry_data)
# Only for full builds because incremental builders likely don't
# need to be burdened with this.
if not what:
try:
# Fennec doesn't have useful output from just building. We should
# arguably make the build action useful for Fennec. Another day...
if self.substs['MOZ_BUILD_APP'] != 'mobile/android':
print('To take your build for a test drive, run: |mach run|')
app = self.substs['MOZ_BUILD_APP']
if app in ('browser', 'mobile/android'):
print('For more information on what to do now, see '
'https://developer.mozilla.org/docs/Developer_Guide/So_You_Just_Built_Firefox')
except Exception:
# Ignore Exceptions in case we can't find config.status (such
# as when doing OSX Universal builds)
pass
return status
@Command('configure', category='build',
description='Configure the tree (run configure and config.status).')
@CommandArgument('options', default=None, nargs=argparse.REMAINDER,
help='Configure options')
def configure(self, options=None):
def on_line(line):
self.log(logging.INFO, 'build_output', {'line': line}, '{line}')
options = ' '.join(shell_quote(o) for o in options or ())
status = self._run_make(srcdir=True, filename='client.mk',
target='configure', line_handler=on_line, log=False,
print_directory=False, allow_parallel=False, ensure_exit_code=False,
append_env={b'CONFIGURE_ARGS': options.encode('utf-8'),
b'NO_BUILDSTATUS_MESSAGES': b'1',})
if not status:
print('Configure complete!')
            print('Be sure to run |mach build| to pick up any changes')
return status
@Command('resource-usage', category='post-build',
description='Show information about system resource usage for a build.')
@CommandArgument('--address', default='localhost',
help='Address the HTTP server should listen on.')
@CommandArgument('--port', type=int, default=0,
help='Port number the HTTP server should listen on.')
@CommandArgument('--browser', default='firefox',
help='Web browser to automatically open. See webbrowser Python module.')
@CommandArgument('--url',
help='URL of JSON document to display')
def resource_usage(self, address=None, port=None, browser=None, url=None):
import webbrowser
from mozbuild.html_build_viewer import BuildViewerServer
server = BuildViewerServer(address, port)
if url:
server.add_resource_json_url('url', url)
else:
last = self._get_state_filename('build_resources.json')
if not os.path.exists(last):
print('Build resources not available. If you have performed a '
'build and receive this message, the psutil Python package '
'likely failed to initialize properly.')
return 1
server.add_resource_json_file('last', last)
try:
webbrowser.get(browser).open_new_tab(server.url)
except Exception:
print('Cannot get browser specified, trying the default instead.')
try:
browser = webbrowser.get().open_new_tab(server.url)
except Exception:
print('Please open %s in a browser.' % server.url)
print('Hit CTRL+c to stop server.')
server.run()
CLOBBER_CHOICES = ['objdir', 'python']
@Command('clobber', category='build',
description='Clobber the tree (delete the object directory).')
@CommandArgument('what', default=['objdir'], nargs='*',
help='Target to clobber, must be one of {{{}}} (default objdir).'.format(
', '.join(CLOBBER_CHOICES)))
def clobber(self, what):
invalid = set(what) - set(self.CLOBBER_CHOICES)
if invalid:
print('Unknown clobber target(s): {}'.format(', '.join(invalid)))
return 1
ret = 0
if 'objdir' in what:
try:
self.remove_objdir()
except OSError as e:
if sys.platform.startswith('win'):
if isinstance(e, WindowsError) and e.winerror in (5,32):
self.log(logging.ERROR, 'file_access_error', {'error': e},
"Could not clobber because a file was in use. If the "
"application is running, try closing it. {error}")
return 1
raise
if 'python' in what:
if os.path.isdir(mozpath.join(self.topsrcdir, '.hg')):
cmd = ['hg', 'purge', '--all', '-I', 'glob:**.py[co]']
elif os.path.isdir(mozpath.join(self.topsrcdir, '.git')):
cmd = ['git', 'clean', '-f', '-x', '*.py[co]']
else:
cmd = ['find', '.', '-type', 'f', '-name', '*.py[co]', '-delete']
ret = subprocess.call(cmd, cwd=self.topsrcdir)
return ret
@Command('build-backend', category='build',
description='Generate a backend used to build the tree.')
@CommandArgument('-d', '--diff', action='store_true',
help='Show a diff of changes.')
# It would be nice to filter the choices below based on
# conditions, but that is for another day.
@CommandArgument('-b', '--backend', nargs='+', choices=sorted(backends),
help='Which backend to build.')
@CommandArgument('-v', '--verbose', action='store_true',
help='Verbose output.')
@CommandArgument('-n', '--dry-run', action='store_true',
help='Do everything except writing files out.')
def build_backend(self, backend, diff=False, verbose=False, dry_run=False):
python = self.virtualenv_manager.python_path
config_status = os.path.join(self.topobjdir, 'config.status')
if not os.path.exists(config_status):
print('config.status not found. Please run |mach configure| '
'or |mach build| prior to building the %s build backend.'
% backend)
return 1
args = [python, config_status]
if backend:
args.append('--backend')
args.extend(backend)
if diff:
args.append('--diff')
if verbose:
args.append('--verbose')
if dry_run:
args.append('--dry-run')
return self._run_command_in_objdir(args=args, pass_thru=True,
ensure_exit_code=False)
@CommandProvider
class Doctor(MachCommandBase):
"""Provide commands for diagnosing common build environment problems"""
@Command('doctor', category='devenv',
        description='Diagnose common build environment problems.')
@CommandArgument('--fix', default=None, action='store_true',
help='Attempt to fix found problems.')
def doctor(self, fix=None):
self._activate_virtualenv()
from mozbuild.doctor import Doctor
doctor = Doctor(self.topsrcdir, self.topobjdir, fix)
return doctor.check_all()
@CommandProvider
class Logs(MachCommandBase):
"""Provide commands to read mach logs."""
NO_AUTO_LOG = True
@Command('show-log', category='post-build',
description='Display mach logs')
@CommandArgument('log_file', nargs='?', type=argparse.FileType('rb'),
help='Filename to read log data from. Defaults to the log of the last '
'mach command.')
def show_log(self, log_file=None):
if not log_file:
path = self._get_state_filename('last_log.json')
log_file = open(path, 'rb')
if self.log_manager.terminal:
env = dict(os.environ)
if 'LESS' not in env:
# Sensible default flags if none have been set in the user
# environment.
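                # F: quit immediately if the output fits on one screen,
                # R: pass ANSI color escape sequences through raw,
                # X: don't clear the screen when less exits.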
env['LESS'] = 'FRX'
less = subprocess.Popen(['less'], stdin=subprocess.PIPE, env=env)
# Various objects already have a reference to sys.stdout, so we
# can't just change it, we need to change the file descriptor under
# it to redirect to less's input.
# First keep a copy of the sys.stdout file descriptor.
output_fd = os.dup(sys.stdout.fileno())
os.dup2(less.stdin.fileno(), sys.stdout.fileno())
startTime = 0
for line in log_file:
created, action, params = json.loads(line)
if not startTime:
startTime = created
self.log_manager.terminal_handler.formatter.start_time = \
created
if 'line' in params:
record = logging.makeLogRecord({
'created': created,
'name': self._logger.name,
'levelno': logging.INFO,
'msg': '{line}',
'params': params,
'action': action,
})
self._logger.handle(record)
if self.log_manager.terminal:
# Close less's input so that it knows that we're done sending data.
less.stdin.close()
# Since the less's input file descriptor is now also the stdout
# file descriptor, we still actually have a non-closed system file
# descriptor for less's input. Replacing sys.stdout's file
# descriptor with what it was before we replaced it will properly
# close less's input.
os.dup2(output_fd, sys.stdout.fileno())
less.wait()
@CommandProvider
class Warnings(MachCommandBase):
"""Provide commands for inspecting warnings."""
@property
def database_path(self):
return self._get_state_filename('warnings.json')
@property
def database(self):
from mozbuild.compilation.warnings import WarningsDatabase
path = self.database_path
database = WarningsDatabase()
if os.path.exists(path):
database.load_from_file(path)
return database
@Command('warnings-summary', category='post-build',
description='Show a summary of compiler warnings.')
@CommandArgument('-C', '--directory', default=None,
help='Change to a subdirectory of the build directory first.')
@CommandArgument('report', default=None, nargs='?',
help='Warnings report to display. If not defined, show the most '
'recent report.')
def summary(self, directory=None, report=None):
database = self.database
if directory:
dirpath = self.join_ensure_dir(self.topsrcdir, directory)
if not dirpath:
return 1
else:
dirpath = None
type_counts = database.type_counts(dirpath)
sorted_counts = sorted(type_counts.iteritems(),
key=operator.itemgetter(1))
total = 0
for k, v in sorted_counts:
print('%d\t%s' % (v, k))
total += v
print('%d\tTotal' % total)
@Command('warnings-list', category='post-build',
description='Show a list of compiler warnings.')
@CommandArgument('-C', '--directory', default=None,
help='Change to a subdirectory of the build directory first.')
@CommandArgument('--flags', default=None, nargs='+',
help='Which warnings flags to match.')
@CommandArgument('report', default=None, nargs='?',
help='Warnings report to display. If not defined, show the most '
'recent report.')
def list(self, directory=None, flags=None, report=None):
database = self.database
by_name = sorted(database.warnings)
topsrcdir = mozpath.normpath(self.topsrcdir)
if directory:
directory = mozpath.normsep(directory)
dirpath = self.join_ensure_dir(topsrcdir, directory)
if not dirpath:
return 1
if flags:
# Flatten lists of flags.
flags = set(itertools.chain(*[flaglist.split(',') for flaglist in flags]))
for warning in by_name:
filename = mozpath.normsep(warning['filename'])
if filename.startswith(topsrcdir):
filename = filename[len(topsrcdir) + 1:]
if directory and not filename.startswith(directory):
continue
if flags and warning['flag'] not in flags:
continue
if warning['column'] is not None:
print('%s:%d:%d [%s] %s' % (filename, warning['line'],
warning['column'], warning['flag'], warning['message']))
else:
print('%s:%d [%s] %s' % (filename, warning['line'],
warning['flag'], warning['message']))
def join_ensure_dir(self, dir1, dir2):
dir1 = mozpath.normpath(dir1)
dir2 = mozpath.normsep(dir2)
joined_path = mozpath.join(dir1, dir2)
if os.path.isdir(joined_path):
return joined_path
else:
print('Specified directory not found.')
return None
@CommandProvider
class GTestCommands(MachCommandBase):
@Command('gtest', category='testing',
description='Run GTest unit tests (C++ tests).')
@CommandArgument('gtest_filter', default=b"*", nargs='?', metavar='gtest_filter',
help="test_filter is a ':'-separated list of wildcard patterns (called the positive patterns),"
"optionally followed by a '-' and another ':'-separated pattern list (called the negative patterns).")
@CommandArgument('--jobs', '-j', default='1', nargs='?', metavar='jobs', type=int,
help='Run the tests in parallel using multiple processes.')
@CommandArgument('--tbpl-parser', '-t', action='store_true',
help='Output test results in a format that can be parsed by TBPL.')
@CommandArgument('--shuffle', '-s', action='store_true',
help='Randomize the execution order of tests.')
@CommandArgumentGroup('debugging')
@CommandArgument('--debug', action='store_true', group='debugging',
help='Enable the debugger. Not specifying a --debugger option will result in the default debugger being used.')
@CommandArgument('--debugger', default=None, type=str, group='debugging',
help='Name of debugger to use.')
@CommandArgument('--debugger-args', default=None, metavar='params', type=str,
group='debugging',
help='Command-line arguments to pass to the debugger itself; split as the Bourne shell would.')
def gtest(self, shuffle, jobs, gtest_filter, tbpl_parser, debug, debugger,
debugger_args):
# We lazy build gtest because it's slow to link
self._run_make(directory="testing/gtest", target='gtest',
print_directory=False, ensure_exit_code=True)
app_path = self.get_binary_path('app')
        args = [app_path, '-unittest']
if debug or debugger or debugger_args:
args = self.prepend_debugger_args(args, debugger, debugger_args)
cwd = os.path.join(self.topobjdir, '_tests', 'gtest')
if not os.path.isdir(cwd):
os.makedirs(cwd)
# Use GTest environment variable to control test execution
# For details see:
# https://code.google.com/p/googletest/wiki/AdvancedGuide#Running_Test_Programs:_Advanced_Options
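        # For example, GTEST_FILTER='MozPromise*:-MozPromise.Slow*' would run
        # every test matching MozPromise* except those matching
        # MozPromise.Slow* (test names here are illustrative only).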
gtest_env = {b'GTEST_FILTER': gtest_filter}
# Note: we must normalize the path here so that gtest on Windows sees
        # a MOZ_GMP_PATH which has only Windows dir separators, because
        # nsILocalFile cannot open the paths with non-Windows dir separators.
xre_path = os.path.join(os.path.normpath(self.topobjdir), "dist", "bin")
gtest_env["MOZ_XRE_DIR"] = xre_path
gtest_env["MOZ_GMP_PATH"] = os.pathsep.join(
os.path.join(xre_path, p, "1.0")
for p in ('gmp-fake', 'gmp-fakeopenh264')
)
gtest_env[b"MOZ_RUN_GTEST"] = b"True"
if shuffle:
gtest_env[b"GTEST_SHUFFLE"] = b"True"
if tbpl_parser:
gtest_env[b"MOZ_TBPL_PARSER"] = b"True"
if jobs == 1:
return self.run_process(args=args,
append_env=gtest_env,
cwd=cwd,
ensure_exit_code=False,
pass_thru=True)
from mozprocess import ProcessHandlerMixin
import functools
def handle_line(job_id, line):
# Prepend the jobId
line = '[%d] %s' % (job_id + 1, line.strip())
self.log(logging.INFO, "GTest", {'line': line}, '{line}')
gtest_env["GTEST_TOTAL_SHARDS"] = str(jobs)
processes = {}
for i in range(0, jobs):
gtest_env["GTEST_SHARD_INDEX"] = str(i)
processes[i] = ProcessHandlerMixin([app_path, "-unittest"],
cwd=cwd,
env=gtest_env,
processOutputLine=[functools.partial(handle_line, i)],
universal_newlines=True)
processes[i].run()
exit_code = 0
for process in processes.values():
status = process.wait()
if status:
exit_code = status
            # Clamp the error code to 255 so that an exact multiple of 256
            # does not overflow to 0.
if exit_code > 255:
exit_code = 255
return exit_code
def prepend_debugger_args(self, args, debugger, debugger_args):
'''
Given an array with program arguments, prepend arguments to run it under a
debugger.
:param args: The executable and arguments used to run the process normally.
:param debugger: The debugger to use, or empty to use the default debugger.
:param debugger_args: Any additional parameters to pass to the debugger.
'''
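        # Illustrative only (the actual path and args come from mozdebug and
        # the CLI, and vary by platform):
        #   prepend_debugger_args(['dist/bin/firefox', '-unittest'], 'gdb', None)
        # might return something like
        #   ['/usr/bin/gdb', '--args', 'dist/bin/firefox', '-unittest']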
import mozdebug
if not debugger:
# No debugger name was provided. Look for the default ones on
# current OS.
debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)
if debugger:
debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)
if not debuggerInfo:
print("Could not find a suitable debugger in your PATH.")
return 1
# Parameters come from the CLI. We need to convert them before
# their use.
if debugger_args:
from mozbuild import shellutil
try:
debugger_args = shellutil.split(debugger_args)
except shellutil.MetaCharacterException as e:
print("The --debugger_args you passed require a real shell to parse them.")
print("(We can't handle the %r character.)" % e.char)
return 1
# Prepend the debugger args.
args = [debuggerInfo.path] + debuggerInfo.args + args
return args
@CommandProvider
class ClangCommands(MachCommandBase):
@Command('clang-complete', category='devenv',
description='Generate a .clang_complete file.')
def clang_complete(self):
import shlex
build_vars = {}
def on_line(line):
elements = [s.strip() for s in line.split('=', 1)]
if len(elements) != 2:
return
build_vars[elements[0]] = elements[1]
try:
old_logger = self.log_manager.replace_terminal_handler(None)
self._run_make(target='showbuild', log=False, line_handler=on_line)
finally:
self.log_manager.replace_terminal_handler(old_logger)
def print_from_variable(name):
if name not in build_vars:
return
value = build_vars[name]
value = value.replace('-I.', '-I%s' % self.topobjdir)
value = value.replace(' .', ' %s' % self.topobjdir)
value = value.replace('-I..', '-I%s/..' % self.topobjdir)
value = value.replace(' ..', ' %s/..' % self.topobjdir)
args = shlex.split(value)
for i in range(0, len(args) - 1):
arg = args[i]
if arg.startswith(('-I', '-D')):
print(arg)
continue
if arg.startswith('-include'):
print(arg + ' ' + args[i + 1])
continue
print_from_variable('COMPILE_CXXFLAGS')
print('-I%s/ipc/chromium/src' % self.topsrcdir)
print('-I%s/ipc/glue' % self.topsrcdir)
print('-I%s/ipc/ipdl/_ipdlheaders' % self.topobjdir)
@CommandProvider
class Package(MachCommandBase):
"""Package the built product for distribution."""
@Command('package', category='post-build',
description='Package the built product for distribution as an APK, DMG, etc.')
@CommandArgument('-v', '--verbose', action='store_true',
help='Verbose output for what commands the packaging process is running.')
def package(self, verbose=False):
ret = self._run_make(directory=".", target='package',
silent=not verbose, ensure_exit_code=False)
if ret == 0:
self.notify('Packaging complete')
return ret
@CommandProvider
class Install(MachCommandBase):
"""Install a package."""
@Command('install', category='post-build',
description='Install the package on the machine, or on a device.')
def install(self):
if conditions.is_android(self):
from mozrunner.devices.android_device import verify_android_device
verify_android_device(self)
ret = self._run_make(directory=".", target='install', ensure_exit_code=False)
if ret == 0:
self.notify('Install complete')
return ret
@CommandProvider
class RunProgram(MachCommandBase):
"""Run the compiled program."""
prog_group = 'the compiled program'
@Command('run', category='post-build',
description='Run the compiled program, possibly under a debugger or DMD.')
@CommandArgument('params', nargs='...', group=prog_group,
help='Command-line arguments to be passed through to the program. Not specifying a --profile or -P option will result in a temporary profile being used.')
@CommandArgumentGroup(prog_group)
@CommandArgument('--remote', '-r', action='store_true', group=prog_group,
help='Do not pass the --no-remote argument by default.')
@CommandArgument('--background', '-b', action='store_true', group=prog_group,
help='Do not pass the --foreground argument by default on Mac.')
@CommandArgument('--noprofile', '-n', action='store_true', group=prog_group,
help='Do not pass the --profile argument by default.')
@CommandArgumentGroup('debugging')
@CommandArgument('--debug', action='store_true', group='debugging',
help='Enable the debugger. Not specifying a --debugger option will result in the default debugger being used.')
@CommandArgument('--debugger', default=None, type=str, group='debugging',
help='Name of debugger to use.')
@CommandArgument('--debugparams', default=None, metavar='params', type=str,
group='debugging',
help='Command-line arguments to pass to the debugger itself; split as the Bourne shell would.')
# Bug 933807 introduced JS_DISABLE_SLOW_SCRIPT_SIGNALS to avoid clever
# segfaults induced by the slow-script-detecting logic for Ion/Odin JITted
# code. If we don't pass this, the user will need to periodically type
# "continue" to (safely) resume execution. There are ways to implement
# automatic resuming; see the bug.
@CommandArgument('--slowscript', action='store_true', group='debugging',
help='Do not set the JS_DISABLE_SLOW_SCRIPT_SIGNALS env variable; when not set, recoverable but misleading SIGSEGV instances may occur in Ion/Odin JIT code.')
@CommandArgumentGroup('DMD')
@CommandArgument('--dmd', action='store_true', group='DMD',
help='Enable DMD. The following arguments have no effect without this.')
@CommandArgument('--mode', choices=['live', 'dark-matter', 'cumulative', 'scan'], group='DMD',
help='Profiling mode. The default is \'dark-matter\'.')
@CommandArgument('--stacks', choices=['partial', 'full'], group='DMD',
help='Allocation stack trace coverage. The default is \'partial\'.')
@CommandArgument('--show-dump-stats', action='store_true', group='DMD',
help='Show stats when doing dumps.')
def run(self, params, remote, background, noprofile, debug, debugger,
debugparams, slowscript, dmd, mode, stacks, show_dump_stats):
if conditions.is_android(self):
# Running Firefox for Android is completely different
if dmd:
print("DMD is not supported for Firefox for Android")
return 1
from mozrunner.devices.android_device import verify_android_device, run_firefox_for_android
if not (debug or debugger or debugparams):
verify_android_device(self, install=True)
return run_firefox_for_android(self, params)
verify_android_device(self, install=True, debugger=True)
args = ['']
else:
try:
binpath = self.get_binary_path('app')
except Exception as e:
print("It looks like your program isn't built.",
"You can run |mach build| to build it.")
print(e)
return 1
args = [binpath]
if params:
args.extend(params)
if not remote:
args.append('-no-remote')
if not background and sys.platform == 'darwin':
args.append('-foreground')
no_profile_option_given = \
all(p not in params for p in ['-profile', '--profile', '-P'])
if no_profile_option_given and not noprofile:
path = os.path.join(self.topobjdir, 'tmp', 'scratch_user')
if not os.path.isdir(path):
os.makedirs(path)
args.append('-profile')
args.append(path)
extra_env = {'MOZ_CRASHREPORTER_DISABLE': '1'}
if debug or debugger or debugparams:
if 'INSIDE_EMACS' in os.environ:
self.log_manager.terminal_handler.setLevel(logging.WARNING)
import mozdebug
if not debugger:
# No debugger name was provided. Look for the default ones on
# current OS.
debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)
if debugger:
self.debuggerInfo = mozdebug.get_debugger_info(debugger, debugparams)
if not self.debuggerInfo:
print("Could not find a suitable debugger in your PATH.")
return 1
# Parameters come from the CLI. We need to convert them before
# their use.
if debugparams:
from mozbuild import shellutil
try:
debugparams = shellutil.split(debugparams)
except shellutil.MetaCharacterException as e:
print("The --debugparams you passed require a real shell to parse them.")
print("(We can't handle the %r character.)" % e.char)
return 1
if not slowscript:
extra_env['JS_DISABLE_SLOW_SCRIPT_SIGNALS'] = '1'
# Prepend the debugger args.
args = [self.debuggerInfo.path] + self.debuggerInfo.args + args
if dmd:
dmd_params = []
if mode:
dmd_params.append('--mode=' + mode)
if stacks:
dmd_params.append('--stacks=' + stacks)
if show_dump_stats:
dmd_params.append('--show-dump-stats=yes')
bin_dir = os.path.dirname(binpath)
lib_name = self.substs['DLL_PREFIX'] + 'dmd' + self.substs['DLL_SUFFIX']
dmd_lib = os.path.join(bin_dir, lib_name)
if not os.path.exists(dmd_lib):
print("Please build with |--enable-dmd| to use DMD.")
return 1
env_vars = {
"Darwin": {
"DYLD_INSERT_LIBRARIES": dmd_lib,
"LD_LIBRARY_PATH": bin_dir,
},
"Linux": {
"LD_PRELOAD": dmd_lib,
"LD_LIBRARY_PATH": bin_dir,
},
"WINNT": {
"MOZ_REPLACE_MALLOC_LIB": dmd_lib,
},
}
arch = self.substs['OS_ARCH']
if dmd_params:
env_vars[arch]["DMD"] = " ".join(dmd_params)
extra_env.update(env_vars.get(arch, {}))
return self.run_process(args=args, ensure_exit_code=False,
pass_thru=True, append_env=extra_env)
@CommandProvider
class Buildsymbols(MachCommandBase):
"""Produce a package of debug symbols suitable for use with Breakpad."""
@Command('buildsymbols', category='post-build',
description='Produce a package of Breakpad-format symbols.')
def buildsymbols(self):
return self._run_make(directory=".", target='buildsymbols', ensure_exit_code=False)
@CommandProvider
class Makefiles(MachCommandBase):
@Command('empty-makefiles', category='build-dev',
description='Find empty Makefile.in in the tree.')
def empty(self):
import pymake.parser
import pymake.parserdata
IGNORE_VARIABLES = {
'DEPTH': ('@DEPTH@',),
'topsrcdir': ('@top_srcdir@',),
'srcdir': ('@srcdir@',),
'relativesrcdir': ('@relativesrcdir@',),
'VPATH': ('@srcdir@',),
}
IGNORE_INCLUDES = [
'include $(DEPTH)/config/autoconf.mk',
'include $(topsrcdir)/config/config.mk',
'include $(topsrcdir)/config/rules.mk',
]
def is_statement_relevant(s):
if isinstance(s, pymake.parserdata.SetVariable):
exp = s.vnameexp
if not exp.is_static_string:
return True
if exp.s not in IGNORE_VARIABLES:
return True
return s.value not in IGNORE_VARIABLES[exp.s]
if isinstance(s, pymake.parserdata.Include):
if s.to_source() in IGNORE_INCLUDES:
return False
return True
for path in self._makefile_ins():
relpath = os.path.relpath(path, self.topsrcdir)
try:
statements = [s for s in pymake.parser.parsefile(path)
if is_statement_relevant(s)]
if not statements:
print(relpath)
except pymake.parser.SyntaxError:
print('Warning: Could not parse %s' % relpath, file=sys.stderr)
def _makefile_ins(self):
for root, dirs, files in os.walk(self.topsrcdir):
for f in files:
if f == 'Makefile.in':
yield os.path.join(root, f)
@CommandProvider
class MachDebug(MachCommandBase):
@Command('environment', category='build-dev',
description='Show info about the mach and build environment.')
@CommandArgument('--format', default='pretty',
choices=['pretty', 'client.mk', 'configure', 'json'],
help='Print data in the given format.')
@CommandArgument('--output', '-o', type=str,
help='Output to the given file.')
@CommandArgument('--verbose', '-v', action='store_true',
help='Print verbose output.')
def environment(self, format, output=None, verbose=False):
func = getattr(self, '_environment_%s' % format.replace('.', '_'))
if output:
# We want to preserve mtimes if the output file already exists
# and the content hasn't changed.
from mozbuild.util import FileAvoidWrite
with FileAvoidWrite(output) as out:
return func(out, verbose)
return func(sys.stdout, verbose)
def _environment_pretty(self, out, verbose):
state_dir = self._mach_context.state_dir
import platform
print('platform:\n\t%s' % platform.platform(), file=out)
print('python version:\n\t%s' % sys.version, file=out)
print('python prefix:\n\t%s' % sys.prefix, file=out)
print('mach cwd:\n\t%s' % self._mach_context.cwd, file=out)
print('os cwd:\n\t%s' % os.getcwd(), file=out)
print('mach directory:\n\t%s' % self._mach_context.topdir, file=out)
print('state directory:\n\t%s' % state_dir, file=out)
print('object directory:\n\t%s' % self.topobjdir, file=out)
if self.mozconfig['path']:
print('mozconfig path:\n\t%s' % self.mozconfig['path'], file=out)
if self.mozconfig['configure_args']:
print('mozconfig configure args:', file=out)
for arg in self.mozconfig['configure_args']:
print('\t%s' % arg, file=out)
if self.mozconfig['make_extra']:
print('mozconfig extra make args:', file=out)
for arg in self.mozconfig['make_extra']:
print('\t%s' % arg, file=out)
if self.mozconfig['make_flags']:
print('mozconfig make flags:', file=out)
for arg in self.mozconfig['make_flags']:
print('\t%s' % arg, file=out)
config = None
try:
config = self.config_environment
except Exception:
pass
if config:
print('config topsrcdir:\n\t%s' % config.topsrcdir, file=out)
print('config topobjdir:\n\t%s' % config.topobjdir, file=out)
if verbose:
print('config substitutions:', file=out)
for k in sorted(config.substs):
print('\t%s: %s' % (k, config.substs[k]), file=out)
print('config defines:', file=out)
for k in sorted(config.defines):
print('\t%s' % k, file=out)
def _environment_client_mk(self, out, verbose):
if self.mozconfig['make_extra']:
for arg in self.mozconfig['make_extra']:
print(arg, file=out)
if self.mozconfig['make_flags']:
print('MOZ_MAKE_FLAGS=%s' % ' '.join(self.mozconfig['make_flags']))
objdir = mozpath.normsep(self.topobjdir)
print('MOZ_OBJDIR=%s' % objdir, file=out)
if 'MOZ_CURRENT_PROJECT' in os.environ:
objdir = mozpath.join(objdir, os.environ['MOZ_CURRENT_PROJECT'])
print('OBJDIR=%s' % objdir, file=out)
if self.mozconfig['path']:
print('FOUND_MOZCONFIG=%s' % mozpath.normsep(self.mozconfig['path']),
file=out)
def _environment_json(self, out, verbose):
import json
class EnvironmentEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, MozbuildObject):
result = {
'topsrcdir': obj.topsrcdir,
'topobjdir': obj.topobjdir,
'mozconfig': obj.mozconfig,
}
if verbose:
result['substs'] = obj.substs
result['defines'] = obj.defines
return result
elif isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
json.dump(self, cls=EnvironmentEncoder, sort_keys=True, fp=out)
class ArtifactSubCommand(SubCommand):
def __call__(self, func):
after = SubCommand.__call__(self, func)
jobchoices = {
'android-api-15',
'android-x86',
'linux',
'linux64',
'macosx64',
'win32',
'win64'
}
args = [
CommandArgument('--tree', metavar='TREE', type=str,
help='Firefox tree.'),
CommandArgument('--job', metavar='JOB', choices=jobchoices,
help='Build job.'),
CommandArgument('--verbose', '-v', action='store_true',
help='Print verbose output.'),
]
for arg in args:
after = arg(after)
return after
@CommandProvider
class PackageFrontend(MachCommandBase):
"""Fetch and install binary artifacts from Mozilla automation."""
@Command('artifact', category='post-build',
description='Use pre-built artifacts to build Firefox.')
def artifact(self):
'''Download, cache, and install pre-built binary artifacts to build Firefox.
Use |mach build| as normal to freshen your installed binary libraries:
artifact builds automatically download, cache, and install binary
artifacts from Mozilla automation, replacing whatever may be in your
object directory. Use |mach artifact last| to see what binary artifacts
were last used.
Never build libxul again!
'''
pass
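        # The concrete subcommands are defined below: |mach artifact install|,
        # |mach artifact last|, |mach artifact print-cache| and
        # |mach artifact clear-cache|.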
def _set_log_level(self, verbose):
self.log_manager.terminal_handler.setLevel(logging.INFO if not verbose else logging.DEBUG)
def _install_pip_package(self, package):
if os.environ.get('MOZ_AUTOMATION'):
self.virtualenv_manager._run_pip([
'install',
package,
'--no-index',
'--find-links',
'http://pypi.pub.build.mozilla.org/pub',
'--trusted-host',
'pypi.pub.build.mozilla.org',
])
return
self.virtualenv_manager.install_pip_package(package)
def _make_artifacts(self, tree=None, job=None, skip_cache=False):
# Undo PATH munging that will be done by activating the virtualenv,
# so that invoked subprocesses expecting to find system python
# (git cinnabar, in particular), will not find virtualenv python.
original_path = os.environ.get('PATH', '')
self._activate_virtualenv()
os.environ['PATH'] = original_path
for package in ('pylru==1.0.9',
'taskcluster==0.0.32',
'mozregression==1.0.2'):
self._install_pip_package(package)
state_dir = self._mach_context.state_dir
cache_dir = os.path.join(state_dir, 'package-frontend')
try:
os.makedirs(cache_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
import which
here = os.path.abspath(os.path.dirname(__file__))
build_obj = MozbuildObject.from_environment(cwd=here)
hg = None
if conditions.is_hg(build_obj):
if self._is_windows():
hg = which.which('hg.exe')
else:
hg = which.which('hg')
git = None
if conditions.is_git(build_obj):
if self._is_windows():
git = which.which('git.exe')
else:
git = which.which('git')
# Absolutely must come after the virtualenv is populated!
from mozbuild.artifacts import Artifacts
artifacts = Artifacts(tree, self.substs, self.defines, job,
log=self.log, cache_dir=cache_dir,
skip_cache=skip_cache, hg=hg, git=git,
topsrcdir=self.topsrcdir)
return artifacts
@ArtifactSubCommand('artifact', 'install',
'Install a good pre-built artifact.')
@CommandArgument('source', metavar='SRC', nargs='?', type=str,
help='Where to fetch and install artifacts from. Can be omitted, in '
'which case the current hg repository is inspected; an hg revision; '
'a remote URL; or a local file.',
default=None)
@CommandArgument('--skip-cache', action='store_true',
help='Skip all local caches to force re-fetching remote artifacts.',
default=False)
def artifact_install(self, source=None, skip_cache=False, tree=None, job=None, verbose=False):
self._set_log_level(verbose)
artifacts = self._make_artifacts(tree=tree, job=job, skip_cache=skip_cache)
return artifacts.install_from(source, self.distdir)
@ArtifactSubCommand('artifact', 'last',
'Print the last pre-built artifact installed.')
def artifact_print_last(self, tree=None, job=None, verbose=False):
self._set_log_level(verbose)
artifacts = self._make_artifacts(tree=tree, job=job)
artifacts.print_last()
return 0
@ArtifactSubCommand('artifact', 'print-cache',
'Print local artifact cache for debugging.')
def artifact_print_cache(self, tree=None, job=None, verbose=False):
self._set_log_level(verbose)
artifacts = self._make_artifacts(tree=tree, job=job)
artifacts.print_cache()
return 0
@ArtifactSubCommand('artifact', 'clear-cache',
'Delete local artifacts and reset local artifact cache.')
def artifact_clear_cache(self, tree=None, job=None, verbose=False):
self._set_log_level(verbose)
artifacts = self._make_artifacts(tree=tree, job=job)
artifacts.clear_cache()
return 0
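# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the Artifacts
# helper assembled by _make_artifacts above is typically driven.  The tree
# name 'mozilla-central' is a placeholder, and 'command' is assumed to be a
# PackageFrontend instance with a configured build environment.
#
#   def example_artifact_flow(command):
#       artifacts = command._make_artifacts(tree='mozilla-central', job=None)
#       # Install the most recent matching pre-built binaries into distdir.
#       artifacts.install_from(None, command.distdir)
#       # Report which artifacts were installed, then wipe the local cache.
#       artifacts.print_last()
#       artifacts.clear_cache()
# ---------------------------------------------------------------------------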
|
monash-merc/cvl-fabric-launcher | refs/heads/master | pyinstaller-2.1/tests/import/test_ctypes_cdll_c.py | 7 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import ctypes, ctypes.util
# Make sure we are able to load the MSVCRXX.DLL we are currently bound
# to through ctypes.
lib = ctypes.CDLL(ctypes.util.find_library('c'))
print(lib)
|
googleapis/python-tasks | refs/heads/master | google/cloud/tasks_v2beta3/services/cloud_tasks/async_client.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.tasks_v2beta3.services.cloud_tasks import pagers
from google.cloud.tasks_v2beta3.types import cloudtasks
from google.cloud.tasks_v2beta3.types import queue
from google.cloud.tasks_v2beta3.types import queue as gct_queue
from google.cloud.tasks_v2beta3.types import target
from google.cloud.tasks_v2beta3.types import task
from google.cloud.tasks_v2beta3.types import task as gct_task
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import CloudTasksTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import CloudTasksGrpcAsyncIOTransport
from .client import CloudTasksClient
class CloudTasksAsyncClient:
"""Cloud Tasks allows developers to manage the execution of
background work in their applications.
"""
_client: CloudTasksClient
DEFAULT_ENDPOINT = CloudTasksClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = CloudTasksClient.DEFAULT_MTLS_ENDPOINT
queue_path = staticmethod(CloudTasksClient.queue_path)
parse_queue_path = staticmethod(CloudTasksClient.parse_queue_path)
task_path = staticmethod(CloudTasksClient.task_path)
parse_task_path = staticmethod(CloudTasksClient.parse_task_path)
common_billing_account_path = staticmethod(
CloudTasksClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
CloudTasksClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(CloudTasksClient.common_folder_path)
parse_common_folder_path = staticmethod(CloudTasksClient.parse_common_folder_path)
common_organization_path = staticmethod(CloudTasksClient.common_organization_path)
parse_common_organization_path = staticmethod(
CloudTasksClient.parse_common_organization_path
)
common_project_path = staticmethod(CloudTasksClient.common_project_path)
parse_common_project_path = staticmethod(CloudTasksClient.parse_common_project_path)
common_location_path = staticmethod(CloudTasksClient.common_location_path)
parse_common_location_path = staticmethod(
CloudTasksClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CloudTasksAsyncClient: The constructed client.
"""
return CloudTasksClient.from_service_account_info.__func__(CloudTasksAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CloudTasksAsyncClient: The constructed client.
"""
return CloudTasksClient.from_service_account_file.__func__(CloudTasksAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> CloudTasksTransport:
"""Returns the transport used by the client instance.
Returns:
CloudTasksTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(CloudTasksClient).get_transport_class, type(CloudTasksClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, CloudTasksTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the cloud tasks client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CloudTasksTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = CloudTasksClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
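    # Illustrative construction sketch (the key path and endpoint below are
    # placeholders, not values mandated by this module).  A client can also
    # be built from an explicit service-account file via the classmethods
    # defined above:
    #
    #   client = CloudTasksAsyncClient.from_service_account_file(
    #       "/path/to/service-account.json",
    #       client_options={"api_endpoint": "cloudtasks.googleapis.com"},
    #   )
    #
    # When neither credentials nor options are supplied, CloudTasksAsyncClient()
    # falls back to the environment's default credentials as described in the
    # docstring above.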
async def list_queues(
self,
request: cloudtasks.ListQueuesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListQueuesAsyncPager:
r"""Lists queues.
Queues are returned in lexicographical order.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.ListQueuesRequest`):
The request object. Request message for
[ListQueues][google.cloud.tasks.v2beta3.CloudTasks.ListQueues].
parent (:class:`str`):
Required. The location name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.services.cloud_tasks.pagers.ListQueuesAsyncPager:
Response message for
[ListQueues][google.cloud.tasks.v2beta3.CloudTasks.ListQueues].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.ListQueuesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_queues,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListQueuesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_queue(
self,
request: cloudtasks.GetQueueRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> queue.Queue:
r"""Gets a queue.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.GetQueueRequest`):
The request object. Request message for
[GetQueue][google.cloud.tasks.v2beta3.CloudTasks.GetQueue].
name (:class:`str`):
Required. The resource name of the queue. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Queue:
A queue is a container of related
tasks. Queues are configured to manage
how those tasks are dispatched.
Configurable properties include rate
limits, retry options, queue types, and
others.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.GetQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_queue,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_queue(
self,
request: cloudtasks.CreateQueueRequest = None,
*,
parent: str = None,
queue: gct_queue.Queue = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_queue.Queue:
r"""Creates a queue.
Queues created with this method allow tasks to live for a
maximum of 31 days. After a task is 31 days old, the task will
be deleted regardless of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.CreateQueueRequest`):
The request object. Request message for
[CreateQueue][google.cloud.tasks.v2beta3.CloudTasks.CreateQueue].
parent (:class:`str`):
Required. The location name in which the queue will be
created. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``
The list of allowed locations can be obtained by calling
Cloud Tasks' implementation of
[ListLocations][google.cloud.location.Locations.ListLocations].
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
queue (:class:`google.cloud.tasks_v2beta3.types.Queue`):
Required. The queue to create.
[Queue's name][google.cloud.tasks.v2beta3.Queue.name]
cannot be the same as an existing queue.
This corresponds to the ``queue`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Queue:
A queue is a container of related
tasks. Queues are configured to manage
how those tasks are dispatched.
Configurable properties include rate
limits, retry options, queue types, and
others.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, queue])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.CreateQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if queue is not None:
request.queue = queue
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_queue,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def update_queue(
self,
request: cloudtasks.UpdateQueueRequest = None,
*,
queue: gct_queue.Queue = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_queue.Queue:
r"""Updates a queue.
This method creates the queue if it does not exist and updates
the queue if it does exist.
Queues created with this method allow tasks to live for a
maximum of 31 days. After a task is 31 days old, the task will
be deleted regardless of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.UpdateQueueRequest`):
The request object. Request message for
[UpdateQueue][google.cloud.tasks.v2beta3.CloudTasks.UpdateQueue].
queue (:class:`google.cloud.tasks_v2beta3.types.Queue`):
Required. The queue to create or update.
The queue's
[name][google.cloud.tasks.v2beta3.Queue.name] must be
specified.
Output only fields cannot be modified using UpdateQueue.
Any value specified for an output only field will be
ignored. The queue's
[name][google.cloud.tasks.v2beta3.Queue.name] cannot be
changed.
This corresponds to the ``queue`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
A mask used to specify which fields
of the queue are being updated.
If empty, then all fields will be
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Queue:
A queue is a container of related
tasks. Queues are configured to manage
how those tasks are dispatched.
Configurable properties include rate
limits, retry options, queue types, and
others.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([queue, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.UpdateQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if queue is not None:
request.queue = queue
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_queue,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("queue.name", request.queue.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_queue(
self,
request: cloudtasks.DeleteQueueRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be
created for 7 days.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.DeleteQueueRequest`):
The request object. Request message for
[DeleteQueue][google.cloud.tasks.v2beta3.CloudTasks.DeleteQueue].
name (:class:`str`):
Required. The queue name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.DeleteQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_queue,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def purge_queue(
self,
request: cloudtasks.PurgeQueueRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> queue.Queue:
r"""Purges a queue by deleting all of its tasks.
All tasks created before this method is called are
permanently deleted.
Purge operations can take up to one minute to take
effect. Tasks might be dispatched before the purge takes
effect. A purge is irreversible.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.PurgeQueueRequest`):
The request object. Request message for
[PurgeQueue][google.cloud.tasks.v2beta3.CloudTasks.PurgeQueue].
name (:class:`str`):
Required. The queue name. For example:
                ``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Queue:
A queue is a container of related
tasks. Queues are configured to manage
how those tasks are dispatched.
Configurable properties include rate
limits, retry options, queue types, and
others.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.PurgeQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.purge_queue,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def pause_queue(
self,
request: cloudtasks.PauseQueueRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> queue.Queue:
r"""Pauses the queue.
If a queue is paused then the system will stop dispatching tasks
until the queue is resumed via
[ResumeQueue][google.cloud.tasks.v2beta3.CloudTasks.ResumeQueue].
Tasks can still be added when the queue is paused. A queue is
paused if its [state][google.cloud.tasks.v2beta3.Queue.state] is
[PAUSED][google.cloud.tasks.v2beta3.Queue.State.PAUSED].
Args:
request (:class:`google.cloud.tasks_v2beta3.types.PauseQueueRequest`):
The request object. Request message for
[PauseQueue][google.cloud.tasks.v2beta3.CloudTasks.PauseQueue].
name (:class:`str`):
Required. The queue name. For example:
                ``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Queue:
A queue is a container of related
tasks. Queues are configured to manage
how those tasks are dispatched.
Configurable properties include rate
limits, retry options, queue types, and
others.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.PauseQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.pause_queue,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def resume_queue(
self,
request: cloudtasks.ResumeQueueRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> queue.Queue:
r"""Resume a queue.
This method resumes a queue after it has been
[PAUSED][google.cloud.tasks.v2beta3.Queue.State.PAUSED] or
[DISABLED][google.cloud.tasks.v2beta3.Queue.State.DISABLED]. The
state of a queue is stored in the queue's
[state][google.cloud.tasks.v2beta3.Queue.state]; after calling
this method it will be set to
[RUNNING][google.cloud.tasks.v2beta3.Queue.State.RUNNING].
WARNING: Resuming many high-QPS queues at the same time can lead
to target overloading. If you are resuming high-QPS queues,
follow the 500/50/5 pattern described in `Managing Cloud Tasks
Scaling
Risks <https://cloud.google.com/tasks/docs/manage-cloud-task-scaling>`__.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.ResumeQueueRequest`):
The request object. Request message for
[ResumeQueue][google.cloud.tasks.v2beta3.CloudTasks.ResumeQueue].
name (:class:`str`):
Required. The queue name. For example:
                ``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Queue:
A queue is a container of related
tasks. Queues are configured to manage
how those tasks are dispatched.
Configurable properties include rate
limits, retry options, queue types, and
others.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.ResumeQueueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.resume_queue,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_iam_policy(
self,
request: iam_policy_pb2.GetIamPolicyRequest = None,
*,
resource: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a
[Queue][google.cloud.tasks.v2beta3.Queue]. Returns an empty
policy if the resource exists and does not have a policy set.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the
specified resource parent:
- ``cloudtasks.queues.getIamPolicy``
Args:
request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`):
The request object. Request message for `GetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin -
members: - user:\ eve@example.com role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_iam_policy,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def set_iam_policy(
self,
request: iam_policy_pb2.SetIamPolicyRequest = None,
*,
resource: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy for a
[Queue][google.cloud.tasks.v2beta3.Queue]. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM
permissions yet. Project-level permissions are required to use
the Cloud Console.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the
specified resource parent:
- ``cloudtasks.queues.setIamPolicy``
Args:
request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):
The request object. Request message for `SetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin -
members: - user:\ eve@example.com role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.set_iam_policy,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def test_iam_permissions(
self,
request: iam_policy_pb2.TestIamPermissionsRequest = None,
*,
resource: str = None,
permissions: Sequence[str] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that a caller has on a
[Queue][google.cloud.tasks.v2beta3.Queue]. If the resource does
not exist, this will return an empty set of permissions, not a
[NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Args:
request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):
The request object. Request message for
`TestIamPermissions` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy detail is being requested. See
the operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
permissions (:class:`Sequence[str]`):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions,
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.test_iam_permissions,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
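    # Illustrative call sketch (the queue path and permission names below are
    # placeholders):
    #
    #   resp = await client.test_iam_permissions(
    #       resource="projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID",
    #       permissions=["cloudtasks.queues.get", "cloudtasks.queues.purge"],
    #   )
    #   granted = set(resp.permissions)
    #
    # Only the permissions the caller actually holds are echoed back in the
    # response.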
async def list_tasks(
self,
request: cloudtasks.ListTasksRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTasksAsyncPager:
r"""Lists the tasks in a queue.
By default, only the
[BASIC][google.cloud.tasks.v2beta3.Task.View.BASIC] view is
retrieved due to performance considerations;
[response_view][google.cloud.tasks.v2beta3.ListTasksRequest.response_view]
controls the subset of information which is returned.
The tasks may be returned in any order. The ordering may change
at any time.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.ListTasksRequest`):
The request object. Request message for listing tasks
using
[ListTasks][google.cloud.tasks.v2beta3.CloudTasks.ListTasks].
parent (:class:`str`):
Required. The queue name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.services.cloud_tasks.pagers.ListTasksAsyncPager:
Response message for listing tasks using
[ListTasks][google.cloud.tasks.v2beta3.CloudTasks.ListTasks].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.ListTasksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_tasks,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListTasksAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_task(
self,
request: cloudtasks.GetTaskRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> task.Task:
r"""Gets a task.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.GetTaskRequest`):
The request object. Request message for getting a task
using
[GetTask][google.cloud.tasks.v2beta3.CloudTasks.GetTask].
name (:class:`str`):
Required. The task name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Task:
A unit of scheduled work.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.GetTaskRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_task,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_task(
self,
request: cloudtasks.CreateTaskRequest = None,
*,
parent: str = None,
task: gct_task.Task = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_task.Task:
r"""Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask
command.
- The maximum task size is 100KB.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.CreateTaskRequest`):
The request object. Request message for
[CreateTask][google.cloud.tasks.v2beta3.CloudTasks.CreateTask].
parent (:class:`str`):
Required. The queue name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
The queue must already exist.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
task (:class:`google.cloud.tasks_v2beta3.types.Task`):
Required. The task to add.
Task names have the following format:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``.
The user can optionally specify a task
[name][google.cloud.tasks.v2beta3.Task.name]. If a name
is not specified then the system will generate a random
unique task id, which will be set in the task returned
in the [response][google.cloud.tasks.v2beta3.Task.name].
If
[schedule_time][google.cloud.tasks.v2beta3.Task.schedule_time]
is not set or is in the past then Cloud Tasks will set
it to the current time.
Task De-duplication:
Explicitly specifying a task ID enables task
de-duplication. If a task's ID is identical to that of
an existing task or a task that was deleted or executed
recently then the call will fail with
[ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS]. If the
task's queue was created using Cloud Tasks, then another
                task with the same name can't be created for ~1 hour
after the original task was deleted or executed. If the
task's queue was created using queue.yaml or queue.xml,
then another task with the same name can't be created
                for ~9 days after the original task was deleted or
executed.
Because there is an extra lookup cost to identify
duplicate task names, these
[CreateTask][google.cloud.tasks.v2beta3.CloudTasks.CreateTask]
calls have significantly increased latency. Using hashed
strings for the task id or for the prefix of the task id
is recommended. Choosing task ids that are sequential or
have sequential prefixes, for example using a timestamp,
causes an increase in latency and error rates in all
task commands. The infrastructure relies on an
approximately uniform distribution of task ids to store
and serve tasks efficiently.
This corresponds to the ``task`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Task:
A unit of scheduled work.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, task])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.CreateTaskRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if task is not None:
request.task = task
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_task,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
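    # Illustrative note on the de-duplication guidance above (hashlib, and the
    # payload_bytes / queue_name variables, are assumptions for the sketch and
    # not part of this client): explicit task IDs are usually derived from a
    # hash of the payload so that IDs stay approximately uniformly distributed:
    #
    #   import hashlib
    #   task_id = hashlib.sha256(payload_bytes).hexdigest()
    #   task = gct_task.Task(name="%s/tasks/%s" % (queue_name, task_id))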
async def delete_task(
self,
request: cloudtasks.DeleteTaskRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a task.
A task can be deleted if it is scheduled or dispatched.
A task cannot be deleted if it has executed successfully
or permanently failed.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.DeleteTaskRequest`):
The request object. Request message for deleting a task
using
[DeleteTask][google.cloud.tasks.v2beta3.CloudTasks.DeleteTask].
name (:class:`str`):
Required. The task name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.DeleteTaskRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_task,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def run_task(
self,
request: cloudtasks.RunTaskRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> task.Task:
r"""Forces a task to run now.
When this method is called, Cloud Tasks will dispatch the task,
even if the task is already running, the queue has reached its
[RateLimits][google.cloud.tasks.v2beta3.RateLimits] or is
[PAUSED][google.cloud.tasks.v2beta3.Queue.State.PAUSED].
This command is meant to be used for manual debugging. For
example,
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask] can be
used to retry a failed task after a fix has been made or to
manually force a task to be dispatched now.
The dispatched task is returned. That is, the task that is
returned contains the [status][Task.status] after the task is
dispatched but before the task is received by its target.
If Cloud Tasks receives a successful response from the task's
target, then the task will be deleted; otherwise the task's
[schedule_time][google.cloud.tasks.v2beta3.Task.schedule_time]
will be reset to the time that
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask] was
called plus the retry delay specified in the queue's
[RetryConfig][google.cloud.tasks.v2beta3.RetryConfig].
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask] returns
[NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
task that has already succeeded or permanently failed.
Args:
request (:class:`google.cloud.tasks_v2beta3.types.RunTaskRequest`):
The request object. Request message for forcing a task
to run now using
[RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask].
name (:class:`str`):
Required. The task name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.tasks_v2beta3.types.Task:
A unit of scheduled work.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloudtasks.RunTaskRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.run_task,
default_timeout=20.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
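# A minimal usage sketch (not part of the generated client): how RunTask might be
# invoked from application code. The project, location, queue and task ids below are
# placeholder assumptions for illustration only.
#
#   from google.cloud import tasks_v2beta3
#
#   async def force_run():
#       client = tasks_v2beta3.CloudTasksAsyncClient()
#       name = "projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task"
#       task = await client.run_task(name=name)
#       print(task.name)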
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-tasks",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CloudTasksAsyncClient",)
|
cbrewster/servo | refs/heads/master | tests/wpt/web-platform-tests/service-workers/service-worker/resources/import-scripts-version.py | 48 | import datetime
import time
epoch = datetime.datetime(1970, 1, 1)
def main(req, res):
# Artificially delay response time in order to ensure uniqueness of
# computed value
time.sleep(0.1)
now = (datetime.datetime.now() - epoch).total_seconds()
return ([
('Cache-Control', 'no-cache, must-revalidate'),
('Pragma', 'no-cache'),
('Content-Type', 'application/javascript')],
'version = "%s";\n' % now)
|
avedaee/DIRAC | refs/heads/integration | WorkloadManagementSystem/JobWrapper/JobWrapper.py | 1 | ########################################################################
# $HeadURL: $
# File : JobWrapper.py
# Author : Stuart Paterson
########################################################################
""" The Job Wrapper Class is instantiated with arguments tailored for running
a particular job. The JobWrapper starts a thread for execution of the job
and a Watchdog Agent that can monitor progress.
"""
__RCSID__ = "$Id: $"
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.FailoverTransfer import FailoverTransfer
from DIRAC.Resources.Catalog.PoolXMLFile import getGUID
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.private.RequestValidator import gRequestValidator
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.JobWrapper.WatchdogFactory import WatchdogFactory
from DIRAC.AccountingSystem.Client.Types.Job import Job as AccountingJob
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemSection
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Utilities.Subprocess import Subprocess
from DIRAC.Core.Utilities.File import getGlobbedTotalSize, getGlobbedFiles
from DIRAC.Core.Utilities.Version import getCurrentVersion
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities import List, Time
from DIRAC.Core.Utilities import DEncode
from DIRAC import S_OK, S_ERROR, gConfig, gLogger, Time
import DIRAC
import os
import re
import sys
import time
import shutil
import threading
import tarfile
import glob
import types
import urllib
EXECUTION_RESULT = {}
class JobWrapper:
#############################################################################
def __init__( self, jobID = None, jobReport = None ):
""" Standard constructor
"""
self.initialTiming = os.times()
self.section = os.path.join( getSystemSection( 'WorkloadManagement/JobWrapper' ), 'JobWrapper' )
self.log = gLogger
# Create the accounting report
self.accountingReport = AccountingJob()
# Initialize for accounting
self.wmsMajorStatus = "unknown"
self.wmsMinorStatus = "unknown"
# Set now as start time
self.accountingReport.setStartTime()
if not jobID:
self.jobID = 0
else:
self.jobID = jobID
self.siteName = gConfig.getValue( '/LocalSite/Site', 'Unknown' )
if jobReport:
self.jobReport = jobReport
else:
self.jobReport = JobReport( self.jobID, 'JobWrapper@%s' % self.siteName )
self.failoverTransfer = FailoverTransfer()
# self.root is the path the Wrapper is running at
self.root = os.getcwd()
# self.localSiteRoot is the path where the local DIRAC installation used to run the payload
# is taken from
self.localSiteRoot = gConfig.getValue( '/LocalSite/Root', DIRAC.rootPath )
# FIXME: Why do we need to load any .cfg file here????
self.__loadLocalCFGFiles( self.localSiteRoot )
result = getCurrentVersion()
if result['OK']:
self.diracVersion = result['Value']
else:
self.diracVersion = 'DIRAC version %s' % DIRAC.buildVersion
self.maxPeekLines = gConfig.getValue( self.section + '/MaxJobPeekLines', 20 )
if self.maxPeekLines < 0:
self.maxPeekLines = 0
self.defaultCPUTime = gConfig.getValue( self.section + '/DefaultCPUTime', 600 )
self.defaultOutputFile = gConfig.getValue( self.section + '/DefaultOutputFile', 'std.out' )
self.defaultErrorFile = gConfig.getValue( self.section + '/DefaultErrorFile', 'std.err' )
self.diskSE = gConfig.getValue( self.section + '/DiskSE', ['-disk', '-DST', '-USER'] )
self.tapeSE = gConfig.getValue( self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'] )
self.sandboxSizeLimit = gConfig.getValue( self.section + '/OutputSandboxLimit', 1024 * 1024 * 10 )
self.cleanUpFlag = gConfig.getValue( self.section + '/CleanUpFlag', True )
self.pilotRef = gConfig.getValue( '/LocalSite/PilotReference', 'Unknown' )
self.cpuNormalizationFactor = gConfig.getValue ( "/LocalSite/CPUNormalizationFactor", 0.0 )
self.bufferLimit = gConfig.getValue( self.section + '/BufferLimit', 10485760 )
self.defaultOutputSE = gConfig.getValue( '/Resources/StorageElementGroups/SE-USER', [] )
self.defaultCatalog = gConfig.getValue( self.section + '/DefaultCatalog', [] )
self.defaultFailoverSE = gConfig.getValue( '/Resources/StorageElementGroups/Tier1-Failover', [] )
self.defaultOutputPath = ''
self.dm = DataManager()
self.fc = FileCatalog()
self.log.verbose( '===========================================================================' )
self.log.verbose( 'SVN version %s' % ( __RCSID__ ) )
self.log.verbose( self.diracVersion )
self.log.verbose( 'Developer tag: 2' )
self.currentPID = os.getpid()
self.log.verbose( 'Job Wrapper started under PID: %s' % self.currentPID )
# Define a new process group for the job wrapper
self.parentPGID = os.getpgid( self.currentPID )
self.log.verbose( 'Job Wrapper parent process group ID: %s' % self.parentPGID )
os.setpgid( self.currentPID, self.currentPID )
self.currentPGID = os.getpgid( self.currentPID )
self.log.verbose( 'Job Wrapper process group ID: %s' % self.currentPGID )
self.log.verbose( '==========================================================================' )
self.log.verbose( 'sys.path is: \n%s' % '\n'.join( sys.path ) )
self.log.verbose( '==========================================================================' )
if not os.environ.has_key( 'PYTHONPATH' ):
self.log.verbose( 'PYTHONPATH is: null' )
else:
pypath = os.environ['PYTHONPATH']
self.log.verbose( 'PYTHONPATH is: \n%s' % '\n'.join( pypath.split( ':' ) ) )
self.log.verbose( '==========================================================================' )
if os.environ.has_key( 'LD_LIBRARY_PATH_SAVE' ):
if os.environ.has_key( 'LD_LIBRARY_PATH' ):
os.environ['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH'] + ':' + os.environ['LD_LIBRARY_PATH_SAVE']
else:
os.environ['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH_SAVE']
if not os.environ.has_key( 'LD_LIBRARY_PATH' ):
self.log.verbose( 'LD_LIBRARY_PATH is: null' )
else:
ldpath = os.environ['LD_LIBRARY_PATH']
self.log.verbose( 'LD_LIBRARY_PATH is: \n%s' % '\n'.join( ldpath.split( ':' ) ) )
self.log.verbose( '==========================================================================' )
if not self.cleanUpFlag:
self.log.verbose( 'CleanUp Flag is disabled by configuration' )
# Failure flag
self.failedFlag = True
# Set defaults for some global parameters to be defined for the accounting report
self.owner = 'unknown'
self.jobGroup = 'unknown'
self.jobType = 'unknown'
self.processingType = 'unknown'
self.userGroup = 'unknown'
self.jobClass = 'Single'
self.inputDataFiles = 0
self.outputDataFiles = 0
self.inputDataSize = 0
self.inputSandboxSize = 0
self.outputSandboxSize = 0
self.outputDataSize = 0
self.processedEvents = 0
self.wmsAccountingSent = False
self.jobArgs = {}
self.optArgs = {}
self.ceArgs = {}
#############################################################################
def initialize( self, arguments ):
""" Initializes parameters and environment for job.
"""
self.__report( 'Running', 'Job Initialization' )
self.log.info( 'Starting Job Wrapper Initialization for Job %s' % ( self.jobID ) )
self.jobArgs = arguments['Job']
self.log.verbose( self.jobArgs )
self.ceArgs = arguments ['CE']
self.log.verbose( self.ceArgs )
self.__setInitialJobParameters()
if arguments.has_key( 'Optimizer' ):
self.optArgs = arguments['Optimizer']
else:
self.optArgs = {}
# Fill some parameters for the accounting report
if self.jobArgs.has_key( 'Owner' ):
self.owner = self.jobArgs['Owner']
if self.jobArgs.has_key( 'JobGroup' ):
self.jobGroup = self.jobArgs['JobGroup']
if self.jobArgs.has_key( 'JobType' ):
self.jobType = self.jobArgs['JobType']
if self.jobArgs.has_key( 'InputData' ):
dataParam = self.jobArgs['InputData']
if dataParam and not type( dataParam ) == type( [] ):
dataParam = [dataParam]
self.inputDataFiles = len( dataParam )
if self.jobArgs.has_key( 'OutputData' ):
dataParam = self.jobArgs['OutputData']
if dataParam and not type( dataParam ) == type( [] ):
dataParam = [dataParam]
self.outputDataFiles = len( dataParam )
if self.jobArgs.has_key( 'ProcessingType' ):
self.processingType = self.jobArgs['ProcessingType']
if self.jobArgs.has_key( 'OwnerGroup' ):
self.userGroup = self.jobArgs['OwnerGroup']
# Prepare the working directory and cd to there
if self.jobID:
if os.path.exists( str( self.jobID ) ):
shutil.rmtree( str( self.jobID ) )
os.mkdir( str( self.jobID ) )
os.chdir( str( self.jobID ) )
else:
self.log.info( 'JobID is not defined, running in current directory' )
infoFile = open( 'job.info', 'w' )
infoFile.write( self.__dictAsInfoString( self.jobArgs, '/Job' ) )
infoFile.close()
#############################################################################
def __setInitialJobParameters( self ):
"""Sets some initial job parameters
"""
parameters = []
if self.ceArgs.has_key( 'LocalSE' ):
parameters.append( ( 'AgentLocalSE', ','.join( self.ceArgs['LocalSE'] ) ) )
if self.ceArgs.has_key( 'CompatiblePlatforms' ):
parameters.append( ( 'AgentCompatiblePlatforms', ','.join( self.ceArgs['CompatiblePlatforms'] ) ) )
if self.ceArgs.has_key( 'PilotReference' ):
parameters.append( ( 'Pilot_Reference', self.ceArgs['PilotReference'] ) )
if self.ceArgs.has_key( 'CPUScalingFactor' ):
parameters.append( ( 'CPUScalingFactor', self.ceArgs['CPUScalingFactor'] ) )
if self.ceArgs.has_key( 'CPUNormalizationFactor' ):
parameters.append( ( 'CPUNormalizationFactor', self.ceArgs['CPUNormalizationFactor'] ) )
parameters.append( ( 'PilotAgent', self.diracVersion ) )
parameters.append( ( 'JobWrapperPID', self.currentPID ) )
result = self.__setJobParamList( parameters )
return result
#############################################################################
def __loadLocalCFGFiles( self, localRoot ):
"""Loads any extra CFG files residing in the local DIRAC site root.
"""
files = os.listdir( localRoot )
self.log.debug( 'Checking directory %s for *.cfg files' % localRoot )
for i in files:
if re.search( '.cfg$', i ):
gConfig.loadFile( '%s/%s' % ( localRoot, i ) )
self.log.debug( 'Found local .cfg file %s' % i )
#############################################################################
def __dictAsInfoString( self, dData, infoString = '', currentBase = "" ):
for key in dData:
value = dData[ key ]
if type( value ) == types.DictType:
infoString = self.__dictAsInfoString( value, infoString, "%s/%s" % ( currentBase, key ) )
elif type( value ) in ( types.ListType, types.TupleType ):
if len( value ) and value[0] == '[':
infoString += "%s/%s = %s\n" % ( currentBase, key, " ".join( value ) )
else:
infoString += "%s/%s = %s\n" % ( currentBase, key, ", ".join( value ) )
else:
infoString += "%s/%s = %s\n" % ( currentBase, key, str( value ) )
return infoString
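  # Illustrative sketch (assumption, not executed): given a nested dict such as
  #   {'Owner': 'jdoe', 'Destination': {'Site': 'LCG.CERN.ch'}}
  # a call with currentBase = '/Job' would emit lines of the form
  #   /Job/Owner = jdoe
  #   /Job/Destination/Site = LCG.CERN.ch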
#############################################################################
def execute( self, arguments ):
"""The main execution method of the Job Wrapper
"""
self.log.info( 'Job Wrapper is starting execution phase for job %s' % ( self.jobID ) )
os.environ['DIRACJOBID'] = str( self.jobID )
os.environ['DIRACROOT'] = self.localSiteRoot
self.log.verbose( 'DIRACROOT = %s' % ( self.localSiteRoot ) )
os.environ['DIRACPYTHON'] = sys.executable
self.log.verbose( 'DIRACPYTHON = %s' % ( sys.executable ) )
os.environ['DIRACSITE'] = DIRAC.siteName()
self.log.verbose( 'DIRACSITE = %s' % ( DIRAC.siteName() ) )
outputFile = self.defaultOutputFile
errorFile = self.defaultErrorFile
if self.jobArgs.has_key( 'StdError' ):
errorFile = self.jobArgs['StdError']
if self.jobArgs.has_key( 'StdOutput' ):
outputFile = self.jobArgs['StdOutput']
if self.jobArgs.has_key( 'CPUTime' ):
jobCPUTime = int( self.jobArgs['CPUTime'] )
else:
self.log.info( 'Job %s has no CPU time limit specified, '
'applying default of %s' % ( self.jobID, self.defaultCPUTime ) )
jobCPUTime = self.defaultCPUTime
if self.jobArgs.has_key( 'Executable' ):
executable = self.jobArgs['Executable'].strip()
else:
msg = 'Job %s has no specified executable' % ( self.jobID )
self.log.warn( msg )
return S_ERROR( msg )
jobArguments = ''
if self.jobArgs.has_key( 'Arguments' ):
jobArguments = self.jobArgs['Arguments']
executable = os.path.expandvars( executable )
exeThread = None
spObject = None
if re.search( 'DIRACROOT', executable ):
executable = executable.replace( '$DIRACROOT', self.localSiteRoot )
self.log.verbose( 'Replaced $DIRACROOT for executable as %s' % ( self.localSiteRoot ) )
# Make the full path since . is not always in the PATH
executable = os.path.abspath( executable )
if not os.access( executable, os.X_OK ):
try:
os.chmod( executable, 0775 )
except Exception:
self.log.warn( 'Failed to change mode to 775 for the executable', executable )
exeEnv = dict( os.environ )
if self.jobArgs.has_key( 'ExecutionEnvironment' ):
self.log.verbose( 'Adding variables to execution environment' )
variableList = self.jobArgs['ExecutionEnvironment']
if type( variableList ) == type( " " ):
variableList = [variableList]
for var in variableList:
nameEnv = var.split( '=' )[0]
valEnv = urllib.unquote( var.split( '=' )[1] )
exeEnv[nameEnv] = valEnv
self.log.verbose( '%s = %s' % ( nameEnv, valEnv ) )
if os.path.exists( executable ):
self.__report( 'Running', 'Application', sendFlag = True )
spObject = Subprocess( timeout = False, bufferLimit = int( self.bufferLimit ) )
command = executable
if jobArguments:
command += ' ' + jobArguments
self.log.verbose( 'Execution command: %s' % ( command ) )
maxPeekLines = self.maxPeekLines
exeThread = ExecutionThread( spObject, command, maxPeekLines, outputFile, errorFile, exeEnv )
exeThread.start()
time.sleep( 10 )
payloadPID = spObject.getChildPID()
if not payloadPID:
return S_ERROR( 'Payload process could not start after 10 seconds' )
else:
self.__report( 'Failed', 'Application not found', sendFlag = True )
return S_ERROR( 'Path to executable %s not found' % ( executable ) )
self.__setJobParam( 'PayloadPID', payloadPID )
watchdogFactory = WatchdogFactory()
watchdogInstance = watchdogFactory.getWatchdog( self.currentPID, exeThread, spObject, jobCPUTime )
if not watchdogInstance['OK']:
self.log.warn( watchdogInstance['Message'] )
return S_ERROR( 'Could not create Watchdog instance' )
self.log.verbose( 'WatchdogInstance %s' % ( watchdogInstance ) )
watchdog = watchdogInstance['Value']
self.log.verbose( 'Initializing Watchdog instance' )
watchdog.initialize()
self.log.verbose( 'Calibrating Watchdog instance' )
watchdog.calibrate()
# do not kill Test jobs by CPU time
if self.jobArgs.has_key( 'JobType' ) and self.jobArgs['JobType'] == 'Test':
watchdog.testCPUConsumed = False
if self.jobArgs.has_key( 'DisableCPUCheck' ):
watchdog.testCPUConsumed = False
if exeThread.isAlive():
self.log.info( 'Application thread is started in Job Wrapper' )
watchdog.run()
else:
self.log.warn( 'Application thread stopped very quickly...' )
if exeThread.isAlive():
self.log.warn( 'Watchdog exited before completion of execution thread' )
while exeThread.isAlive():
time.sleep( 5 )
outputs = None
if EXECUTION_RESULT.has_key( 'Thread' ):
threadResult = EXECUTION_RESULT['Thread']
if not threadResult['OK']:
self.log.error( 'Failed to execute the payload', threadResult['Message'] )
self.__report( 'Failed', 'Application thread failed', sendFlag = True )
if 'Value' in threadResult:
outs = threadResult['Value']
if outs:
self.__setJobParam( 'ApplicationError', outs[0], sendFlag = True )
else:
self.__setJobParam( 'ApplicationError', 'None reported', sendFlag = True )
else:
outputs = threadResult['Value']
if EXECUTION_RESULT.has_key( 'CPU' ):
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in JobWrapper execute', cpuString )
if watchdog.checkError:
# In this case, the Watchdog has killed the Payload and the ExecutionThread can not get the CPU statistics
# os.times only reports for waited children
# Take the CPU from the last value recorded by the Watchdog
self.__report( 'Failed', watchdog.checkError, sendFlag = True )
if EXECUTION_RESULT.has_key( 'CPU' ):
if 'LastUpdateCPU(s)' in watchdog.currentStats:
          EXECUTION_RESULT['CPU'][0] = 0
          EXECUTION_RESULT['CPU'][1] = 0
          EXECUTION_RESULT['CPU'][2] = 0
          EXECUTION_RESULT['CPU'][3] = watchdog.currentStats['LastUpdateCPU(s)']
if watchdog.currentStats:
self.log.info( 'Statistics collected by the Watchdog:\n ',
'\n '.join( ['%s: %s' % items for items in watchdog.currentStats.items() ] ) )
if outputs:
status = threadResult['Value'][0]
# Send final heartbeat of a configurable number of lines here
self.log.verbose( 'Sending final application standard output heartbeat' )
self.__sendFinalStdOut( exeThread )
self.log.verbose( 'Execution thread status = %s' % ( status ) )
if not watchdog.checkError and not status:
self.failedFlag = False
self.__report( 'Completed', 'Application Finished Successfully', sendFlag = True )
elif not watchdog.checkError:
self.__report( 'Completed', 'Application Finished With Errors', sendFlag = True )
else:
return S_ERROR( 'No outputs generated from job execution' )
self.log.info( 'Checking directory contents after execution:' )
res = systemCall( 5, ['ls', '-al'] )
if not res['OK']:
self.log.error( 'Failed to list the current directory', res['Message'] )
elif res['Value'][0]:
self.log.error( 'Failed to list the current directory', res['Value'][2] )
else:
# no timeout and exit code is 0
self.log.info( res['Value'][1] )
return S_OK()
#############################################################################
def __sendFinalStdOut( self, exeThread ):
"""After the Watchdog process has finished, this function sends a final
report to be presented in the StdOut in the web page via the heartbeat
mechanism.
"""
cpuConsumed = self.__getCPU()['Value']
self.log.info( 'Total CPU Consumed is: %s' % cpuConsumed[1] )
self.__setJobParam( 'TotalCPUTime(s)', cpuConsumed[0] )
normCPU = cpuConsumed[0] * self.cpuNormalizationFactor
self.__setJobParam( 'NormCPUTime(s)', normCPU )
if self.cpuNormalizationFactor:
self.log.info( 'Normalized CPU Consumed is:', normCPU )
result = exeThread.getOutput( self.maxPeekLines )
if not result['OK']:
lines = 0
appStdOut = ''
else:
lines = len( result['Value'] )
appStdOut = '\n'.join( result['Value'] )
header = 'Last %s lines of application output from JobWrapper on %s :' % ( lines, Time.toString() )
border = '=' * len( header )
cpuTotal = 'CPU Total: %s (h:m:s)' % cpuConsumed[1]
cpuTotal += " Normalized CPU Total %.1f s @ HEP'06" % normCPU
header = '\n%s\n%s\n%s\n%s\n' % ( border, header, cpuTotal, border )
appStdOut = header + appStdOut
self.log.info( appStdOut )
heartBeatDict = {}
staticParamDict = {'StandardOutput':appStdOut}
if self.jobID:
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
result = jobReport.sendHeartBeat( self.jobID, heartBeatDict, staticParamDict )
if not result['OK']:
self.log.error( 'Problem sending final heartbeat from JobWrapper', result['Message'] )
return
#############################################################################
def __getCPU( self ):
"""Uses os.times() to get CPU time and returns HH:MM:SS after conversion.
"""
# TODO: normalize CPU consumed via scale factor
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in __getCPU', cpuString )
utime, stime, cutime, cstime, elapsed = EXECUTION_RESULT['CPU']
cpuTime = utime + stime + cutime + cstime
self.log.verbose( "Total CPU time consumed = %s" % ( cpuTime ) )
result = self.__getCPUHMS( cpuTime )
return result
#############################################################################
def __getCPUHMS( self, cpuTime ):
mins, secs = divmod( cpuTime, 60 )
hours, mins = divmod( mins, 60 )
humanTime = '%02d:%02d:%02d' % ( hours, mins, secs )
self.log.verbose( 'Human readable CPU time is: %s' % humanTime )
return S_OK( ( cpuTime, humanTime ) )
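  # Worked example (illustration only): a cpuTime of 3725 seconds gives
  # divmod(3725, 60) = (62, 5) and divmod(62, 60) = (1, 2), i.e. '01:02:05'.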
#############################################################################
def resolveInputData( self ):
"""Input data is resolved here using a VO specific plugin module.
"""
self.__report( 'Running', 'Input Data Resolution', sendFlag = True )
if self.ceArgs.has_key( 'LocalSE' ):
localSEList = self.ceArgs['LocalSE']
else:
localSEList = gConfig.getValue( '/LocalSite/LocalSE', [] )
if not localSEList:
msg = 'Job has input data requirement but no site LocalSE defined'
self.log.warn( msg )
return S_ERROR( msg )
inputData = self.jobArgs['InputData']
self.log.verbose( 'Input Data is: \n%s' % ( inputData ) )
if type( inputData ) in types.StringTypes:
inputData = [inputData]
if type( localSEList ) in types.StringTypes:
localSEList = List.fromChar( localSEList )
msg = 'Job Wrapper cannot resolve local replicas of input data with null '
if not inputData:
msg += 'job input data parameter '
self.log.warn( msg )
return S_ERROR( msg )
if not localSEList:
msg += 'site localSEList list'
self.log.warn( msg )
# return S_ERROR( msg )
if not self.jobArgs.has_key( 'InputDataModule' ):
msg = 'Job has no input data resolution module specified'
self.log.warn( msg )
# Use the default one
inputDataPolicy = 'DIRAC.WorkloadManagementSystem.Client.InputDataResolution'
else:
inputDataPolicy = self.jobArgs['InputDataModule']
self.log.verbose( 'Job input data requirement is \n%s' % ',\n'.join( inputData ) )
self.log.verbose( 'Job input data resolution policy module is %s' % ( inputDataPolicy ) )
self.log.info( 'Site has the following local SEs: %s' % ', '.join( localSEList ) )
lfns = [ fname.replace( 'LFN:', '' ) for fname in inputData ]
optReplicas = {}
if self.optArgs:
optDict = None
try:
optDict = eval( self.optArgs['InputData'] )
optReplicas = optDict['Value']
self.log.info( 'Found optimizer catalogue result' )
self.log.verbose( optReplicas )
except Exception, x:
optDict = None
self.log.warn( str( x ) )
self.log.warn( 'Optimizer information could not be converted to a dictionary will call catalogue directly' )
resolvedData = {}
result = self.__checkFileCatalog( lfns, optReplicas )
if not result['OK']:
self.log.info( 'Could not obtain replica information from Optimizer File Catalog information' )
self.log.warn( result )
result = self.__checkFileCatalog( lfns )
if not result['OK']:
self.log.warn( 'Could not obtain replica information from File Catalog directly' )
self.log.warn( result )
return S_ERROR( result['Message'] )
else:
resolvedData = result
else:
resolvedData = result
# add input data size to accounting report (since resolution successful)
for lfn, mdata in resolvedData['Value']['Successful'].items():
if mdata.has_key( 'Size' ):
lfnSize = mdata['Size']
if not type( lfnSize ) == type( long( 1 ) ):
try:
lfnSize = long( lfnSize )
except Exception, x:
lfnSize = 0
self.log.info( 'File size for LFN:%s was not a long integer, setting size to 0' % ( lfn ) )
self.inputDataSize += lfnSize
configDict = {'JobID':self.jobID, 'LocalSEList':localSEList, 'DiskSEList':self.diskSE, 'TapeSEList':self.tapeSE}
self.log.info( configDict )
argumentsDict = {'FileCatalog':resolvedData, 'Configuration':configDict, 'InputData':lfns, 'Job':self.jobArgs}
self.log.info( argumentsDict )
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule( inputDataPolicy, argumentsDict )
if not moduleInstance['OK']:
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn( 'Input data resolution failed' )
return result
return S_OK()
#############################################################################
def __checkFileCatalog( self, lfns, optReplicaInfo = None ):
"""This function returns dictionaries containing all relevant parameters
to allow data access from the relevant file catalogue. Optionally, optimizer
parameters can be supplied here but if these are not sufficient, the file catalogue
is subsequently consulted.
N.B. this will be considerably simplified when the DMS evolves to have a
generic FC interface and a single call for all available information.
"""
replicas = optReplicaInfo
if not replicas:
replicas = self.__getReplicaMetadata( lfns )
if not replicas['OK']:
return replicas
self.log.verbose( replicas )
failedGUIDs = []
for lfn, reps in replicas['Value']['Successful'].items():
if not reps.has_key( 'GUID' ):
failedGUIDs.append( lfn )
if failedGUIDs:
self.log.info( 'The following file(s) were found not to have a GUID:\n%s' % ',\n'.join( failedGUIDs ) )
if failedGUIDs:
return S_ERROR( 'File metadata is not available' )
else:
return replicas
#############################################################################
def __getReplicaMetadata( self, lfns ):
""" Wrapper function to consult catalog for all necessary file metadata
and check the result.
"""
start = time.time()
repsResult = self.dm.getReplicas( lfns )
timing = time.time() - start
self.log.info( 'Replica Lookup Time: %.2f seconds ' % ( timing ) )
if not repsResult['OK']:
self.log.warn( repsResult['Message'] )
return repsResult
badLFNCount = 0
badLFNs = []
catalogResult = repsResult['Value']
if catalogResult.has_key( 'Failed' ):
for lfn, cause in catalogResult['Failed'].items():
badLFNCount += 1
badLFNs.append( 'LFN:%s Problem: %s' % ( lfn, cause ) )
if catalogResult.has_key( 'Successful' ):
for lfn, replicas in catalogResult['Successful'].items():
if not replicas:
badLFNCount += 1
badLFNs.append( 'LFN:%s Problem: Null replica value' % ( lfn ) )
if badLFNCount:
self.log.warn( 'Job Wrapper found %s problematic LFN(s) for job %s' % ( badLFNCount, self.jobID ) )
param = '\n'.join( badLFNs )
self.log.info( param )
self.__setJobParam( 'MissingLFNs', param )
return S_ERROR( 'Input Data Not Available' )
# Must retrieve GUIDs from LFC for files
start = time.time()
guidDict = self.fc.getFileMetadata( lfns )
timing = time.time() - start
self.log.info( 'GUID Lookup Time: %.2f seconds ' % ( timing ) )
if not guidDict['OK']:
self.log.warn( 'Failed to retrieve GUIDs from file catalogue' )
self.log.warn( guidDict['Message'] )
return guidDict
failed = guidDict['Value']['Failed']
if failed:
self.log.warn( 'Could not retrieve GUIDs from catalogue for the following files' )
self.log.warn( failed )
return S_ERROR( 'Missing GUIDs' )
for lfn, reps in repsResult['Value']['Successful'].items():
guidDict['Value']['Successful'][lfn].update( reps )
catResult = guidDict
return catResult
#############################################################################
def processJobOutputs( self, arguments ):
"""Outputs for a job may be treated here.
"""
# first iteration of this, no checking of wildcards or oversize sandbox files etc.
outputSandbox = []
if self.jobArgs.has_key( 'OutputSandbox' ):
outputSandbox = self.jobArgs['OutputSandbox']
if not type( outputSandbox ) == type( [] ):
outputSandbox = [ outputSandbox ]
self.log.verbose( 'OutputSandbox files are: %s' % ', '.join( outputSandbox ) )
outputData = []
if self.jobArgs.has_key( 'OutputData' ):
outputData = self.jobArgs['OutputData']
if type( outputData ) != list:
outputData = outputData.split( ';' )
self.log.verbose( 'OutputData files are: %s' % ', '.join( outputData ) )
# First resolve any wildcards for output files and work out if any files are missing
resolvedSandbox = self.__resolveOutputSandboxFiles( outputSandbox )
if not resolvedSandbox['OK']:
self.log.warn( 'Output sandbox file resolution failed:' )
self.log.warn( resolvedSandbox['Message'] )
self.__report( 'Failed', 'Resolving Output Sandbox' )
fileList = resolvedSandbox['Value']['Files']
missingFiles = resolvedSandbox['Value']['Missing']
if missingFiles:
self.jobReport.setJobParameter( 'OutputSandboxMissingFiles', ', '.join( missingFiles ), sendFlag = False )
if not self.jobArgs.has_key( 'Owner' ):
msg = 'Job has no owner specified'
self.log.warn( msg )
return S_OK( msg )
# Do not overwrite in case of Error
if not self.failedFlag:
self.__report( 'Completed', 'Uploading Output Sandbox' )
uploadOutputDataInAnyCase = False
if fileList and self.jobID:
self.outputSandboxSize = getGlobbedTotalSize( fileList )
self.log.info( 'Attempting to upload Sandbox with limit:', self.sandboxSizeLimit )
sandboxClient = SandboxStoreClient()
result = sandboxClient.uploadFilesAsSandboxForJob( fileList, self.jobID,
'Output', self.sandboxSizeLimit ) # 1024*1024*10
if not result['OK']:
self.log.error( 'Output sandbox upload failed with message', result['Message'] )
if result.has_key( 'SandboxFileName' ):
outputSandboxData = result['SandboxFileName']
self.log.info( 'Attempting to upload %s as output data' % ( outputSandboxData ) )
if self.failedFlag:
outputData = [outputSandboxData]
uploadOutputDataInAnyCase = True
else:
outputData.append( outputSandboxData )
self.jobReport.setJobParameter( 'OutputSandbox', 'Sandbox uploaded to grid storage', sendFlag = False )
self.jobReport.setJobParameter( 'OutputSandboxLFN',
self.__getLFNfromOutputFile( outputSandboxData )[0], sendFlag = False )
else:
self.log.info( 'Could not get SandboxFileName to attempt upload to Grid storage' )
return S_ERROR( 'Output sandbox upload failed and no file name supplied for failover to Grid storage' )
else:
# Do not overwrite in case of Error
if not self.failedFlag:
self.__report( 'Completed', 'Output Sandbox Uploaded' )
self.log.info( 'Sandbox uploaded successfully' )
if ( outputData and not self.failedFlag ) or uploadOutputDataInAnyCase:
# Do not upload outputdata if the job has failed.
# The exception is when the outputData is what was the OutputSandbox, which should be uploaded in any case
if self.jobArgs.has_key( 'OutputSE' ):
outputSE = self.jobArgs['OutputSE']
if type( outputSE ) in types.StringTypes:
outputSE = [outputSE]
else:
outputSE = self.defaultOutputSE
if self.jobArgs.has_key( 'OutputPath' ) and type( self.jobArgs['OutputPath'] ) in types.StringTypes:
outputPath = self.jobArgs['OutputPath']
else:
outputPath = self.defaultOutputPath
if not outputSE and not self.defaultFailoverSE:
return S_ERROR( 'No output SEs defined in VO configuration' )
result = self.__transferOutputDataFiles( outputData, outputSE, outputPath )
if not result['OK']:
return result
return S_OK( 'Job outputs processed' )
#############################################################################
def __resolveOutputSandboxFiles( self, outputSandbox ):
"""Checks the output sandbox file list and resolves any specified wildcards.
Also tars any specified directories.
"""
missing = []
okFiles = []
for i in outputSandbox:
self.log.verbose( 'Looking at OutputSandbox file/directory/wildcard: %s' % i )
globList = glob.glob( i )
for check in globList:
if os.path.isfile( check ):
self.log.verbose( 'Found locally existing OutputSandbox file: %s' % check )
okFiles.append( check )
if os.path.isdir( check ):
self.log.verbose( 'Found locally existing OutputSandbox directory: %s' % check )
cmd = ['tar', 'cf', '%s.tar' % check, check]
result = systemCall( 60, cmd )
if not result['OK']:
self.log.error( 'Failed to create OutputSandbox tar', result['Message'] )
elif result['Value'][0]:
self.log.error( 'Failed to create OutputSandbox tar', result['Value'][2] )
if os.path.isfile( '%s.tar' % ( check ) ):
self.log.verbose( 'Appending %s.tar to OutputSandbox' % check )
okFiles.append( '%s.tar' % ( check ) )
else:
self.log.warn( 'Could not tar OutputSandbox directory: %s' % check )
missing.append( check )
for i in outputSandbox:
if not i in okFiles:
if not '%s.tar' % i in okFiles:
if not re.search( '\*', i ):
if not i in missing:
missing.append( i )
result = {'Missing':missing, 'Files':okFiles}
return S_OK( result )
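  # Illustrative sketch (assumption): with an OutputSandbox of ['*.log', 'results/'],
  # existing files matching '*.log' are returned under 'Files', the 'results'
  # directory is tarred and appended as 'results.tar', and any entry that matched
  # nothing (and contains no wildcard) ends up under 'Missing'.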
#############################################################################
def __transferOutputDataFiles( self, outputData, outputSE, outputPath ):
"""Performs the upload and registration in the LFC
"""
self.log.verbose( 'Uploading output data files' )
self.__report( 'Completed', 'Uploading Output Data' )
self.log.info( 'Output data files %s to be uploaded to %s SE' % ( ', '.join( outputData ), outputSE ) )
missing = []
uploaded = []
# Separate outputdata in the form of lfns and local files
lfnList = []
nonlfnList = []
for out in outputData:
if out.lower().find( 'lfn:' ) != -1:
lfnList.append( out )
else:
nonlfnList.append( out )
# Check whether list of outputData has a globbable pattern
globbedOutputList = List.uniqueElements( getGlobbedFiles( nonlfnList ) )
if not globbedOutputList == nonlfnList and globbedOutputList:
self.log.info( 'Found a pattern in the output data file list, files to upload are:',
', '.join( globbedOutputList ) )
nonlfnList = globbedOutputList
outputData = lfnList + nonlfnList
pfnGUID = {}
result = getGUID( outputData )
if not result['OK']:
self.log.warn( 'Failed to determine POOL GUID(s) for output file list (OK if not POOL files)',
result['Message'] )
else:
pfnGUID = result['Value']
for outputFile in outputData:
( lfn, localfile ) = self.__getLFNfromOutputFile( outputFile, outputPath )
if not os.path.exists( localfile ):
self.log.error( 'Missing specified output data file:', outputFile )
continue
# # file size
localfileSize = getGlobbedTotalSize( localfile )
self.outputDataSize += getGlobbedTotalSize( localfile )
outputFilePath = os.path.join( os.getcwd(), localfile )
# # file GUID
fileGUID = pfnGUID[localfile] if localfile in pfnGUID else None
if fileGUID:
self.log.verbose( 'Found GUID for file from POOL XML catalogue %s' % localfile )
# # file checksum
cksm = fileAdler( outputFilePath )
fileMetaDict = { "Size": localfileSize,
"LFN" : lfn,
"ChecksumType" : "Adler32",
"Checksum": cksm,
"GUID" : fileGUID }
outputSEList = self.__getSortedSEList( outputSE )
upload = self.failoverTransfer.transferAndRegisterFile( localfile,
outputFilePath,
lfn,
outputSEList,
fileMetaDict,
self.defaultCatalog )
if upload['OK']:
self.log.info( '"%s" successfully uploaded to "%s" as "LFN:%s"' % ( localfile,
upload['Value']['uploadedSE'],
lfn ) )
uploaded.append( lfn )
continue
self.log.error( 'Could not putAndRegister file',
'%s with LFN %s to %s with GUID %s trying failover storage' % ( localfile, lfn,
', '.join( outputSEList ),
fileGUID ) )
if not self.defaultFailoverSE:
self.log.info( 'No failover SEs defined for JobWrapper,',
'cannot try to upload output file %s anywhere else.' % outputFile )
missing.append( outputFile )
continue
failoverSEs = self.__getSortedSEList( self.defaultFailoverSE )
targetSE = outputSEList[0]
result = self.failoverTransfer.transferAndRegisterFileFailover( localfile,
outputFilePath,
lfn,
targetSE,
failoverSEs,
fileMetaDict,
self.defaultCatalog )
if not result['OK']:
self.log.error( 'Completely failed to upload file to failover SEs with result:\n%s' % result )
missing.append( outputFile )
else:
self.log.info( 'File %s successfully uploaded to failover storage element' % lfn )
uploaded.append( lfn )
# For files correctly uploaded must report LFNs to job parameters
if uploaded:
report = ', '.join( uploaded )
# In case the VO payload has also uploaded data using the same parameter
# name this should be checked prior to setting.
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring', timeout = 120 )
result = monitoring.getJobParameter( int( self.jobID ), 'UploadedOutputData' )
if result['OK']:
if result['Value'].has_key( 'UploadedOutputData' ):
report += ', %s' % result['Value']['UploadedOutputData']
self.jobReport.setJobParameter( 'UploadedOutputData', report, sendFlag = False )
# TODO Notify the user of any output data / output sandboxes
if missing:
self.__setJobParam( 'OutputData', 'MissingFiles: %s' % ', '.join( missing ) )
self.__report( 'Failed', 'Uploading Job OutputData' )
return S_ERROR( 'Failed to upload OutputData' )
self.__report( 'Completed', 'Output Data Uploaded' )
return S_OK( 'OutputData uploaded successfully' )
#############################################################################
def __getSortedSEList( self, seList ):
""" Randomize SE, putting first those that are Local/Close to the Site
"""
if not seList:
return seList
localSEs = []
otherSEs = []
siteSEs = []
seMapping = getSEsForSite( DIRAC.siteName() )
if seMapping['OK'] and seMapping['Value']:
siteSEs = seMapping['Value']
for seName in seList:
if seName in siteSEs:
localSEs.append( seName )
else:
otherSEs.append( seName )
return List.randomize( localSEs ) + List.randomize( otherSEs )
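  # Illustrative note (SE names are placeholders): with seList = ['CERN-USER', 'RAL-USER']
  # and a job running at CERN, 'CERN-USER' (a site-local SE) is tried first, with the
  # remaining SEs following in a randomized order.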
#############################################################################
def __getLFNfromOutputFile( self, outputFile, outputPath = '' ):
"""Provides a generic convention for VO output data
files if no path is specified.
"""
if not re.search( '^LFN:', outputFile ):
localfile = outputFile
initial = self.owner[:1]
vo = getVOForGroup( self.userGroup )
if not vo:
vo = 'dirac'
ops = Operations( vo = vo )
user_prefix = ops.getValue( "LFNUserPrefix", 'user' )
basePath = '/' + vo + '/' + user_prefix + '/' + initial + '/' + self.owner
if outputPath:
# If output path is given, append it to the user path and put output files in this directory
if outputPath.startswith( '/' ):
outputPath = outputPath[1:]
else:
# By default the output path is constructed from the job id
subdir = str( self.jobID / 1000 )
outputPath = subdir + '/' + str( self.jobID )
lfn = os.path.join( basePath, outputPath, os.path.basename( localfile ) )
else:
# if LFN is given, take it as it is
localfile = os.path.basename( outputFile.replace( "LFN:", "" ) )
lfn = outputFile.replace( "LFN:", "" )
return ( lfn, localfile )
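  # Illustrative sketch (values are assumptions): for owner 'jdoe' in VO 'dirac' with
  # the default 'user' prefix, output file 'output.root' of job 12345 and no explicit
  # OutputPath would map to
  #   /dirac/user/j/jdoe/12/12345/output.root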
#############################################################################
def transferInputSandbox( self, inputSandbox ):
"""Downloads the input sandbox for the job
"""
sandboxFiles = []
registeredISB = []
lfns = []
self.__report( 'Running', 'Downloading InputSandbox' )
if type( inputSandbox ) not in ( types.TupleType, types.ListType ):
inputSandbox = [ inputSandbox ]
for isb in inputSandbox:
if isb.find( "LFN:" ) == 0 or isb.find( "lfn:" ) == 0:
lfns.append( isb )
else:
if isb.find( "SB:" ) == 0:
registeredISB.append( isb )
else:
sandboxFiles.append( os.path.basename( isb ) )
self.log.info( 'Downloading InputSandbox for job %s: %s' % ( self.jobID, ', '.join( sandboxFiles ) ) )
if os.path.exists( '%s/inputsandbox' % ( self.root ) ):
# This is a debugging tool, get the file from local storage to debug Job Wrapper
sandboxFiles.append( 'jobDescription.xml' )
for inputFile in sandboxFiles:
if os.path.exists( '%s/inputsandbox/%s' % ( self.root, inputFile ) ):
self.log.info( 'Getting InputSandbox file %s from local directory for testing' % ( inputFile ) )
shutil.copy( self.root + '/inputsandbox/' + inputFile, inputFile )
result = S_OK( sandboxFiles )
else:
if registeredISB:
for isb in registeredISB:
self.log.info( "Downloading Input SandBox %s" % isb )
result = SandboxStoreClient().downloadSandbox( isb )
if not result[ 'OK' ]:
self.__report( 'Running', 'Failed Downloading InputSandbox' )
return S_ERROR( "Cannot download Input sandbox %s: %s" % ( isb, result[ 'Message' ] ) )
else:
self.inputSandboxSize += result[ 'Value' ]
if lfns:
self.__report( 'Running', 'Downloading InputSandbox LFN(s)' )
lfns = [fname.replace( 'LFN:', '' ).replace( 'lfn:', '' ) for fname in lfns]
download = self.dm.getFile( lfns )
if not download['OK']:
self.log.warn( download )
self.__report( 'Running', 'Failed Downloading InputSandbox LFN(s)' )
return S_ERROR( download['Message'] )
failed = download['Value']['Failed']
if failed:
self.log.warn( 'Could not download InputSandbox LFN(s)' )
self.log.warn( failed )
return S_ERROR( str( failed ) )
for lfn in lfns:
if os.path.exists( '%s/%s' % ( self.root, os.path.basename( download['Value']['Successful'][lfn] ) ) ):
sandboxFiles.append( os.path.basename( download['Value']['Successful'][lfn] ) )
userFiles = sandboxFiles + [ os.path.basename( lfn ) for lfn in lfns ]
for possibleTarFile in userFiles:
if not os.path.exists( possibleTarFile ) :
continue
try:
if os.path.isfile( possibleTarFile ) and tarfile.is_tarfile( possibleTarFile ):
self.log.info( 'Unpacking input sandbox file %s' % ( possibleTarFile ) )
tarFile = tarfile.open( possibleTarFile, 'r' )
for member in tarFile.getmembers():
tarFile.extract( member, os.getcwd() )
except Exception, x :
return S_ERROR( 'Could not untar %s with exception %s' % ( possibleTarFile, str( x ) ) )
if userFiles:
self.inputSandboxSize = getGlobbedTotalSize( userFiles )
self.log.info( "Total size of input sandbox:",
"%0.2f MiB (%s bytes)" % ( self.inputSandboxSize / 1048576.0, self.inputSandboxSize ) )
return S_OK( 'InputSandbox downloaded' )
#############################################################################
def finalize( self, arguments ):
"""Perform any final actions to clean up after job execution.
"""
self.log.info( 'Running JobWrapper finalization' )
# find if there are pending failover requests
requests = self.__getRequestFiles()
outputDataRequest = self.failoverTransfer.getRequest()
requestFlag = len( requests ) > 0 or not outputDataRequest.isEmpty()
if self.failedFlag and requestFlag:
self.log.info( 'Application finished with errors and there are pending requests for this job.' )
self.__report( 'Failed', 'Pending Requests' )
elif not self.failedFlag and requestFlag:
self.log.info( 'Application finished successfully with pending requests for this job.' )
self.__report( 'Completed', 'Pending Requests' )
elif self.failedFlag and not requestFlag:
self.log.info( 'Application finished with errors with no pending requests.' )
self.__report( 'Failed' )
elif not self.failedFlag and not requestFlag:
self.log.info( 'Application finished successfully with no pending requests for this job.' )
self.__report( 'Done', 'Execution Complete' )
self.sendFailoverRequest()
self.__cleanUp()
if self.failedFlag:
return 1
else:
return 0
#############################################################################
def sendWMSAccounting( self, status = '', minorStatus = '' ):
"""Send WMS accounting data.
"""
if self.wmsAccountingSent:
return S_OK()
if status:
self.wmsMajorStatus = status
if minorStatus:
self.wmsMinorStatus = minorStatus
self.accountingReport.setEndTime()
# CPUTime and ExecTime
if not 'CPU' in EXECUTION_RESULT:
# If the payload has not started execution (error with input data, SW, SB,...)
# Execution result is not filled use self.initialTiming
self.log.info( 'EXECUTION_RESULT[CPU] missing in sendWMSAccounting' )
finalStat = os.times()
EXECUTION_RESULT['CPU'] = []
for i in range( len( finalStat ) ):
EXECUTION_RESULT['CPU'].append( finalStat[i] - self.initialTiming[i] )
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in sendWMSAccounting', cpuString )
utime, stime, cutime, cstime, elapsed = EXECUTION_RESULT['CPU']
cpuTime = utime + stime + cutime + cstime
execTime = elapsed
diskSpaceConsumed = getGlobbedTotalSize( os.path.join( self.root, str( self.jobID ) ) )
# Fill the data
acData = {
'User' : self.owner,
'UserGroup' : self.userGroup,
'JobGroup' : self.jobGroup,
'JobType' : self.jobType,
'JobClass' : self.jobClass,
'ProcessingType' : self.processingType,
'FinalMajorStatus' : self.wmsMajorStatus,
'FinalMinorStatus' : self.wmsMinorStatus,
'CPUTime' : cpuTime,
# Based on the factor to convert raw CPU to Normalized units (based on the CPU Model)
'NormCPUTime' : cpuTime * self.cpuNormalizationFactor,
'ExecTime' : execTime,
'InputDataSize' : self.inputDataSize,
'OutputDataSize' : self.outputDataSize,
'InputDataFiles' : self.inputDataFiles,
'OutputDataFiles' : self.outputDataFiles,
'DiskSpace' : diskSpaceConsumed,
'InputSandBoxSize' : self.inputSandboxSize,
'OutputSandBoxSize' : self.outputSandboxSize,
'ProcessedEvents' : self.processedEvents
}
self.log.verbose( 'Accounting Report is:' )
self.log.verbose( acData )
self.accountingReport.setValuesFromDict( acData )
result = self.accountingReport.commit()
# Even if it fails a failover request will be created
self.wmsAccountingSent = True
return result
#############################################################################
def sendFailoverRequest( self, status = '', minorStatus = '' ):
""" Create and send a combined job failover request if any
"""
request = Request()
requestName = 'job_%s' % self.jobID
if 'JobName' in self.jobArgs:
# To make the request names more appealing for users
jobName = self.jobArgs['JobName']
if type( jobName ) == type( ' ' ) and jobName:
jobName = jobName.replace( ' ', '' ).replace( '(', '' ).replace( ')', '' ).replace( '"', '' )
jobName = jobName.replace( '.', '' ).replace( '{', '' ).replace( '}', '' ).replace( ':', '' )
requestName = '%s_%s' % ( jobName, requestName )
if '"' in requestName:
requestName = requestName.replace( '"', '' )
request.RequestName = requestName
request.JobID = self.jobID
request.SourceComponent = "Job_%s" % self.jobID
# JobReport part first
result = self.jobReport.generateForwardDISET()
if result['OK']:
if result["Value"]:
request.addOperation( result["Value"] )
# Accounting part
if not self.jobID:
self.log.verbose( 'No accounting to be sent since running locally' )
else:
result = self.sendWMSAccounting( status, minorStatus )
if not result['OK']:
self.log.warn( 'Could not send WMS accounting with result: \n%s' % result )
if 'rpcStub' in result:
self.log.verbose( 'Adding accounting report to failover request object' )
forwardDISETOp = Operation()
forwardDISETOp.Type = "ForwardDISET"
forwardDISETOp.Arguments = DEncode.encode( result['rpcStub'] )
request.addOperation( forwardDISETOp )
else:
self.log.warn( 'No rpcStub found to construct failover request for WMS accounting report' )
# Failover transfer requests
failoverRequest = self.failoverTransfer.getRequest()
for storedOperation in failoverRequest:
request.addOperation( storedOperation )
# Any other requests in the current directory
rfiles = self.__getRequestFiles()
for rfname in rfiles:
rfile = open( rfname, 'r' )
reqString = rfile.read()
rfile.close()
requestStored = Request( eval( reqString ) )
for storedOperation in requestStored:
request.addOperation( storedOperation )
# The request is ready, send it now
isValid = gRequestValidator.validate( request )
if not isValid["OK"]:
self.log.error( "Failover request is not valid", isValid["Message"] )
else:
# We try several times to put the request before failing the job: it's very important that requests go through,
# or the job will be in an unclear status (workflow ok, but, e.g., the output files won't be registered).
      # It's a poor man's solution, but I don't see fancy alternatives
for counter in range( 10 ):
requestClient = ReqClient()
result = requestClient.putRequest( request )
if result['OK']:
resDigest = request.getDigest()
digest = resDigest['Value']
self.jobReport.setJobParameter( 'PendingRequest', digest )
break
else:
self.log.error( 'Failed to set failover request', '%d: %s. Re-trying...' % ( counter,
result['Message'] ) )
del requestClient
time.sleep( counter ** 3 )
if not result['OK']:
self.__report( 'Failed', 'Failover Request Failed' )
return result
return S_OK()
#############################################################################
def __getRequestFiles( self ):
"""Simple wrapper to return the list of request files.
"""
return glob.glob( '*_request.json' )
#############################################################################
def __cleanUp( self ):
"""Cleans up after job processing. Can be switched off via environment
variable DO_NOT_DO_JOB_CLEANUP or by JobWrapper configuration option.
"""
# Environment variable is a feature for DIRAC (helps local debugging).
if os.environ.has_key( 'DO_NOT_DO_JOB_CLEANUP' ) or not self.cleanUpFlag:
cleanUp = False
else:
cleanUp = True
os.chdir( self.root )
if cleanUp:
self.log.verbose( 'Cleaning up job working directory' )
if os.path.exists( str( self.jobID ) ):
shutil.rmtree( str( self.jobID ) )
#############################################################################
def __report( self, status = '', minorStatus = '', sendFlag = False ):
"""Wraps around setJobStatus of state update client
"""
if status:
self.wmsMajorStatus = status
if minorStatus:
self.wmsMinorStatus = minorStatus
jobStatus = self.jobReport.setJobStatus( status = status, minor = minorStatus, sendFlag = sendFlag )
if not jobStatus['OK']:
self.log.warn( jobStatus['Message'] )
if self.jobID:
self.log.verbose( 'setJobStatus(%s,%s,%s,%s)' % ( self.jobID, status, minorStatus, 'JobWrapper' ) )
return jobStatus
#############################################################################
def __setJobParam( self, name, value, sendFlag = False ):
"""Wraps around setJobParameter of state update client
"""
jobParam = self.jobReport.setJobParameter( str( name ), str( value ), sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
if self.jobID:
self.log.verbose( 'setJobParameter(%s,%s,%s)' % ( self.jobID, name, value ) )
return jobParam
#############################################################################
def __setJobParamList( self, value, sendFlag = False ):
"""Wraps around setJobParameters of state update client
"""
jobParam = self.jobReport.setJobParameters( value, sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
if self.jobID:
self.log.verbose( 'setJobParameters(%s,%s)' % ( self.jobID, value ) )
return jobParam
###############################################################################
###############################################################################
class ExecutionThread( threading.Thread ):
#############################################################################
def __init__( self, spObject, cmd, maxPeekLines, stdoutFile, stderrFile, exeEnv ):
threading.Thread.__init__( self )
self.cmd = cmd
self.spObject = spObject
self.outputLines = []
self.maxPeekLines = maxPeekLines
self.stdout = stdoutFile
self.stderr = stderrFile
self.exeEnv = exeEnv
#############################################################################
def run( self ):
# FIXME: why local instances of object variables are created?
cmd = self.cmd
spObject = self.spObject
start = time.time()
initialStat = os.times()
output = spObject.systemCall( cmd, env = self.exeEnv, callbackFunction = self.sendOutput, shell = True )
EXECUTION_RESULT['Thread'] = output
timing = time.time() - start
EXECUTION_RESULT['Timing'] = timing
finalStat = os.times()
EXECUTION_RESULT['CPU'] = []
for i in range( len( finalStat ) ):
EXECUTION_RESULT['CPU'].append( finalStat[i] - initialStat[i] )
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
gLogger.info( 'EXECUTION_RESULT[CPU] after Execution of spObject.systemCall', cpuString )
gLogger.info( 'EXECUTION_RESULT[Thread] after Execution of spObject.systemCall', str( EXECUTION_RESULT['Thread'] ) )
#############################################################################
def getCurrentPID( self ):
return self.spObject.getChildPID()
#############################################################################
def sendOutput( self, stdid, line ):
if stdid == 0 and self.stdout:
outputFile = open( self.stdout, 'a+' )
print >> outputFile, line
outputFile.close()
elif stdid == 1 and self.stderr:
errorFile = open( self.stderr, 'a+' )
print >> errorFile, line
errorFile.close()
self.outputLines.append( line )
size = len( self.outputLines )
if size > self.maxPeekLines:
# reduce max size of output peeking
self.outputLines.pop( 0 )
#############################################################################
def getOutput( self, lines = 0 ):
if self.outputLines:
# restrict to smaller number of lines for regular
# peeking by the watchdog
      # FIXME: this is accessed from multiple threads, so a single-line update would be safer
if lines:
size = len( self.outputLines )
cut = size - lines
self.outputLines = self.outputLines[cut:]
return S_OK( self.outputLines )
return S_ERROR( 'No Job output found' )
def rescheduleFailedJob( jobID, message, jobReport = None ):
rescheduleResult = 'Rescheduled'
try:
gLogger.warn( 'Failure during %s' % ( message ) )
# Setting a job parameter does not help since the job will be rescheduled,
# instead set the status with the cause and then another status showing the
# reschedule operation.
if not jobReport:
gLogger.info( 'Creating a new JobReport Object' )
jobReport = JobReport( int( jobID ), 'JobWrapper' )
jobReport.setApplicationStatus( 'Failed %s ' % message, sendFlag = False )
jobReport.setJobStatus( 'Rescheduled', message, sendFlag = False )
# We must send Job States and Parameters before it gets reschedule
jobReport.sendStoredStatusInfo()
jobReport.sendStoredJobParameters()
gLogger.info( 'Job will be rescheduled' )
jobManager = RPCClient( 'WorkloadManagement/JobManager' )
result = jobManager.rescheduleJob( int( jobID ) )
if not result['OK']:
gLogger.error( result['Message'] )
if 'Maximum number of reschedulings is reached' in result['Message']:
rescheduleResult = 'Failed'
return rescheduleResult
except Exception:
gLogger.exception( 'JobWrapperTemplate failed to reschedule Job' )
return 'Failed'
# EOF
|
Zac-HD/home-assistant | refs/heads/dev | homeassistant/components/device_tracker/snmp.py | 5 | """
Support for fetching WiFi associations through SNMP.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.snmp/
"""
import binascii
import logging
import threading
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pysnmp==4.3.3']
CONF_COMMUNITY = 'community'
CONF_AUTHKEY = 'authkey'
CONF_PRIVKEY = 'privkey'
CONF_BASEOID = 'baseoid'
DEFAULT_COMMUNITY = 'public'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Inclusive(CONF_AUTHKEY, 'keys'): cv.string,
vol.Inclusive(CONF_PRIVKEY, 'keys'): cv.string,
vol.Required(CONF_BASEOID): cv.string
})
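# Example configuration.yaml entry (illustrative; host and OID are placeholders):
#
#   device_tracker:
#     - platform: snmp
#       host: 192.168.1.1
#       community: public
#       baseoid: 1.3.6.1.4.1.14988.1.1.1.2.1.1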
# pylint: disable=unused-argument
def get_scanner(hass, config):
"""Validate the configuration and return an snmp scanner."""
scanner = SnmpScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SnmpScanner(DeviceScanner):
"""Queries any SNMP capable Access Point for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.entity import config as cfg
self.snmp = cmdgen.CommandGenerator()
self.host = cmdgen.UdpTransportTarget((config[CONF_HOST], 161))
if CONF_AUTHKEY not in config or CONF_PRIVKEY not in config:
self.auth = cmdgen.CommunityData(config[CONF_COMMUNITY])
else:
self.auth = cmdgen.UsmUserData(
config[CONF_COMMUNITY],
config[CONF_AUTHKEY],
config[CONF_PRIVKEY],
authProtocol=cfg.usmHMACSHAAuthProtocol,
privProtocol=cfg.usmAesCfb128Protocol
)
self.baseoid = cmdgen.MibVariable(config[CONF_BASEOID])
self.lock = threading.Lock()
self.last_results = []
# Test the router is accessible
data = self.get_snmp_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client['mac'] for client in self.last_results
if client.get('mac')]
    # Suppressing no-self-use warning
# pylint: disable=R0201
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
# We have no names
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Ensure the information from the device is up to date.
Return boolean if scanning successful.
"""
if not self.success_init:
return False
with self.lock:
data = self.get_snmp_data()
if not data:
return False
self.last_results = data
return True
def get_snmp_data(self):
"""Fetch MAC addresses from access point via SNMP."""
devices = []
errindication, errstatus, errindex, restable = self.snmp.nextCmd(
self.auth, self.host, self.baseoid)
if errindication:
_LOGGER.error("SNMPLIB error: %s", errindication)
return
# pylint: disable=no-member
if errstatus:
_LOGGER.error("SNMP error: %s at %s", errstatus.prettyPrint(),
errindex and restable[int(errindex) - 1][0] or '?')
return
for resrow in restable:
for _, val in resrow:
mac = binascii.hexlify(val.asOctets()).decode('utf-8')
_LOGGER.debug("Found MAC %s", mac)
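                # e.g. a hexlified value of 'aabbccddeeff' becomes
                # 'aa:bb:cc:dd:ee:ff' after the join below.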
mac = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)])
devices.append({'mac': mac})
return devices
|
MinimalOS-AOSP/platform_external_skia | refs/heads/mm-6.0 | tools/skp/page_sets/skia_carsvg_desktop.py | 33 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
page_set=page_set,
credentials_path='data/credentials.json')
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/skia_carsvg_desktop.json'
class SkiaCarsvgDesktopPageSet(page_set_module.PageSet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaCarsvgDesktopPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/skia_carsvg_desktop.json')
urls_list = [
# Why: from fmalita
'http://codinginparadise.org/projects/svgweb/samples/svg-files/car.svg',
]
for url in urls_list:
self.AddUserStory(SkiaBuildbotDesktopPage(url, self))
|
PalisadoesFoundation/switchmap-ng | refs/heads/master | switchmap/snmp/snmp_manager.py | 1 | #!/usr/bin/env python3
"""SNMP manager class."""
import os
import easysnmp
from easysnmp import exceptions
# Import project libraries
from switchmap.utils import log
from switchmap.utils import daemon
from switchmap.snmp import iana_enterprise
class Validate(object):
"""Class Verify SNMP data."""
def __init__(self, hostname, snmp_config):
"""Intialize the class.
Args:
hostname: Name of host
snmp_config: List of dicts of possible snmp configurations
Returns:
None
"""
# Initialize key variables
self.snmp_config = snmp_config
self.hostname = hostname
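        # Each entry in snmp_config is expected to be a dict providing the
        # keys read elsewhere in this module: 'enabled', 'group_name',
        # 'snmp_version', 'snmp_community', 'snmp_port', 'snmp_secname',
        # 'snmp_authprotocol', 'snmp_authpassword', 'snmp_privprotocol' and
        # 'snmp_privpassword'. 'snmp_hostname' is filled in by _credentials().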
def credentials(self):
"""Determine the valid SNMP credentials for a host.
Args:
None
Returns:
credentials: Dict of snmp_credentials to use
"""
# Initialize key variables
cache_exists = False
group_key = 'group_name'
# Create cache directory / file if not yet created
filename = daemon.snmp_file(self.hostname)
if os.path.exists(filename) is True:
cache_exists = True
# Create file if necessary
if cache_exists is False:
# Get credentials
credentials = self._credentials()
# Save credentials if successful
if credentials is not None:
_update_cache(filename, credentials[group_key])
else:
# Read credentials from cache
if os.path.isfile(filename):
with open(filename) as f_handle:
group_name = f_handle.readline()
# Get credentials
credentials = self._credentials(group_name)
# Try the rest if these credentials fail
if credentials is None:
credentials = self._credentials()
# Update cache if found
if credentials is not None:
_update_cache(filename, credentials[group_key])
# Return
return credentials
def _credentials(self, group=None):
"""Determine the valid SNMP credentials for a host.
Args:
group: SNMP group name to try
Returns:
credentials: Dict of snmp_credentials to use
"""
# Initialize key variables
credentials = None
# Probe device with all SNMP options
for params_dict in self.snmp_config:
# Only process enabled SNMP values
if bool(params_dict['enabled']) is False:
continue
# Update credentials
params_dict['snmp_hostname'] = self.hostname
# Setup contact with the remote device
device = Interact(params_dict)
# Try successive groups
if group is None:
# Verify connectivity
if device.contactable() is True:
credentials = params_dict
break
else:
if params_dict['group_name'] == group:
# Verify connectivity
if device.contactable() is True:
credentials = params_dict
# Return
return credentials
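# Minimal usage sketch (hostname and snmp_config are placeholders):
#
#   validator = Validate('router01.example.org', snmp_config)
#   params = validator.credentials()
#   if params is not None:
#       device = Interact(params)
#       print(device.sysobjectid())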
class Interact(object):
"""Class Gets SNMP data."""
def __init__(self, snmp_parameters):
"""Intialize the class.
Args:
snmp_parameters: Dict of SNMP parameters to use
Returns:
None
"""
# Initialize key variables
self._snmp_params = snmp_parameters
        # Fail if the SNMP version is not set
if snmp_parameters['snmp_version'] is None:
log_message = (
'SNMP version is "None". Non existent host? - {}'
''.format(snmp_parameters['snmp_hostname']))
log.log2die(1004, log_message)
# Fail if snmp_parameters dictionary is empty
if bool(snmp_parameters) is False:
log_message = ('SNMP parameters provided are blank. '
'Non existent host?')
log.log2die(1005, log_message)
def enterprise_number(self):
"""Return SNMP enterprise number for the device.
Args:
None
Returns:
enterprise: SNMP enterprise number
"""
# Get the sysObjectID.0 value of the device
sysid = self.sysobjectid()
# Get the vendor ID
enterprise_obj = iana_enterprise.Query(sysobjectid=sysid)
enterprise = enterprise_obj.enterprise()
# Return
return enterprise
def hostname(self):
"""Return SNMP hostname for the interaction.
Args:
None
Returns:
hostname: SNMP hostname
"""
# Initialize key variables
hostname = self._snmp_params['snmp_hostname']
# Return
return hostname
def contactable(self):
"""Check if device is contactable.
Args:
device_id: Device ID
Returns:
contactable: True if a contactable
"""
# Define key variables
contactable = False
result = None
# Try to reach device
try:
# If we can poll the SNMP sysObjectID,
# then the device is contactable
result = self.sysobjectid(check_reachability=True)
if bool(result) is True:
contactable = True
except Exception as _:
# Not contactable
contactable = False
except:
# Log a message
log_message = (
'Unexpected SNMP error for device {}'
''.format(self._snmp_params['snmp_hostname']))
log.log2die(1008, log_message)
# Return
return contactable
def sysobjectid(self, check_reachability=False):
"""Get the sysObjectID of the device.
Args:
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
Returns:
object_id: sysObjectID value
"""
# Initialize key variables
oid = '.1.3.6.1.2.1.1.2.0'
object_id = None
# Get sysObjectID
results = self.get(oid, check_reachability=check_reachability)
if bool(results) is True:
object_id = results[oid].decode('utf-8')
# Return
return object_id
def oid_exists(self, oid_to_get, context_name=''):
"""Determine existence of OID on device.
Args:
oid_to_get: OID to get
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
validity: True if exists
"""
# Initialize key variables
validity = False
# Validate OID
if self._oid_exists_get(
oid_to_get, context_name=context_name) is True:
validity = True
if validity is False:
if self._oid_exists_walk(
oid_to_get, context_name=context_name) is True:
validity = True
# Return
return validity
def _oid_exists_get(self, oid_to_get, context_name=''):
"""Determine existence of OID on device.
Args:
oid_to_get: OID to get
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
validity: True if exists
"""
# Initialize key variables
validity = False
# Process
(_, validity, result) = self.query(
oid_to_get,
get=True,
check_reachability=True, context_name=context_name,
check_existence=True)
# If we get no result, then override validity
if result[oid_to_get] is None:
validity = False
# Return
return validity
def _oid_exists_walk(self, oid_to_get, context_name=''):
"""Determine existence of OID on device.
Args:
oid_to_get: OID to get
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
validity: True if exists
"""
# Initialize key variables
validity = False
# Process
(_, validity, result) = self.query(
oid_to_get, get=False,
check_reachability=True,
context_name=context_name,
check_existence=True)
# If we get no result, then override validity
if bool(result) is False:
validity = False
# Return
return validity
def swalk(self, oid_to_get, normalized=False, context_name=''):
"""Do a failsafe SNMPwalk.
Args:
oid_to_get: OID to get
normalized: If True, then return results as a dict keyed by
only the last node of an OID, otherwise return results
keyed by the entire OID string. Normalization is useful
when trying to create multidimensional dicts where the
primary key is a universal value such as IF-MIB::ifIndex
or BRIDGE-MIB::dot1dBasePort
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
results: Results
"""
# Process data
results = self.walk(
oid_to_get,
normalized=normalized, check_reachability=True,
check_existence=True,
context_name=context_name)
# Return
return results
def walk(
self, oid_to_get, normalized=False,
check_reachability=False, check_existence=False, context_name=''):
"""Do an SNMPwalk.
Args:
oid_to_get: OID to walk
normalized: If True, then return results as a dict keyed by
only the last node of an OID, otherwise return results
keyed by the entire OID string. Normalization is useful
when trying to create multidimensional dicts where the
primary key is a universal value such as IF-MIB::ifIndex
or BRIDGE-MIB::dot1dBasePort
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
check_existence:
Set if checking for the existence of the OID
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
result: Dictionary of tuples (OID, value)
"""
(_, _, result) = self.query(
oid_to_get, get=False,
check_reachability=check_reachability,
check_existence=check_existence,
normalized=normalized, context_name=context_name)
return result
def get(
self, oid_to_get, check_reachability=False, check_existence=False,
normalized=False, context_name=''):
"""Do an SNMPget.
Args:
oid_to_get: OID to get
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
check_existence:
Set if checking for the existence of the OID
normalized: If True, then return results as a dict keyed by
only the last node of an OID, otherwise return results
keyed by the entire OID string. Normalization is useful
when trying to create multidimensional dicts where the
primary key is a universal value such as IF-MIB::ifIndex
or BRIDGE-MIB::dot1dBasePort
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
Dictionary of tuples (OID, value)
"""
(_, _, result) = self.query(
oid_to_get, get=True,
check_reachability=check_reachability,
check_existence=check_existence,
normalized=normalized,
context_name=context_name)
return result
def query(
self, oid_to_get, get=False, check_reachability=False,
check_existence=False, normalized=False, context_name=''):
"""Do an SNMP query.
Args:
oid_to_get: OID to walk
get: Flag determining whether to do a GET or WALK
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
check_existence:
Set if checking for the existence of the OID
normalized: If True, then return results as a dict keyed by
only the last node of an OID, otherwise return results
keyed by the entire OID string. Normalization is useful
when trying to create multidimensional dicts where the
primary key is a universal value such as IF-MIB::ifIndex
or BRIDGE-MIB::dot1dBasePort
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
Dictionary of tuples (OID, value)
"""
# Initialize variables
snmp_params = self._snmp_params
_contactable = True
exists = True
results = []
# Check if OID is valid
if _oid_valid_format(oid_to_get) is False:
log_message = ('OID {} has an invalid format'.format(oid_to_get))
log.log2die(1020, log_message)
# Create SNMP session
session = _Session(snmp_params, context_name=context_name).session
# Create failure log message
try_log_message = (
'Error occurred during SNMP query on host '
'OID {} from {} for context "{}"'
''.format(
oid_to_get, snmp_params['snmp_hostname'],
context_name))
# Fill the results object by getting OID data
try:
# Get the data
if get is True:
results = [session.get(oid_to_get)]
else:
results = session.bulkwalk(
oid_to_get, non_repeaters=0, max_repetitions=25)
# Crash on error, return blank results if doing certain types of
# connectivity checks
except exceptions.EasySNMPConnectionError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except exceptions.EasySNMPTimeoutError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except exceptions.EasySNMPUnknownObjectIDError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except exceptions.EasySNMPNoSuchNameError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except exceptions.EasySNMPNoSuchObjectError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except exceptions.EasySNMPNoSuchInstanceError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except exceptions.EasySNMPUndeterminedTypeError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except SystemError as exception_error:
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence, system_error=True)
except:
log_message = ('Unexpected error')
log.log2die(1002, log_message)
# Format results
values = _format_results(results, normalized=normalized)
# Return
return (_contactable, exists, values)
class _Session(object):
"""Class to create an SNMP session with a device."""
def __init__(self, snmp_parameters, context_name=''):
"""Initialize the class.
Args:
            snmp_parameters: Dict of SNMP parameters
context_name: Name of context
Returns:
session: SNMP session
"""
# Initialize key variables
self._snmp_params = {}
self._context_name = context_name
# Assign variables
self._snmp_params = snmp_parameters
        # Fail if the SNMP version is not set
if snmp_parameters['snmp_version'] is None:
log_message = (
'SNMP version is "None". Non existent host? - {}'
''.format(snmp_parameters['snmp_hostname']))
log.log2die(1004, log_message)
# Fail if snmp_parameters dictionary is empty
if not snmp_parameters:
log_message = ('SNMP parameters provided are blank. '
'Non existent host?')
log.log2die(1005, log_message)
# Create SNMP session
self.session = self._session()
def _session(self):
"""Create an SNMP session for queries.
Args:
None
Returns:
session: SNMP session
"""
# Create session
if self._snmp_params['snmp_version'] != 3:
session = easysnmp.Session(
community=self._snmp_params['snmp_community'],
hostname=self._snmp_params['snmp_hostname'],
version=self._snmp_params['snmp_version'],
remote_port=self._snmp_params['snmp_port'],
use_numeric=True,
context=self._context_name
)
else:
session = easysnmp.Session(
hostname=self._snmp_params['snmp_hostname'],
version=self._snmp_params['snmp_version'],
remote_port=self._snmp_params['snmp_port'],
use_numeric=True,
context=self._context_name,
security_level=self._security_level(),
security_username=self._snmp_params['snmp_secname'],
privacy_protocol=self._priv_protocol(),
privacy_password=self._snmp_params['snmp_privpassword'],
auth_protocol=self._auth_protocol(),
auth_password=self._snmp_params['snmp_authpassword']
)
# Return
return session
def _security_level(self):
"""Create string for security level.
Args:
            snmp_params: Dict of SNMP parameters
Returns:
result: security level
"""
# Initialize key variables
snmp_params = self._snmp_params
# Determine the security level
if bool(snmp_params['snmp_authprotocol']) is True:
if bool(snmp_params['snmp_privprotocol']) is True:
result = 'authPriv'
else:
result = 'authNoPriv'
else:
result = 'noAuthNoPriv'
# Return
return result
def _auth_protocol(self):
"""Get AuthProtocol to use.
Args:
            snmp_params: Dict of SNMP parameters
Returns:
result: Protocol to be used in session
"""
# Initialize key variables
snmp_params = self._snmp_params
protocol = snmp_params['snmp_authprotocol']
# Setup AuthProtocol (Default SHA)
if bool(protocol) is False:
result = 'DEFAULT'
else:
if protocol.lower() == 'md5':
result = 'MD5'
else:
result = 'SHA'
# Return
return result
def _priv_protocol(self):
"""Get privProtocol to use.
Args:
            snmp_params: Dict of SNMP parameters
Returns:
result: Protocol to be used in session
"""
# Initialize key variables
snmp_params = self._snmp_params
protocol = snmp_params['snmp_privprotocol']
# Setup privProtocol (Default AES256)
if bool(protocol) is False:
result = 'DEFAULT'
else:
if protocol.lower() == 'des':
result = 'DES'
else:
result = 'AES'
# Return
return result
def _process_error(
log_message, exception_error, check_reachability,
check_existence, system_error=False):
"""Process the SNMP error.
Args:
params_dict: Dict of SNMP parameters to try
Returns:
alive: True if contactable
"""
    # Initialize key variables
_contactable = True
exists = True
if system_error is False:
        # Exception instances do not expose __name__; take it from the class.
        error_name = type(exception_error).__name__
else:
error_name = 'SystemError'
# Check existence of OID
if check_existence is True:
if system_error is False:
if error_name == 'EasySNMPUnknownObjectIDError':
exists = False
return (_contactable, exists)
elif error_name == 'EasySNMPNoSuchNameError':
exists = False
return (_contactable, exists)
elif error_name == 'EasySNMPNoSuchObjectError':
exists = False
return (_contactable, exists)
elif error_name == 'EasySNMPNoSuchInstanceError':
exists = False
return (_contactable, exists)
else:
exists = False
return (_contactable, exists)
# Checking if the device is reachable
if check_reachability is True:
_contactable = False
exists = False
return (_contactable, exists)
# Die an agonizing death!
log_message = '{}: ({})'.format(log_message, error_name)
log.log2die(1023, log_message)
def _format_results(results, normalized=False):
"""Normalize the results of an walk.
Args:
results: List of lists of results
normalized: If True, then return results as a dict keyed by
only the last node of an OID, otherwise return results
keyed by the entire OID string. Normalization is useful
when trying to create multidimensional dicts where the
primary key is a universal value such as IF-MIB::ifIndex
or BRIDGE-MIB::dot1dBasePort
Returns:
return_results: Dict of results
"""
# Initialize key variables
return_results = {}
for result in results:
if normalized is True:
return_results[result.oid_index] = _convert(result)
else:
return_results[
'{}.{}'.format(
result.oid, result.oid_index)] = _convert(result)
# Return
return return_results
def _convert(result):
"""Convert value from pysnmp object to standard python types.
Args:
result: Named tuple result
Returns:
converted: converted value. Only returns BYTES and INTEGERS
"""
    # Initialize key values
converted = None
value = result.value
snmp_type = result.snmp_type
# Convert string type values to bytes
if snmp_type.upper() == 'OCTETSTR':
converted = bytes(value, 'utf-8')
elif snmp_type.upper() == 'OPAQUE':
converted = bytes(value, 'utf-8')
elif snmp_type.upper() == 'BITS':
converted = bytes(value, 'utf-8')
elif snmp_type.upper() == 'IPADDR':
converted = bytes(value, 'utf-8')
elif snmp_type.upper() == 'NETADDR':
converted = bytes(value, 'utf-8')
elif snmp_type.upper() == 'OBJECTID':
# DO NOT CHANGE !!!
converted = bytes(str(value), 'utf-8')
elif snmp_type.upper() == 'NOSUCHOBJECT':
# Nothing if OID not found
converted = None
elif snmp_type.upper() == 'NOSUCHINSTANCE':
# Nothing if OID not found
converted = None
elif snmp_type.upper() == 'ENDOFMIBVIEW':
# Nothing
converted = None
elif snmp_type.upper() == 'NULL':
# Nothing
converted = None
else:
# Convert everything else into integer values
# rfc1902.Integer
# rfc1902.Integer32
# rfc1902.Counter32
# rfc1902.Gauge32
# rfc1902.Unsigned32
# rfc1902.TimeTicks
# rfc1902.Counter64
converted = int(value)
# Return
return converted
def _oid_valid_format(oid):
"""Determine whether the format of the oid is correct.
Args:
oid: OID string
Returns:
True if valid
"""
# oid cannot be numeric
if isinstance(oid, str) is False:
return False
# Make sure the oid is not blank
stripped_oid = oid.strip()
if not stripped_oid:
return False
# Must start with a '.'
if oid[0] != '.':
return False
# Must not end with a '.'
if oid[-1] == '.':
return False
# Test each octet to be numeric
octets = oid.split('.')
# Remove the first element of the list
octets.pop(0)
for value in octets:
try:
int(value)
except:
return False
# Otherwise valid
return True
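# Illustrative checks for the validator above:
#   _oid_valid_format('.1.3.6.1.2.1.1.2.0')  -> True
#   _oid_valid_format('1.3.6.1.2.1.1.2.0')   -> False (must start with '.')
#   _oid_valid_format('.1.3.6.1.2.1.1.2.')   -> False (must not end with '.')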
def _update_cache(filename, snmp_group):
"""Update the SNMP credentials cache file with successful snmp_group.
Args:
filename: Cache filename
group: SNMP group that successfully authenticated
Returns:
None
"""
# Do update
with open(filename, 'w+') as env:
env.write(snmp_group)
|
Tejal011089/digitales_erpnext | refs/heads/develop | erpnext/stock/report/stock_projected_qty/stock_projected_qty.py | 24 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
data = frappe.db.sql("""select
item.name, item.item_name, description, item_group, brand, warehouse, item.stock_uom,
actual_qty, planned_qty, indented_qty, ordered_qty, reserved_qty,
projected_qty, item.re_order_level, item.re_order_qty,
(item.re_order_level - projected_qty) as shortage_qty
from `tabBin` bin,
(select name, company from tabWarehouse
{warehouse_conditions}) wh,
(select name, item_name, description, stock_uom, item_group,
brand, re_order_level, re_order_qty
from `tabItem` {item_conditions}) item
where item_code = item.name and warehouse = wh.name
order by item.name, wh.name"""\
.format(item_conditions=get_item_conditions(filters),
warehouse_conditions=get_warehouse_conditions(filters)), filters)
return columns, data
def get_columns():
return [_("Item Code") + ":Link/Item:140", _("Item Name") + "::100", _("Description") + "::200",
_("Item Group") + ":Link/Item Group:100", _("Brand") + ":Link/Brand:100", _("Warehouse") + ":Link/Warehouse:120",
_("UOM") + ":Link/UOM:100", _("Actual Qty") + ":Float:100", _("Planned Qty") + ":Float:100",
_("Requested Qty") + ":Float:110", _("Ordered Qty") + ":Float:100", _("Reserved Qty") + ":Float:100",
_("Projected Qty") + ":Float:100", _("Reorder Level") + ":Float:100", _("Reorder Qty") + ":Float:100",
_("Shortage Qty") + ":Float:100"]
def get_item_conditions(filters):
conditions = []
if filters.get("item_code"):
conditions.append("name=%(item_code)s")
if filters.get("brand"):
conditions.append("brand=%(brand)s")
return "where {}".format(" and ".join(conditions)) if conditions else ""
def get_warehouse_conditions(filters):
conditions = []
if filters.get("company"):
conditions.append("company=%(company)s")
if filters.get("warehouse"):
conditions.append("name=%(warehouse)s")
return "where {}".format(" and ".join(conditions)) if conditions else "" |
xhongyi/toybrick | refs/heads/master | Prefetch/Benchmark/prefetch_benchmark.py | 1 | #!/usr/bin/python
import os
import re
import sys
import prefetch_run as pf
"""
Define the runtime constants
"""
MAX_PREFETCH_HEAD_START = 1000
STRIDE = 1
RUNS_PER_TEST = 1
INPUT_READ_FILE = 'check0'
OUTPUT_BENCHMARK_FILE = 'constant_prefetch.dat'
"""
List to store the results during the loop
"""
time_to_completion = [[[0 for i in range(13)] for j in range(RUNS_PER_TEST) ] for i in range(0, MAX_PREFETCH_HEAD_START, STRIDE) ]
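# Layout note: time_to_completion[head_start / STRIDE][trial] stores the wall
# clock time at index 0 followed by the perf counter values; the slice
# assignment "[1:] = perf_results" below resizes each row to fit however many
# counters prefetch_run.parse() returns.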
"""
Print the header information to the intermediate data file
"""
fout = open("safety.out", "w")
fout.write("pref dis" + "\t" + "time(s)" + "\t" + "L1-dcache-loads" + "\t" + "L1-dcache-stores" + "\t" + "L1-dcache-load-stores" + "\t" + "L1-dcache-load-misses" + "\t" + "L1-dcache-store-misses" + "\t" + "LLC-loads" + "\t" + "LLC-stores" + "\t" + "LLC-load-stores" + "\t" + "LLC-load-misses" + "\t" + "LLC-store-misses" + "\t" + "LLC-prefetches" + "\t" + "LLC-prefetch-misses" + "\n")
for trial in range(RUNS_PER_TEST):
for prefetch_head_start in range(0, MAX_PREFETCH_HEAD_START, STRIDE):
# compute the current index
curr_index = prefetch_head_start / STRIDE
"""
Run test and parse results
"""
out, perf_data = pf.run(prefetch_head_start)
time, perf_results = pf.parse(out, perf_data)
time_to_completion[curr_index][trial][0] = time
time_to_completion[curr_index][trial][1:] = perf_results
"""
Output the results to the console
"""
sys.stdout.write(str(prefetch_head_start) + "\t")
for i in range(len(time_to_completion[curr_index][trial]) - 1):
sys.stdout.write(str(time_to_completion[curr_index][trial][i]) + "\t")
sys.stdout.write(str(time_to_completion[curr_index][trial][len(time_to_completion[curr_index][trial]) - 1]) + "\n")
"""
Output the results to a file in case of error during subsequent loops
"""
fout.write(str(prefetch_head_start) + "\t")
for i in range(len(time_to_completion[curr_index][trial]) - 1):
fout.write(str(time_to_completion[curr_index][trial][i]) + "\t")
fout.write(str(time_to_completion[curr_index][trial][len(time_to_completion[curr_index][trial]) - 1]) + "\n")
fout.flush()
"""
open the file and write out the collected data
"""
f = open(OUTPUT_BENCHMARK_FILE, "w")
"""
Write the file header
"""
f.write("pref dis" + "\t" + "time(s)" + "\t" + "L1-dcache-loads" + "\t" + "L1-dcache-stores" + "\t" + "L1-dcache-load-stores" + "\t" + "L1-dcache-load-misses" + "\t" + "L1-dcache-store-misses" + "\t" + "LLC-loads" + "\t" + "LLC-stores" + "\t" + "LLC-load-stores" + "\t" + "LLC-load-misses" + "\t" + "LLC-store-misses" + "\t" + "LLC-prefetches" + "\t" + "LLC-prefetch-misses" + "\n")
"""
Output the data in the main loop
"""
for prefetch_head_start in range(0, MAX_PREFETCH_HEAD_START, STRIDE):
for trial in range(RUNS_PER_TEST):
curr_index = prefetch_head_start / STRIDE
f.write(str(prefetch_head_start) + "\t")
for i in range(len(time_to_completion[curr_index][trial]) - 1):
f.write(str(time_to_completion[curr_index][trial][i]) + "\t")
f.write(str(time_to_completion[curr_index][trial][len(time_to_completion[curr_index][trial]) - 1]) + "\n")
f.flush()
f.close()
fout.close()
|
windyuuy/opera | refs/heads/master | chromium/src/third_party/python_26/Lib/site-packages/win32comext/shell/demos/browse_for_folder.py | 47 | # A couple of samples using SHBrowseForFolder
import sys, os
from win32com.shell import shell, shellcon
import win32gui
# A callback procedure - called by SHBrowseForFolder
def BrowseCallbackProc(hwnd, msg, lp, data):
if msg== shellcon.BFFM_INITIALIZED:
win32gui.SendMessage(hwnd, shellcon.BFFM_SETSELECTION, 1, data)
elif msg == shellcon.BFFM_SELCHANGED:
        # Set the status text of the dialog to the path of the selected folder.
# For this message, 'lp' is the address of the PIDL.
pidl = shell.AddressAsPIDL(lp)
try:
path = shell.SHGetPathFromIDList(pidl)
win32gui.SendMessage(hwnd, shellcon.BFFM_SETSTATUSTEXT, 0, path)
except shell.error:
# No path for this PIDL
pass
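# Note: the os.getcwd() value passed as the 'data' argument to the first
# SHBrowseForFolder call below is handed back to BrowseCallbackProc as its
# 'data' parameter, which is how the current directory gets pre-selected.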
if __name__=='__main__':
# Demonstrate a dialog with the cwd selected as the default - this
# must be done via a callback function.
flags = shellcon.BIF_STATUSTEXT
shell.SHBrowseForFolder(0, # parent HWND
None, # root PIDL.
"Default of %s" % os.getcwd(), # title
flags, # flags
BrowseCallbackProc, # callback function
os.getcwd() # 'data' param for the callback
)
# Browse from this directory down only.
# Get the PIDL for the cwd.
desktop = shell.SHGetDesktopFolder()
cb, pidl, extra = desktop.ParseDisplayName(0, None, os.getcwd())
shell.SHBrowseForFolder(0, # parent HWND
pidl, # root PIDL.
"From %s down only" % os.getcwd(), # title
)
|
gazeti/aleph | refs/heads/master | aleph/search/entities.py | 2 | import json
from normality import ascii_text
from pprint import pprint # noqa
from aleph.core import url_for, es, es_index, schemata
from aleph.index import TYPE_ENTITY, TYPE_DOCUMENT
from aleph.search.util import execute_basic
from aleph.search.fragments import match_all, filter_query, multi_match
from aleph.search.fragments import add_filter, aggregate, authz_filter
from aleph.search.facet import parse_facet_result
DEFAULT_FIELDS = ['collection_id', 'roles', 'dataset', 'name', 'data',
'countries', 'schema', 'schemata', 'properties',
'fingerprints', 'state']
def entities_query(state, fields=None, facets=True, doc_counts=False):
"""Parse a user query string, compose and execute a query."""
if state.has_text:
q = {
"query_string": {
"query": state.text,
"fields": ['name^5', 'names^2', 'text'],
"default_operator": "AND",
"use_dis_max": True
}
}
else:
q = match_all()
if state.raw_query:
q = {"bool": {"must": [q, state.raw_query]}}
q = authz_filter(q, state.authz, roles=True)
aggs = {'scoped': {'global': {}, 'aggs': {}}}
if facets:
facets = list(state.facet_names)
if 'collections' in facets:
aggs = facet_collections(state, q, aggs)
facets.remove('collections')
aggs = aggregate(state, q, aggs, facets)
if state.sort == 'doc_count':
sort = [{'doc_count': 'desc'}, '_score']
elif state.sort == 'score':
sort = ['_score', {'name_sort': 'asc'}]
else:
sort = [{'name_sort': 'asc'}]
# pprint(q)
q = {
'sort': sort,
'query': filter_query(q, state.filters),
'aggregations': aggs,
'size': state.limit,
'from': state.offset,
'_source': fields or DEFAULT_FIELDS
}
result, hits, output = execute_basic(TYPE_ENTITY, q)
output['facets'] = parse_facet_result(state, result)
sub_queries = []
for doc in hits.get('hits', []):
entity = doc.get('_source')
entity['id'] = doc.get('_id')
entity['score'] = doc.get('_score')
entity['api_url'] = url_for('entities_api.view', id=doc.get('_id'))
output['results'].append(entity)
sq = {'term': {'entities.id': entity['id']}}
sq = add_filter(sq, {
'terms': {'collection_id': state.authz.collections_read}
})
sq = {'size': 0, 'query': sq}
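        # Elasticsearch multi-search bodies are newline-delimited JSON in
        # header/body pairs; the empty {} header line lets each query fall
        # back to the index and doc_type given to es.msearch() below.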
sub_queries.append(json.dumps({}))
sub_queries.append(json.dumps(sq))
if doc_counts and len(sub_queries):
# Get the number of matching documents for each entity.
body = '\n'.join(sub_queries)
res = es.msearch(index=es_index, doc_type=TYPE_DOCUMENT, body=body)
for (entity, res) in zip(output['results'], res.get('responses')):
entity['doc_count'] = res.get('hits', {}).get('total')
return output
def load_entity(entity_id):
"""Load a single entity by ID."""
result = es.get(index=es_index, doc_type=TYPE_ENTITY, id=entity_id,
ignore=[404])
entity = result.get('_source')
if result.get('found') is False or entity is None:
return
entity.pop('text', None)
entity['id'] = result.get('_id')
return entity
def facet_collections(state, q, aggs):
filters = state.filters
filters['collection_id'] = state.authz.collections_read
aggs['scoped']['aggs']['collections'] = {
'filter': filter_query(q, filters),
'aggs': {
'collections': {
'terms': {'field': 'collection_id', 'size': state.facet_size}
}
}
}
return aggs
def suggest_entities(prefix, authz, min_count=0, schemas=None, size=5):
"""Auto-complete API."""
options = []
if prefix is not None and len(prefix.strip()):
q = {
'match_phrase_prefix': {'name': prefix.strip()}
}
if min_count > 0:
q = add_filter(q, {'range': {'doc_count': {'gte': min_count}}})
if schemas is not None and len(schemas):
q = add_filter(q, {'terms': {'schema': schemas}})
# TODO: is this correct? should we allow filter by dataset entities?
q = add_filter(q, {'terms': {'collection_id': authz.collections_read}})
q = {
'size': size,
'sort': [{'doc_count': 'desc'}, '_score'],
'query': q,
'_source': ['name', 'schema', 'fingerprints', 'doc_count']
}
ref = ascii_text(prefix)
result = es.search(index=es_index, doc_type=TYPE_ENTITY, body=q)
for res in result.get('hits', {}).get('hits', []):
ent = res.get('_source')
terms = [ascii_text(t) for t in ent.pop('fingerprints', [])]
ent['match'] = ref in terms
ent['score'] = res.get('_score')
ent['id'] = res.get('_id')
options.append(ent)
return {
'prefix': prefix,
'results': options
}
def similar_entities(entity, state):
"""Merge suggestions API."""
required = []
boosters = []
must = None
entity_ids = entity.get('ids') or [entity.get('id')]
# search for fingerprints
for fp in entity.get('fingerprints', []):
required.append(multi_match(fp, ['fingerprints'], 1))
if not state.getbool('strict', False):
# broaden search to similar names
for name in entity.get('names', []):
required.append(multi_match(name, ['names', 'text'], 1))
# make it mandatory to have either a fingerprint or name match
must = {"bool": {"should": required, "minimum_should_match": 1}}
# boost by "contributing criteria"
for field in ['dates', 'countries', 'addresses', 'schemata']:
for val in entity.get(field, []):
boosters.append(multi_match(val, [field]))
# filter types which cannot be resolved via fuzzy matching.
nonfuzzy = [s.name for s in schemata if not s.fuzzy]
state.raw_query = {
"bool": {
"should": boosters,
"must": must,
"must_not": [
{"ids": {"values": entity_ids}},
{"terms": {"schema": nonfuzzy}},
]
}
}
# pprint(state.raw_query)
return entities_query(state)
def get_dataset_countries(dataset_name):
"""Create a list of the top 300 countries mentioned in a dataset."""
q = {'term': {'dataset': dataset_name}}
aggs = {'countries': {'terms': {'field': 'countries', 'size': 300}}}
q = {'size': 0, 'query': q, 'aggregations': aggs}
result = es.search(index=es_index, doc_type=TYPE_ENTITY, body=q)
result = result.get('aggregations', {}).get('countries', {})
return [b.get('key') for b in result.get('buckets', [])]
|
GGXH/python_koans | refs/heads/master | python_koans/python3/koans/about_asserts.py | 37 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(False) # This should be true
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(False, "This should be true -- Please fix this")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
self.assertEqual(__, 1 + 1)
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against reality.
"""
expected_value = __
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = __
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
# This throws an AssertionError exception
assert False
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
        # For example, contemplate the text string "naval". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
#
# So "naval".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(__, "naval".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# http://bit.ly/__class__
|
googlefonts/pyfontaine | refs/heads/main | fontaine/charsets/noto_chars/notosansphagspa_regular.py | 2 | # -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansPhagsPa-Regular'
native_name = ''
def glyphs(self):
chars = []
chars.append(0x0000) #uniFEFF ????
chars.append(0x1801) #uni1801 MONGOLIAN ELLIPSIS
chars.append(0x1802) #uni1802 MONGOLIAN COMMA
chars.append(0x1803) #uni1803 MONGOLIAN FULL STOP
chars.append(0x1805) #uni1805 MONGOLIAN FOUR DOTS
chars.append(0x3007) #uni3007 IDEOGRAPHIC NUMBER ZERO
chars.append(0x3008) #uni3008 LEFT ANGLE BRACKET
chars.append(0x3009) #uni3009 RIGHT ANGLE BRACKET
chars.append(0x300A) #uni300A LEFT DOUBLE ANGLE BRACKET
chars.append(0x200B) #uni200B ZERO WIDTH SPACE
chars.append(0x200C) #uni200C ZERO WIDTH NON-JOINER
chars.append(0x000D) #uni000D ????
chars.append(0x200E) #uni200E LEFT-TO-RIGHT MARK
chars.append(0x200F) #uni200F RIGHT-TO-LEFT MARK
chars.append(0x3010) #uni3010 LEFT BLACK LENTICULAR BRACKET
chars.append(0x3011) #uni3011 RIGHT BLACK LENTICULAR BRACKET
chars.append(0x3014) #uni3014 LEFT TORTOISE SHELL BRACKET
chars.append(0x3015) #uni3015 RIGHT TORTOISE SHELL BRACKET
chars.append(0x3016) #uni3016 LEFT WHITE LENTICULAR BRACKET
chars.append(0x3017) #uni3017 RIGHT WHITE LENTICULAR BRACKET
chars.append(0x3018) #uni3018 LEFT WHITE TORTOISE SHELL BRACKET
chars.append(0x3019) #uni3019 RIGHT WHITE TORTOISE SHELL BRACKET
chars.append(0x301A) #uni301A LEFT WHITE SQUARE BRACKET
chars.append(0x301B) #uni301B RIGHT WHITE SQUARE BRACKET
chars.append(0x0020) #uni00A0 SPACE
chars.append(0x0021) #exclam EXCLAMATION MARK
chars.append(0x2025) #twodotenleader TWO DOT LEADER
chars.append(0x2026) #ellipsis HORIZONTAL ELLIPSIS
chars.append(0x003F) #question QUESTION MARK
chars.append(0xA840) #uniA840 PHAGS-PA LETTER KA
chars.append(0xA841) #uniA841 PHAGS-PA LETTER KHA
chars.append(0xA842) #uniA842 PHAGS-PA LETTER GA
chars.append(0xA843) #uniA843 PHAGS-PA LETTER NGA
chars.append(0xA844) #uniA844 PHAGS-PA LETTER CA
chars.append(0xA845) #uniA845 PHAGS-PA LETTER CHA
chars.append(0xA846) #uniA846 PHAGS-PA LETTER JA
chars.append(0xA847) #uniA847 PHAGS-PA LETTER NYA
chars.append(0xA848) #uniA848 PHAGS-PA LETTER TA
chars.append(0xA849) #uniA849 PHAGS-PA LETTER THA
chars.append(0xA84A) #uniA84A PHAGS-PA LETTER DA
chars.append(0xA84B) #uniA84B PHAGS-PA LETTER NA
chars.append(0xA84C) #uniA84C PHAGS-PA LETTER PA
chars.append(0xA84D) #uniA84D PHAGS-PA LETTER PHA
chars.append(0xA84E) #uniA84E PHAGS-PA LETTER BA
chars.append(0x200D) #uni200D ZERO WIDTH JOINER
chars.append(0xA850) #uniA850 PHAGS-PA LETTER TSA
chars.append(0xA851) #uniA851 PHAGS-PA LETTER TSHA
chars.append(0xA852) #uniA852 PHAGS-PA LETTER DZA
chars.append(0xA853) #uniA853 PHAGS-PA LETTER WA
chars.append(0xA854) #uniA854 PHAGS-PA LETTER ZHA
chars.append(0x300E) #uni300E LEFT WHITE CORNER BRACKET
chars.append(0xA856) #uniA856 PHAGS-PA LETTER SMALL A
chars.append(0xA857) #uniA857 PHAGS-PA LETTER YA
chars.append(0xA858) #uniA858 PHAGS-PA LETTER RA
chars.append(0xA859) #uniA859 PHAGS-PA LETTER LA
chars.append(0xA85A) #uniA85A PHAGS-PA LETTER SHA
chars.append(0xA85B) #uniA85B PHAGS-PA LETTER SA
chars.append(0xA85C) #uniA85C PHAGS-PA LETTER HA
chars.append(0xA85D) #uniA85D PHAGS-PA LETTER A
chars.append(0xA85E) #uniA85E PHAGS-PA LETTER I
chars.append(0xA85F) #uniA85F PHAGS-PA LETTER U
chars.append(0xA860) #uniA860 PHAGS-PA LETTER E
chars.append(0xA861) #uniA861 PHAGS-PA LETTER O
chars.append(0xA862) #uniA862 PHAGS-PA LETTER QA
chars.append(0xA863) #uniA863 PHAGS-PA LETTER XA
chars.append(0xA864) #uniA864 PHAGS-PA LETTER FA
chars.append(0xA865) #uniA865 PHAGS-PA LETTER GGA
chars.append(0xA866) #uniA866 PHAGS-PA LETTER EE
chars.append(0xA867) #uniA867 PHAGS-PA SUBJOINED LETTER WA
chars.append(0xA868) #uniA868 PHAGS-PA SUBJOINED LETTER YA
chars.append(0xA869) #uniA869 PHAGS-PA LETTER TTA
chars.append(0xA86A) #uniA86A PHAGS-PA LETTER TTHA
chars.append(0xA86B) #uniA86B PHAGS-PA LETTER DDA
chars.append(0xA86C) #uniA86C PHAGS-PA LETTER NNA
chars.append(0xA86D) #uniA86D PHAGS-PA LETTER ALTERNATE YA
chars.append(0xA86E) #uniA86E PHAGS-PA LETTER VOICELESS SHA
chars.append(0xA86F) #uniA86F PHAGS-PA LETTER VOICED HA
chars.append(0xA870) #uniA870 PHAGS-PA LETTER ASPIRATED FA
chars.append(0xA871) #uniA871 PHAGS-PA SUBJOINED LETTER RA
chars.append(0xA872) #uniA872 PHAGS-PA SUPERFIXED LETTER RA
chars.append(0xA873) #uniA873 PHAGS-PA LETTER CANDRABINDU
chars.append(0xA874) #uniA874 PHAGS-PA SINGLE HEAD MARK
chars.append(0xA875) #uniA875 PHAGS-PA DOUBLE HEAD MARK
chars.append(0xA876) #uniA876 PHAGS-PA MARK SHAD
chars.append(0xA877) #uniA877 PHAGS-PA MARK DOUBLE SHAD
chars.append(0x00A0) #uni00A0 NO-BREAK SPACE
chars.append(0x300B) #uni300B RIGHT DOUBLE ANGLE BRACKET
chars.append(0x300C) #uni300C LEFT CORNER BRACKET
chars.append(0xFEFF) #uniFEFF ZERO WIDTH NO-BREAK SPACE
chars.append(0x300D) #uni300D RIGHT CORNER BRACKET
chars.append(0x300F) #uni300F RIGHT WHITE CORNER BRACKET
chars.append(0x3001) #uni3001 IDEOGRAPHIC COMMA
chars.append(0x3002) #uni3002 IDEOGRAPHIC FULL STOP
chars.append(0x25CC) #uni25CC DOTTED CIRCLE
chars.append(0xA84F) #uniA84F PHAGS-PA LETTER MA
chars.append(0xFE00) #uniFE00 VARIATION SELECTOR-1
chars.append(0xA855) #uniA855 PHAGS-PA LETTER ZA
return chars
|
h3llrais3r/SickRage | refs/heads/master | lib/requests/packages/chardet/jisfreq.py | 342 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
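# (25% of 12.58 is about 3.1, roughly matching the 3.0 used below)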
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JIS_CHAR_TO_FREQ_ORDER = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
)
|
airodactyl/qutebrowser | refs/heads/master | tests/end2end/features/conftest.py | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Steps for bdd-like tests."""
import os
import re
import sys
import time
import json
import logging
import collections
import textwrap
import pytest
import pytest_bdd as bdd
from qutebrowser.utils import log, utils
from qutebrowser.browser import pdfjs
from helpers import utils as testutils
def _get_echo_exe_path():
"""Return the path to an echo-like command, depending on the system.
Return:
Path to the "echo"-utility.
"""
if utils.is_windows:
return os.path.join(testutils.abs_datapath(), 'userscripts',
'echo.bat')
else:
return 'echo'
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Add a BDD section to the test output."""
outcome = yield
if call.when not in ['call', 'teardown']:
return
report = outcome.get_result()
if report.passed:
return
if (not hasattr(report.longrepr, 'addsection') or
not hasattr(report, 'scenario')):
# In some conditions (on macOS and Windows it seems), report.longrepr
# is actually a tuple. This is handled similarly in pytest-qt too.
#
# Since this hook is invoked for any test, we also need to skip it for
# non-BDD ones.
return
if sys.stdout.isatty() and item.config.getoption('--color') != 'no':
colors = {
'failed': log.COLOR_ESCAPES['red'],
'passed': log.COLOR_ESCAPES['green'],
'keyword': log.COLOR_ESCAPES['cyan'],
'reset': log.RESET_ESCAPE,
}
else:
colors = {
'failed': '',
'passed': '',
'keyword': '',
'reset': '',
}
output = []
output.append("{kw_color}Feature:{reset} {name}".format(
kw_color=colors['keyword'],
name=report.scenario['feature']['name'],
reset=colors['reset'],
))
output.append(
" {kw_color}Scenario:{reset} {name} ({filename}:{line})".format(
kw_color=colors['keyword'],
name=report.scenario['name'],
filename=report.scenario['feature']['rel_filename'],
line=report.scenario['line_number'],
reset=colors['reset'])
)
for step in report.scenario['steps']:
output.append(
" {kw_color}{keyword}{reset} {color}{name}{reset} "
"({duration:.2f}s)".format(
kw_color=colors['keyword'],
color=colors['failed'] if step['failed'] else colors['passed'],
keyword=step['keyword'],
name=step['name'],
duration=step['duration'],
reset=colors['reset'])
)
report.longrepr.addsection("BDD scenario", '\n'.join(output))
## Given
@bdd.given(bdd.parsers.parse("I set {opt} to {value}"))
def set_setting_given(quteproc, server, opt, value):
"""Set a qutebrowser setting.
This is available as "Given:" step so it can be used as "Background:".
"""
if value == '<empty>':
value = ''
value = value.replace('(port)', str(server.port))
quteproc.set_setting(opt, value)
@bdd.given(bdd.parsers.parse("I open {path}"))
def open_path_given(quteproc, path):
"""Open a URL.
This is available as "Given:" step so it can be used as "Background:".
It always opens a new tab, unlike "When I open ..."
"""
quteproc.open_path(path, new_tab=True)
@bdd.given(bdd.parsers.parse("I run {command}"))
def run_command_given(quteproc, command):
"""Run a qutebrowser command.
This is available as "Given:" step so it can be used as "Background:".
"""
quteproc.send_cmd(command)
@bdd.given("I have a fresh instance")
def fresh_instance(quteproc):
"""Restart qutebrowser instance for tests needing a fresh state."""
quteproc.terminate()
quteproc.start()
@bdd.given("I clean up open tabs")
def clean_open_tabs(quteproc):
"""Clean up open windows and tabs."""
quteproc.set_setting('tabs.last_close', 'blank')
quteproc.send_cmd(':window-only')
quteproc.send_cmd(':tab-only --force')
quteproc.send_cmd(':tab-close --force')
quteproc.wait_for_load_finished_url('about:blank')
@bdd.given('pdfjs is available')
def pdfjs_available():
if not pdfjs.is_available():
pytest.skip("No pdfjs installation found.")
## When
@bdd.when(bdd.parsers.parse("I open {path}"))
def open_path(quteproc, server, path):
"""Open a URL.
- If used like "When I open ... in a new tab", the URL is opened in a new
tab.
- With "... in a new window", it's opened in a new window.
- With "... in a private window" it's opened in a new private window.
- With "... as a URL", it's opened according to new_instance_open_target.
"""
path = path.replace('(port)', str(server.port))
new_tab = False
new_bg_tab = False
new_window = False
private = False
as_url = False
wait = True
new_tab_suffix = ' in a new tab'
new_bg_tab_suffix = ' in a new background tab'
new_window_suffix = ' in a new window'
private_suffix = ' in a private window'
do_not_wait_suffix = ' without waiting'
as_url_suffix = ' as a URL'
while True:
if path.endswith(new_tab_suffix):
path = path[:-len(new_tab_suffix)]
new_tab = True
elif path.endswith(new_bg_tab_suffix):
path = path[:-len(new_bg_tab_suffix)]
new_bg_tab = True
elif path.endswith(new_window_suffix):
path = path[:-len(new_window_suffix)]
new_window = True
elif path.endswith(private_suffix):
path = path[:-len(private_suffix)]
private = True
elif path.endswith(as_url_suffix):
path = path[:-len(as_url_suffix)]
as_url = True
elif path.endswith(do_not_wait_suffix):
path = path[:-len(do_not_wait_suffix)]
wait = False
else:
break
quteproc.open_path(path, new_tab=new_tab, new_bg_tab=new_bg_tab,
new_window=new_window, private=private, as_url=as_url,
wait=wait)
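# Illustrative step phrasings handled by open_path above (hypothetical, not
# taken from any particular feature file; (port) is substituted with the test
# webserver's port):
#
#   When I open data/hello.txt
#   When I open data/hello.txt in a new tab
#   When I open data/hello.txt in a private window without waiting
#   When I open http://localhost:(port)/data/hello.txt as a URL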
@bdd.when(bdd.parsers.parse("I set {opt} to {value}"))
def set_setting(quteproc, server, opt, value):
"""Set a qutebrowser setting."""
if value == '<empty>':
value = ''
value = value.replace('(port)', str(server.port))
quteproc.set_setting(opt, value)
@bdd.when(bdd.parsers.parse("I run {command}"))
def run_command(quteproc, server, tmpdir, command):
"""Run a qutebrowser command.
The suffix "with count ..." can be used to pass a count to the command.
"""
if 'with count' in command:
command, count = command.split(' with count ')
count = int(count)
else:
count = None
invalid_tag = ' (invalid command)'
if command.endswith(invalid_tag):
command = command[:-len(invalid_tag)]
invalid = True
else:
invalid = False
command = command.replace('(port)', str(server.port))
command = command.replace('(testdata)', testutils.abs_datapath())
command = command.replace('(tmpdir)', str(tmpdir))
command = command.replace('(dirsep)', os.sep)
command = command.replace('(echo-exe)', _get_echo_exe_path())
quteproc.send_cmd(command, count=count, invalid=invalid)
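# Illustrative steps parsed by run_command (hypothetical examples, not from a
# real feature file). Placeholders such as (port), (testdata), (tmpdir),
# (dirsep) and (echo-exe) are substituted before the command is sent:
#
#   When I run :scroll down with count 3
#   When I run :open (testdata)/hello.txt
#   When I run :foobar (invalid command)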
@bdd.when(bdd.parsers.parse("I reload"))
def reload(qtbot, server, quteproc, command):
"""Reload and wait until a new request is received."""
with qtbot.waitSignal(server.new_request):
quteproc.send_cmd(':reload')
@bdd.when(bdd.parsers.parse("I wait until {path} is loaded"))
def wait_until_loaded(quteproc, path):
"""Wait until the given path is loaded (as per qutebrowser log)."""
quteproc.wait_for_load_finished(path)
@bdd.when(bdd.parsers.re(r'I wait for (?P<is_regex>regex )?"'
r'(?P<pattern>[^"]+)" in the log(?P<do_skip> or skip '
r'the test)?'))
def wait_in_log(quteproc, is_regex, pattern, do_skip):
"""Wait for a given pattern in the qutebrowser log.
If used like "When I wait for regex ... in the log" the argument is treated
as regex. Otherwise, it's treated as a pattern (* can be used as wildcard).
"""
if is_regex:
pattern = re.compile(pattern)
line = quteproc.wait_for(message=pattern, do_skip=bool(do_skip))
line.expected = True
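# Illustrative step phrasings for wait_in_log (hypothetical):
#   When I wait for "*Focus object changed*" in the log
#   When I wait for regex "Load (started|finished)" in the log or skip the test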
@bdd.when(bdd.parsers.re(r'I wait for the (?P<category>error|message|warning) '
r'"(?P<message>.*)"'))
def wait_for_message(quteproc, server, category, message):
"""Wait for a given statusbar message/error/warning."""
quteproc.log_summary('Waiting for {} "{}"'.format(category, message))
expect_message(quteproc, server, category, message)
@bdd.when(bdd.parsers.parse("I wait {delay}s"))
def wait_time(quteproc, delay):
"""Sleep for the given delay."""
time.sleep(float(delay))
@bdd.when(bdd.parsers.re('I press the keys? "(?P<keys>[^"]*)"'))
def press_keys(quteproc, keys):
"""Send the given fake keys to qutebrowser."""
quteproc.press_keys(keys)
@bdd.when("selection is supported")
def selection_supported(qapp):
"""Skip the test if selection isn't supported."""
if not qapp.clipboard().supportsSelection():
pytest.skip("OS doesn't support primary selection!")
@bdd.when("selection is not supported")
def selection_not_supported(qapp):
"""Skip the test if selection is supported."""
if qapp.clipboard().supportsSelection():
pytest.skip("OS supports primary selection!")
@bdd.when(bdd.parsers.re(r'I put "(?P<content>.*)" into the '
r'(?P<what>primary selection|clipboard)'))
def fill_clipboard(quteproc, server, what, content):
content = content.replace('(port)', str(server.port))
content = content.replace(r'\n', '\n')
quteproc.send_cmd(':debug-set-fake-clipboard "{}"'.format(content))
@bdd.when(bdd.parsers.re(r'I put the following lines into the '
r'(?P<what>primary selection|clipboard):\n'
r'(?P<content>.+)$', flags=re.DOTALL))
def fill_clipboard_multiline(quteproc, server, what, content):
fill_clipboard(quteproc, server, what, textwrap.dedent(content))
@bdd.when(bdd.parsers.parse('I hint with args "{args}"'))
def hint(quteproc, args):
quteproc.send_cmd(':hint {}'.format(args))
quteproc.wait_for(message='hints: *')
@bdd.when(bdd.parsers.parse('I hint with args "{args}" and follow {letter}'))
def hint_and_follow(quteproc, args, letter):
args = args.replace('(testdata)', testutils.abs_datapath())
quteproc.send_cmd(':hint {}'.format(args))
quteproc.wait_for(message='hints: *')
quteproc.send_cmd(':follow-hint {}'.format(letter))
@bdd.when("I wait until the scroll position changed")
def wait_scroll_position(quteproc):
quteproc.wait_scroll_pos_changed()
@bdd.when(bdd.parsers.parse("I wait until the scroll position changed to "
"{x}/{y}"))
def wait_scroll_position_arg(quteproc, x, y):
quteproc.wait_scroll_pos_changed(x, y)
@bdd.when(bdd.parsers.parse('I wait for the javascript message "{message}"'))
def javascript_message_when(quteproc, message):
"""Make sure the given message was logged via javascript."""
quteproc.wait_for_js(message)
@bdd.when("I clear SSL errors")
def clear_ssl_errors(request, quteproc):
if request.config.webengine:
quteproc.terminate()
quteproc.start()
else:
quteproc.send_cmd(':debug-clear-ssl-errors')
## Then
@bdd.then(bdd.parsers.parse("{path} should be loaded"))
def path_should_be_loaded(quteproc, path):
"""Make sure the given path was loaded according to the log.
This is usually the better check compared to "should be requested" as the
page could be loaded from local cache.
"""
quteproc.wait_for_load_finished(path)
@bdd.then(bdd.parsers.parse("{path} should be requested"))
def path_should_be_requested(server, path):
"""Make sure the given path was loaded from the webserver."""
server.wait_for(verb='GET', path='/' + path)
@bdd.then(bdd.parsers.parse("The requests should be:\n{pages}"))
def list_of_requests(server, pages):
"""Make sure the given requests were done from the webserver."""
expected_requests = [server.ExpectedRequest('GET', '/' + path.strip())
for path in pages.split('\n')]
actual_requests = server.get_requests()
assert actual_requests == expected_requests
@bdd.then(bdd.parsers.parse("The unordered requests should be:\n{pages}"))
def list_of_requests_unordered(server, pages):
"""Make sure the given requests were done (in no particular order)."""
expected_requests = [server.ExpectedRequest('GET', '/' + path.strip())
for path in pages.split('\n')]
actual_requests = server.get_requests()
# Requests are not hashable, we need to convert to ExpectedRequests
actual_requests = [server.ExpectedRequest.from_request(req)
for req in actual_requests]
assert (collections.Counter(actual_requests) ==
collections.Counter(expected_requests))
@bdd.then(bdd.parsers.re(r'the (?P<category>error|message|warning) '
r'"(?P<message>.*)" should be shown'))
def expect_message(quteproc, server, category, message):
"""Expect the given message in the qutebrowser log."""
category_to_loglevel = {
'message': logging.INFO,
'error': logging.ERROR,
'warning': logging.WARNING,
}
message = message.replace('(port)', str(server.port))
quteproc.mark_expected(category='message',
loglevel=category_to_loglevel[category],
message=message)
@bdd.then(bdd.parsers.re(r'(?P<is_regex>regex )?"(?P<pattern>[^"]+)" should '
r'be logged( with level (?P<loglevel>.*))?'))
def should_be_logged(quteproc, server, is_regex, pattern, loglevel):
"""Expect the given pattern on regex in the log."""
if is_regex:
pattern = re.compile(pattern)
else:
pattern = pattern.replace('(port)', str(server.port))
args = {
'message': pattern,
}
if loglevel:
args['loglevel'] = getattr(logging, loglevel.upper())
line = quteproc.wait_for(**args)
line.expected = True
@bdd.then(bdd.parsers.parse('"{pattern}" should not be logged'))
def ensure_not_logged(quteproc, pattern):
"""Make sure the given pattern was *not* logged."""
quteproc.ensure_not_logged(message=pattern)
@bdd.then(bdd.parsers.parse('the javascript message "{message}" should be '
'logged'))
def javascript_message_logged(quteproc, message):
"""Make sure the given message was logged via javascript."""
quteproc.wait_for_js(message)
@bdd.then(bdd.parsers.parse('the javascript message "{message}" should not be '
'logged'))
def javascript_message_not_logged(quteproc, message):
"""Make sure the given message was *not* logged via javascript."""
quteproc.ensure_not_logged(category='js',
message='[*] {}'.format(message))
@bdd.then(bdd.parsers.parse("The session should look like:\n{expected}"))
def compare_session(request, quteproc, expected):
"""Compare the current sessions against the given template.
partial_compare is used, which means only the keys/values listed will be
compared.
"""
quteproc.compare_session(expected)
@bdd.then("no crash should happen")
def no_crash():
"""Don't do anything.
This is actually a NOP as a crash is already checked in the log.
"""
time.sleep(0.5)
@bdd.then(bdd.parsers.parse("the header {header} should be set to {value}"))
def check_header(quteproc, header, value):
"""Check if a given header is set correctly.
This assumes we're on the server header page.
"""
content = quteproc.get_content()
data = json.loads(content)
print(data)
if value == '<unset>':
assert header not in data['headers']
else:
actual = data['headers'][header]
assert testutils.pattern_match(pattern=value, value=actual)
@bdd.then(bdd.parsers.parse('the page should contain the html "{text}"'))
def check_contents_html(quteproc, text):
"""Check the current page's content based on a substring."""
content = quteproc.get_content(plain=False)
assert text in content
@bdd.then(bdd.parsers.parse('the page should contain the plaintext "{text}"'))
def check_contents_plain(quteproc, text):
"""Check the current page's content based on a substring."""
content = quteproc.get_content().strip()
assert text in content
@bdd.then(bdd.parsers.parse('the page should not contain the plaintext '
'"{text}"'))
def check_not_contents_plain(quteproc, text):
"""Check the current page's content based on a substring."""
content = quteproc.get_content().strip()
assert text not in content
@bdd.then(bdd.parsers.parse('the json on the page should be:\n{text}'))
def check_contents_json(quteproc, text):
"""Check the current page's content as json."""
content = quteproc.get_content().strip()
expected = json.loads(text)
actual = json.loads(content)
assert actual == expected
@bdd.then(bdd.parsers.parse("the following tabs should be open:\n{tabs}"))
def check_open_tabs(quteproc, request, tabs):
"""Check the list of open tabs in the session.
This is a lightweight alternative to "The session should look like: ...".
It expects a list of URLs, with an optional "(active)" suffix.
"""
session = quteproc.get_session()
active_suffix = ' (active)'
pinned_suffix = ' (pinned)'
tabs = tabs.splitlines()
assert len(session['windows']) == 1
assert len(session['windows'][0]['tabs']) == len(tabs)
# If we don't have (active) anywhere, don't check it
has_active = any(active_suffix in line for line in tabs)
has_pinned = any(pinned_suffix in line for line in tabs)
for i, line in enumerate(tabs):
line = line.strip()
assert line.startswith('- ')
line = line[2:] # remove "- " prefix
active = False
pinned = False
while line.endswith(active_suffix) or line.endswith(pinned_suffix):
if line.endswith(active_suffix):
# active
line = line[:-len(active_suffix)]
active = True
else:
# pinned
line = line[:-len(pinned_suffix)]
pinned = True
session_tab = session['windows'][0]['tabs'][i]
current_page = session_tab['history'][-1]
assert current_page['url'] == quteproc.path_to_url(line)
if active:
assert session_tab['active']
elif has_active:
assert 'active' not in session_tab
if pinned:
assert current_page['pinned']
elif has_pinned:
assert not current_page['pinned']
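# Illustrative expectation for check_open_tabs (hypothetical URLs); each line
# starts with "- " and may carry "(active)" and/or "(pinned)" suffixes:
#
#   Then the following tabs should be open:
#     - data/numbers/1.txt
#     - data/numbers/2.txt (active)
#     - data/numbers/3.txt (pinned)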
@bdd.then(bdd.parsers.re(r'the (?P<what>primary selection|clipboard) should '
r'contain "(?P<content>.*)"'))
def clipboard_contains(quteproc, server, what, content):
expected = content.replace('(port)', str(server.port))
expected = expected.replace('\\n', '\n')
expected = expected.replace('(linesep)', os.linesep)
quteproc.wait_for(message='Setting fake {}: {}'.format(
what, json.dumps(expected)))
@bdd.then(bdd.parsers.parse('the clipboard should contain:\n{content}'))
def clipboard_contains_multiline(quteproc, server, content):
expected = textwrap.dedent(content).replace('(port)', str(server.port))
quteproc.wait_for(message='Setting fake clipboard: {}'.format(
json.dumps(expected)))
@bdd.then("qutebrowser should quit")
def should_quit(qtbot, quteproc):
quteproc.wait_for_quit()
def _get_scroll_values(quteproc):
data = quteproc.get_session()
pos = data['windows'][0]['tabs'][0]['history'][-1]['scroll-pos']
return (pos['x'], pos['y'])
@bdd.then(bdd.parsers.re(r"the page should be scrolled "
r"(?P<direction>horizontally|vertically)"))
def check_scrolled(quteproc, direction):
quteproc.wait_scroll_pos_changed()
x, y = _get_scroll_values(quteproc)
if direction == 'horizontally':
assert x > 0
assert y == 0
else:
assert x == 0
assert y > 0
@bdd.then("the page should not be scrolled")
def check_not_scrolled(request, quteproc):
x, y = _get_scroll_values(quteproc)
assert x == 0
assert y == 0
@bdd.then(bdd.parsers.parse("the option {option} should be set to {value}"))
def check_option(quteproc, option, value):
actual_value = quteproc.get_setting(option)
assert actual_value == value
|
pair-code/what-if-tool | refs/heads/master | witwidget/notebook/colab/wit.py | 2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import tensorflow as tf
from IPython import display
from google.colab import output
from witwidget.notebook import base
# Python functions for requests from javascript.
def infer_examples(wit_id):
WitWidget.widgets[wit_id].infer()
output.register_callback('notebook.InferExamples', infer_examples)
def delete_example(wit_id, index):
WitWidget.widgets[wit_id].delete_example(index)
output.register_callback('notebook.DeleteExample', delete_example)
def duplicate_example(wit_id, index):
WitWidget.widgets[wit_id].duplicate_example(index)
output.register_callback('notebook.DuplicateExample', duplicate_example)
def update_example(wit_id, index, example):
WitWidget.widgets[wit_id].update_example(index, example)
output.register_callback('notebook.UpdateExample', update_example)
def get_eligible_features(wit_id):
WitWidget.widgets[wit_id].get_eligible_features()
output.register_callback('notebook.GetEligibleFeatures', get_eligible_features)
def sort_eligible_features(wit_id, details):
WitWidget.widgets[wit_id].sort_eligible_features(details)
output.register_callback('notebook.SortEligibleFeatures', sort_eligible_features)
def infer_mutants(wit_id, details):
WitWidget.widgets[wit_id].infer_mutants(details)
output.register_callback('notebook.InferMutants', infer_mutants)
def compute_custom_distance(wit_id, index, callback_name, params):
WitWidget.widgets[wit_id].compute_custom_distance(index, callback_name,
params)
output.register_callback('notebook.ComputeCustomDistance',
compute_custom_distance)
# HTML/javascript for the WIT frontend.
WIT_HTML = """
<script>
(function() {{
const id = {id};
const wit = document.querySelector("#wit");
wit.style.height = '{height}px';
let mutantFeature = null;
let stagedExamples = [];
let prevExampleCountdown = 0;
let stagedInferences = {{}};
let prevInferencesCountdown = 0;
// Listeners from WIT element events which pass requests to python.
wit.addEventListener("infer-examples", e => {{
google.colab.kernel.invokeFunction(
'notebook.InferExamples', [id], {{}});
}});
wit.addEventListener("compute-custom-distance", e => {{
google.colab.kernel.invokeFunction(
'notebook.ComputeCustomDistance',
[id, e.detail.index, e.detail.callback, e.detail.params],
{{}});
}});
wit.addEventListener("delete-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.DeleteExample', [id, e.detail.index], {{}});
}});
wit.addEventListener("duplicate-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.DuplicateExample', [id, e.detail.index], {{}});
}});
wit.addEventListener("update-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.UpdateExample',
[id, e.detail.index, e.detail.example],
{{}});
}});
wit.addEventListener('get-eligible-features', e => {{
google.colab.kernel.invokeFunction(
'notebook.GetEligibleFeatures', [id], {{}});
}});
wit.addEventListener('infer-mutants', e => {{
mutantFeature = e.detail.feature_name;
google.colab.kernel.invokeFunction(
'notebook.InferMutants', [id, e.detail], {{}});
}});
wit.addEventListener('sort-eligible-features', e => {{
google.colab.kernel.invokeFunction(
'notebook.SortEligibleFeatures', [id, e.detail], {{}});
}});
// Javascript callbacks called by python code to communicate with WIT
// Polymer element.
window.backendError = error => {{
wit.handleError(error.msg);
}};
window.inferenceCallback = res => {{
// If starting a new set of data, reset the staged results.
if (res.countdown >= prevInferencesCountdown) {{
stagedInferences = res.inferences;
}}
prevInferencesCountdown = res.countdown;
for (let i = 0; i < res.results.length; i++) {{
if (wit.modelType == 'classification') {{
stagedInferences.inferences.results[i].classificationResult.classifications.push(...res.results[i]);
}}
else {{
stagedInferences.inferences.results[i].regressionResult.regressions.push(...res.results[i]);
}}
const extras = res.extra[i];
for (let key of Object.keys(extras)) {{
stagedInferences.extra_outputs[i][key].push(...extras[key]);
}}
}}
stagedInferences.inferences.indices.push(...res.indices);
// If this is the final chunk, set the staged results.
if (res.countdown === 0) {{
wit.labelVocab = stagedInferences.label_vocab;
wit.inferences = stagedInferences.inferences;
wit.extraOutputs = {{indices: wit.inferences.indices,
extra: stagedInferences.extra_outputs}};
}}
}};
window.distanceCallback = callbackDict => {{
wit.invokeCustomDistanceCallback(callbackDict);
}};
window.spriteCallback = spriteUrl => {{
if (!wit.updateSprite) {{
requestAnimationFrame(() => window.spriteCallback(spriteUrl));
return;
}}
wit.hasSprite = true;
wit.localAtlasUrl = spriteUrl;
wit.updateSprite();
}};
window.eligibleFeaturesCallback = features => {{
wit.partialDepPlotEligibleFeatures = features;
}};
window.sortEligibleFeaturesCallback = features => {{
wit.partialDepPlotEligibleFeatures = features;
}};
window.inferMutantsCallback = chartInfo => {{
wit.makeChartForFeature(chartInfo.chartType, mutantFeature,
chartInfo.data);
}};
window.configCallback = config => {{
if (!wit.updateNumberOfModels) {{
requestAnimationFrame(() => window.configCallback(config));
return;
}}
if ('inference_address' in config) {{
let addresses = config['inference_address'];
if ('inference_address_2' in config) {{
addresses += ',' + config['inference_address_2'];
}}
wit.inferenceAddress = addresses;
}}
if ('model_name' in config) {{
let names = config['model_name'];
if ('model_name_2' in config) {{
names += ',' + config['model_name_2'];
}}
wit.modelName = names;
}}
if ('model_type' in config) {{
wit.modelType = config['model_type'];
}}
if ('are_sequence_examples' in config) {{
wit.sequenceExamples = config['are_sequence_examples'];
}}
if ('max_classes' in config) {{
wit.maxInferenceEntriesPerRun = config['max_classes'];
}}
if ('multiclass' in config) {{
wit.multiClass = config['multiclass'];
}}
wit.updateNumberOfModels();
if ('target_feature' in config) {{
wit.selectedLabelFeature = config['target_feature'];
}}
if ('uses_custom_distance_fn' in config) {{
wit.customDistanceFunctionSet = true;
}} else {{
wit.customDistanceFunctionSet = false;
}}
}};
window.updateExamplesCallback = res => {{
// If starting a new set of data, reset the staged examples.
if (res.countdown >= prevExampleCountdown) {{
stagedExamples = [];
}}
prevExampleCountdown = res.countdown;
stagedExamples.push(...res.examples);
if (res.countdown === 0) {{
// If this is the final chunk, set the staged examples.
window.commitUpdatedExamples();
}}
}};
window.commitUpdatedExamples = () => {{
if (!wit.updateExampleContents) {{
requestAnimationFrame(() => window.commitUpdatedExamples());
return;
}}
wit.updateExampleContents(stagedExamples, false);
if (wit.localAtlasUrl) {{
window.spriteCallback(wit.localAtlasUrl);
}}
}};
// A BroadcastChannel allows examples to be updated by a call from an
// output cell that isn't the cell hosting the WIT widget.
const channelName = 'updateExamples' + id;
const updateExampleListener = new BroadcastChannel(channelName);
updateExampleListener.onmessage = msg => {{
window.updateExamplesCallback(msg.data);
}};
}})();
</script>
"""
class WitWidget(base.WitWidgetBase):
"""WIT widget for colab."""
# Static instance list of constructed WitWidgets so python global functions
# can call into instances of this object
widgets = []
# Static instance index to keep track of ID number of each constructed
# WitWidget.
index = 0
def __init__(self, config_builder, height=1000, delay_rendering=False):
"""Constructor for colab notebook WitWidget.
Args:
config_builder: WitConfigBuilder object containing settings for WIT.
height: Optional height in pixels for WIT to occupy. Defaults to 1000.
delay_rendering: Optional. If true, then do not render WIT on
construction. Instead, only render when render method is called. Defaults
to False.
"""
self._rendering_complete = False
self.id = WitWidget.index
self.height = height
self.set_examples_in_progress = False
# How many examples should be sent to the front-end per message, in order
# to avoid issues with kernel crashes on large messages.
self.SLICE_SIZE = 10000
base.WitWidgetBase.__init__(self, config_builder)
# Add this instance to the static instance list.
WitWidget.widgets.append(self)
if not delay_rendering:
self.render()
# Increment the static instance WitWidget index counter
WitWidget.index += 1
def render(self):
"""Render the widget to the display."""
# Display WIT Polymer element.
display.display(display.HTML(self._get_element_html()))
display.display(display.HTML(
WIT_HTML.format(height=self.height, id=self.id)))
# Send the provided config and examples to JS.
output.eval_js("""configCallback({config})""".format(
config=json.dumps(self.config)))
self.set_examples_in_progress = True
self._set_examples_looper('updateExamplesCallback({data})')
self.set_examples_in_progress = False
self._generate_sprite()
self._rendering_complete = True
def _get_element_html(self):
return tf.io.gfile.GFile(
'/usr/local/share/jupyter/nbextensions/wit-widget/wit_jupyter.html'
).read()
def set_examples(self, examples):
if self.set_examples_in_progress:
print('Cannot set examples while transfer is in progress.')
return
self.set_examples_in_progress = True
base.WitWidgetBase.set_examples(self, examples)
# If this is called after rendering, use a BroadcastChannel to send
# the updated examples to the visualization. Inside of the ctor, no action
# is necessary as the rendering handles all communication.
if self._rendering_complete:
# Use BroadcastChannel to allow this call to be made in a separate colab
# cell from the cell that displays WIT.
channel_str = """(new BroadcastChannel('updateExamples{}'))""".format(
self.id)
eval_js_str = channel_str + '.postMessage({data})'
self._set_examples_looper(eval_js_str)
self._generate_sprite()
self.set_examples_in_progress = False
def _set_examples_looper(self, eval_js_str):
# Send the set examples to JS in chunks.
num_pieces = math.ceil(len(self.examples) / self.SLICE_SIZE)
i = 0
while num_pieces > 0:
num_pieces -= 1
exs = self.examples[i : i + self.SLICE_SIZE]
piece = {'examples': exs, 'countdown': num_pieces}
output.eval_js(eval_js_str.format(data=json.dumps(piece)))
i += self.SLICE_SIZE
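# Sketch of the chunked payloads produced by _set_examples_looper above
# (illustrative values only). With three examples and SLICE_SIZE = 2, two
# messages would be emitted:
#
#   {"examples": [ex0, ex1], "countdown": 1}
#   {"examples": [ex2], "countdown": 0}
#
# updateExamplesCallback on the JS side starts a fresh staging buffer whenever
# a countdown arrives that is >= the previous one, and commits the staged
# examples once countdown reaches 0.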
def infer(self):
try:
inferences = base.WitWidgetBase.infer_impl(self)
# Parse the inferences out of the returned structure and empty it of
# contents, keeping its nested structure.
# Chunks of the inference results will be sent to the front-end and
# re-assembled.
indices = inferences['inferences']['indices'][:]
inferences['inferences']['indices'] = []
res2 = []
extra = {}
extra2 = {}
model_inference = inferences['inferences']['results'][0]
if ('extra_outputs' in inferences and len(inferences['extra_outputs']) and
inferences['extra_outputs'][0]):
for key in inferences['extra_outputs'][0]:
extra[key] = inferences['extra_outputs'][0][key][:]
inferences['extra_outputs'][0][key] = []
if 'classificationResult' in model_inference:
res = model_inference['classificationResult']['classifications'][:]
model_inference['classificationResult']['classifications'] = []
else:
res = model_inference['regressionResult']['regressions'][:]
model_inference['regressionResult']['regressions'] = []
if len(inferences['inferences']['results']) > 1:
if ('extra_outputs' in inferences and
len(inferences['extra_outputs']) > 1 and
inferences['extra_outputs'][1]):
for key in inferences['extra_outputs'][1]:
extra2[key] = inferences['extra_outputs'][1][key][:]
inferences['extra_outputs'][1][key] = []
model_2_inference = inferences['inferences']['results'][1]
if 'classificationResult' in model_2_inference:
res2 = model_2_inference['classificationResult']['classifications'][:]
model_2_inference['classificationResult']['classifications'] = []
else:
res2 = model_2_inference['regressionResult']['regressions'][:]
model_2_inference['regressionResult']['regressions'] = []
i = 0
num_pieces = math.ceil(len(indices) / self.SLICE_SIZE)
# Loop over each piece to send.
while num_pieces > 0:
num_pieces -= 1
piece = [res[i : i + self.SLICE_SIZE]]
extra_piece = [{}]
for key in extra:
extra_piece[0][key] = extra[key][i : i + self.SLICE_SIZE]
if res2:
piece.append(res2[i : i + self.SLICE_SIZE])
extra_piece.append({})
for key in extra2:
extra_piece[1][key] = extra2[key][i : i + self.SLICE_SIZE]
ind_piece = indices[i : i + self.SLICE_SIZE]
data = {'results': piece, 'indices': ind_piece, 'extra': extra_piece,
'countdown': num_pieces}
# For the first segment to send, also send the blank inferences
# structure to be filled in. This was cleared of contents above but is
# used to maintain the nested structure of the results.
if i == 0:
data['inferences'] = inferences
output.eval_js("""inferenceCallback({data})""".format(
data=json.dumps(data)))
i += self.SLICE_SIZE
except Exception as e:
output.eval_js("""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def delete_example(self, index):
self.examples.pop(index)
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self._generate_sprite()
def update_example(self, index, example):
self.updated_example_indices.add(index)
self.examples[index] = example
self._generate_sprite()
def duplicate_example(self, index):
self.examples.append(self.examples[index])
self.updated_example_indices.add(len(self.examples) - 1)
self._generate_sprite()
def compute_custom_distance(self, index, callback_fn, params):
try:
distances = base.WitWidgetBase.compute_custom_distance_impl(
self, index, params['distanceParams'])
callback_dict = {
'distances': distances,
'exInd': index,
'funId': callback_fn,
'params': params['callbackParams']
}
output.eval_js("""distanceCallback({callback_dict})""".format(
callback_dict=json.dumps(callback_dict)))
except Exception as e:
output.eval_js(
"""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def get_eligible_features(self):
features_list = base.WitWidgetBase.get_eligible_features_impl(self)
output.eval_js("""eligibleFeaturesCallback({features_list})""".format(
features_list=json.dumps(features_list)))
def infer_mutants(self, info):
try:
json_mapping = base.WitWidgetBase.infer_mutants_impl(self, info)
output.eval_js("""inferMutantsCallback({json_mapping})""".format(
json_mapping=json.dumps(json_mapping)))
except Exception as e:
output.eval_js("""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def sort_eligible_features(self, info):
try:
features_list = base.WitWidgetBase.sort_eligible_features_impl(self, info)
output.eval_js("""sortEligibleFeaturesCallback({features_list})""".format(
features_list=json.dumps(features_list)))
except Exception as e:
output.eval_js("""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def _generate_sprite(self):
sprite = base.WitWidgetBase.create_sprite(self)
if sprite is not None:
output.eval_js("""spriteCallback('{sprite}')""".format(sprite=sprite))
|
scallemang/rhineland-shopify-theme | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
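# Illustrative behavior of _QuoteWin32CommandLineArgs (made-up values):
#   ['python', 'my script.py', 'say "hi"']
#     -> ['python', '"my script.py"', '"say ""hi"""']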
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
working_directory: the directory to run the command from. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
sachinkumar123/approprate | refs/heads/master | cloudApp/moderator/apps.py | 1 | from __future__ import unicode_literals
from django.apps import AppConfig
class ModeratorConfig(AppConfig):
name = 'moderator'
|
Aaron0927/xen-4.2.1 | refs/heads/master | tools/xm-test/tests/help/01_help_basic_pos.py | 42 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Woody Marvel <marvel@us.ibm.com>
import re
from XmTestLib import *
status, output = traceCommand("xm help")
eyecatcher = "Usage:"
where = output.find(eyecatcher)
if where == -1:
FAIL("xm help: didn't see the usage string")
|
BlackPole/bp-enigma2 | refs/heads/master | lib/python/Plugins/SystemPlugins/VideoClippingSetup/plugin.py | 55 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSlider, getConfigListEntry
config.plugins.VideoClippingSetup = ConfigSubsection()
config.plugins.VideoClippingSetup.clip_left = ConfigInteger(default = 0)
config.plugins.VideoClippingSetup.clip_width = ConfigInteger(default = 720)
config.plugins.VideoClippingSetup.clip_top = ConfigInteger(default = 0)
config.plugins.VideoClippingSetup.clip_height = ConfigInteger(default = 576)
class VideoClippingCoordinates(Screen, ConfigListScreen):
skin = """
<screen position="0,0" size="e,e" title="Video clipping setup" backgroundColor="transparent">
<widget name="config" position="c-175,c-75" size="350,150" foregroundColor="black" backgroundColor="transparent" />
<ePixmap pixmap="skin_default/buttons/green.png" position="c-145,e-100" zPosition="0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/red.png" position="c+5,e-100" zPosition="0" size="140,40" alphatest="on" />
<widget name="ok" position="c-145,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="green" />
<widget name="cancel" position="c+5,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="red" />
</screen>"""
def __init__(self, session):
self.skin = VideoClippingCoordinates.skin
Screen.__init__(self, session)
from Components.ActionMap import ActionMap
from Components.Button import Button
self["ok"] = Button(_("OK"))
self["cancel"] = Button(_("Cancel"))
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.keyCancel,
"green": self.keyGo,
"red": self.keyCancel,
"menu": self.closeRecursive,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
left = config.plugins.VideoClippingSetup.clip_left.value
width = config.plugins.VideoClippingSetup.clip_width.value
top = config.plugins.VideoClippingSetup.clip_top.value
height = config.plugins.VideoClippingSetup.clip_height.value
self.clip_step = ConfigSlider(default = 1, increment = 1, limits = (1, 20))
self.clip_left = ConfigSlider(default = left, increment = self.clip_step.value, limits = (0, 720))
self.clip_width = ConfigSlider(default = width, increment = self.clip_step.value, limits = (0, 720))
self.clip_top = ConfigSlider(default = top, increment = self.clip_step.value, limits = (0, 576))
self.clip_height = ConfigSlider(default = height, increment = self.clip_step.value, limits = (0, 576))
self.list.append(getConfigListEntry(_("stepsize"), self.clip_step))
self.list.append(getConfigListEntry(_("left"), self.clip_left))
self.list.append(getConfigListEntry(_("width"), self.clip_width))
self.list.append(getConfigListEntry(_("top"), self.clip_top))
self.list.append(getConfigListEntry(_("height"), self.clip_height))
self["config"].list = self.list
self["config"].l.setList(self.list)
def adjustStep(self):
self.clip_left.increment = self.clip_step.value
self.clip_width.increment = self.clip_step.value
self.clip_top.increment = self.clip_step.value
self.clip_height.increment = self.clip_step.value
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.adjustStep()
self.setPreviewPosition()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.adjustStep()
self.setPreviewPosition()
def setPreviewPosition(self):
setPosition(int(self.clip_left.value), int(self.clip_width.value), int(self.clip_top.value), int(self.clip_height.value))
def keyGo(self):
config.plugins.VideoClippingSetup.clip_left.value = self.clip_left.value
config.plugins.VideoClippingSetup.clip_width.value = self.clip_width.value
config.plugins.VideoClippingSetup.clip_top.value = self.clip_top.value
config.plugins.VideoClippingSetup.clip_height.value = self.clip_height.value
config.plugins.VideoClippingSetup.save()
self.close()
def keyCancel(self):
setConfiguredPosition()
self.close()
def setPosition(clip_left, clip_width, clip_top, clip_height):
if clip_left + clip_width > 720:
clip_width = 720 - clip_left
if clip_top + clip_height > 576:
clip_height = 576 - clip_top
try:
file = open("/proc/stb/vmpeg/0/clip_left", "w")
file.write('%X' % clip_left)
file.close()
file = open("/proc/stb/vmpeg/0/clip_width", "w")
file.write('%X' % clip_width)
file.close()
file = open("/proc/stb/vmpeg/0/clip_top", "w")
file.write('%X' % clip_top)
file.close()
file = open("/proc/stb/vmpeg/0/clip_height", "w")
file.write('%X' % clip_height)
file.close()
except:
return
def setConfiguredPosition():
setPosition(int(config.plugins.VideoClippingSetup.clip_left.value), int(config.plugins.VideoClippingSetup.clip_width.value), int(config.plugins.VideoClippingSetup.clip_top.value), int(config.plugins.VideoClippingSetup.clip_height.value))
def main(session, **kwargs):
session.open(VideoClippingCoordinates)
def startup(reason, **kwargs):
setConfiguredPosition()
def Plugins(**kwargs):
from os import path
if path.exists("/proc/stb/vmpeg/0/clip_left"):
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name = "Video clipping setup", description = "clip overscan / letterbox borders", where = PluginDescriptor.WHERE_PLUGINMENU, fnc = main),
PluginDescriptor(name = "Video clipping setup", description = "", where = PluginDescriptor.WHERE_SESSIONSTART, fnc = startup)]
return []
|
ziir/lumbergh | refs/heads/master | careers/careers/views.py | 2 | from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.views.generic import DetailView
from django_jobvite import models as jobvite_models
import utils
from careers.careers.forms import PositionFilterForm
from careers.django_workable import models as workable_models
def home(request):
return render(request, 'careers/home.html')
def listings(request):
return render(request, 'careers/listings.html', {
'positions': utils.get_all_positions(
order_by=lambda x: u'{0} {1}'.format(x.category.name, x.title)),
'form': PositionFilterForm(request.GET or None),
})
def position(request, job_id=None):
# Cannot use __exact instead of __contains due to MySQL collation
# which does not allow case sensitive matching.
position = get_object_or_404(jobvite_models.Position, job_id__contains=job_id)
positions = utils.get_all_positions(filters={'category__name': position.category.name},
order_by=lambda x: x.title)
# Add applicant source param for jobvite
position.apply_url += '&s=PDN'
return render(request, 'careers/position.html', {
'position': position,
'positions': positions,
})
class WorkablePositionDetailView(DetailView):
context_object_name = 'position'
model = workable_models.Position
template_name = 'careers/position.html'
slug_field = 'shortcode'
slug_url_kwarg = 'shortcode'
def get_context_data(self, **kwargs):
context = super(WorkablePositionDetailView, self).get_context_data(**kwargs)
context['positions'] = utils.get_all_positions(
filters={'category__name': context['position'].category.name},
order_by=lambda x: x.title)
return context
|
lsst-sqre/ltd-keeper | refs/heads/master | keeper/cli.py | 1 | """Command line subcommands for the Flask CLI.
Flask CLI subcommands are implemented with Click. The application factory
(`keeper.appfactory`) registers these subcommands with the app.
"""
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import alembic.command
import alembic.config
import click
from flask import current_app
from flask.cli import with_appcontext
from keeper.models import Permission, User, db
from keeper.version import get_version
if TYPE_CHECKING:
from flask import Flask
__all__ = [
"add_app_commands",
"createdb_command",
"init_command",
"version_command",
]
def add_app_commands(app: Flask) -> None:
"""Add custom flask subcommands to the Flask app.
This function is called by `keeper.appfactory.create_flask_app`.
"""
app.cli.add_command(createdb_command)
app.cli.add_command(init_command)
app.cli.add_command(version_command)
@click.command("createdb")
@with_appcontext
def createdb_command() -> None:
"""Deploy the current schema in a new database.
This database is 'stamped' as having the current alembic schema version.
Normally, in a new installation, run::
flask createdb
flask init
This creates the tables and an initial user.
To migrate database servers, see the copydb sub-command.
"""
db.create_all()
# stamp tables with latest schema version
config_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "migrations/alembic.ini")
)
alembic_cfg = alembic.config.Config(config_path)
alembic.command.stamp(alembic_cfg, "head")
@click.command("init")
@with_appcontext
def init_command() -> None:
"""Initialize the application DB.
Bootstraps an administrative user given the environment variables:
- ``LTD_KEEPER_BOOTSTRAP_USER``
- ``LTD_KEEPER_BOOTSTRAP_PASSWORD``
"""
if User.query.get(1) is None:
u = User(
username=current_app.config["DEFAULT_USER"],
permissions=Permission.full_permissions(),
)
u.set_password(current_app.config["DEFAULT_PASSWORD"])
db.session.add(u)
db.session.commit()
@click.command("version")
@with_appcontext
def version_command() -> None:
"""Print the LTD Keeper application version.
Alternatively, to get the Flask and Python versions, run::
flask --version
"""
click.echo(get_version())
|
turbomanage/training-data-analyst | refs/heads/master | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/itsdangerous/exc.py | 11 | from ._compat import PY2
from ._compat import text_type
class BadData(Exception):
"""Raised if bad data of any sort was encountered. This is the base
for all exceptions that itsdangerous defines.
.. versionadded:: 0.15
"""
message = None
def __init__(self, message):
super(BadData, self).__init__(self, message)
self.message = message
def __str__(self):
return text_type(self.message)
if PY2:
__unicode__ = __str__
def __str__(self):
return self.__unicode__().encode("utf-8")
class BadSignature(BadData):
"""Raised if a signature does not match."""
def __init__(self, message, payload=None):
BadData.__init__(self, message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload = payload
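# Illustrative sketch (not part of the original module): callers that want to
# look at data from a failed signature check can catch BadSignature and read
# ``payload``. ``token`` and ``handle_tampered`` below are hypothetical.
#
#     from itsdangerous import URLSafeSerializer, BadSignature
#     s = URLSafeSerializer("secret-key")
#     try:
#         data = s.loads(token)
#     except BadSignature as e:
#         if e.payload is not None:
#             handle_tampered(e.payload)  # raw, untrusted payload bytes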
class BadTimeSignature(BadSignature):
"""Raised if a time-based signature is invalid. This is a subclass
of :class:`BadSignature`.
"""
def __init__(self, message, payload=None, date_signed=None):
BadSignature.__init__(self, message, payload)
#: If the signature expired this exposes the date of when the
#: signature was created. This can be helpful in order to
#: tell the user how long a link has been gone stale.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
class SignatureExpired(BadTimeSignature):
"""Raised if a signature timestamp is older than ``max_age``. This
is a subclass of :exc:`BadTimeSignature`.
"""
class BadHeader(BadSignature):
"""Raised if a signed header is invalid in some form. This only
happens for serializers that have a header that goes with the
signature.
.. versionadded:: 0.24
"""
def __init__(self, message, payload=None, header=None, original_error=None):
BadSignature.__init__(self, message, payload)
#: If the header is actually available but just malformed it
#: might be stored here.
self.header = header
#: If available, the error that indicates why the payload was
#: not valid. This might be ``None``.
self.original_error = original_error
class BadPayload(BadData):
"""Raised if a payload is invalid. This could happen if the payload
is loaded despite an invalid signature, or if there is a mismatch
between the serializer and deserializer. The original exception
that occurred during loading is stored on as :attr:`original_error`.
.. versionadded:: 0.15
"""
def __init__(self, message, original_error=None):
BadData.__init__(self, message)
#: If available, the error that indicates why the payload was
#: not valid. This might be ``None``.
self.original_error = original_error
|
Andre-Castro/summer17WebApp | refs/heads/master | HFWebApp/HFWebApp/urls.py | 1 | """HFWebApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', include('frontPage.urls')),
url(r'^forum/', include('forumSelect.urls')),
]
|
usc-isi/essex-baremetal-support | refs/heads/master | nova/tests/test_localization.py | 8 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import nova
from nova import test
class LocalizationTestCase(test.TestCase):
def test_multiple_positional_format_placeholders(self):
        pat = re.compile(r"\W_\(")
        single_pat = re.compile(r"\W%\W")
root_path = os.path.dirname(nova.__file__)
problems = {}
for root, dirs, files in os.walk(root_path):
for fname in files:
if not fname.endswith(".py"):
continue
pth = os.path.join(root, fname)
txt = fulltext = file(pth).read()
txt_lines = fulltext.splitlines()
if not pat.search(txt):
continue
problems[pth] = []
pos = txt.find("_(")
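                # Walk every "_(" call in the file: capture the text up to the
                # matching close paren, strip "%%", "%(" and single "%"
                # operators, and flag calls that still contain more than one
                # positional "%" placeholder.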
while pos > -1:
# Make sure that this isn't part of a dunder;
# e.g., __init__(...
# or something like 'self.assert_(...'
test_txt = txt[pos - 1: pos + 10]
if not (pat.search(test_txt)):
txt = txt[pos + 2:]
pos = txt.find("_(")
continue
pos += 2
txt = txt[pos:]
innerChars = []
# Count pairs of open/close parens until _() closing
# paren is found.
parenCount = 1
pos = 0
while parenCount > 0:
char = txt[pos]
if char == "(":
parenCount += 1
elif char == ")":
parenCount -= 1
innerChars.append(char)
pos += 1
inner_all = "".join(innerChars)
# Filter out '%%' and '%('
inner = inner_all.replace("%%", "").replace("%(", "")
# Filter out the single '%' operators
inner = single_pat.sub("", inner)
# Within the remaining content, count %
fmtCount = inner.count("%")
if fmtCount > 1:
inner_first = inner_all.splitlines()[0]
lns = ["%s" % (p + 1)
for p, t in enumerate(txt_lines)
if inner_first in t]
lnums = ", ".join(lns)
# Using ugly string concatenation to avoid having
# this test fail itself.
inner_all = "_" + "(" + "%s" % inner_all
problems[pth].append("Line: %s Text: %s" %
(lnums, inner_all))
# Look for more
pos = txt.find("_(")
if not problems[pth]:
del problems[pth]
if problems:
out = ["Problem(s) found in localized string formatting",
"(see http://www.gnu.org/software/hello/manual/"
"gettext/Python.html for more information)",
"",
" ------------ Files to fix ------------"]
for pth in problems:
out.append(" %s:" % pth)
for val in set(problems[pth]):
out.append(" %s" % val)
raise AssertionError("\n".join(out))
|
andyraib/data-storage | refs/heads/master | python_scripts/env/lib/python3.6/site-packages/setuptools/command/register.py | 986 | import distutils.command.register as orig
class register(orig.register):
__doc__ = orig.register.__doc__
def run(self):
# Make sure that we are using valid current name/version info
self.run_command('egg_info')
orig.register.run(self)
|
semonte/intellij-community | refs/heads/master | python/testData/mover/lastComment1_afterDown.py | 10 | def f():
if True:
a = 1
else:
a = 2
#comment <caret> |
naturali/tensorflow | refs/heads/r0.11 | tensorflow/contrib/quantization/python/array_ops.py | 14 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.quantization.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.contrib.quantization.ops.gen_array_ops import dequantize
from tensorflow.contrib.quantization.ops.gen_array_ops import quantize_v2
from tensorflow.contrib.quantization.ops.gen_array_ops import quantized_concat
|
pointhi/kicad-footprint-generator | refs/heads/master | KicadModTree/nodes/specialized/Translation.py | 2 | # KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
from KicadModTree.Vector import *
from KicadModTree.nodes.Node import Node
class Translation(Node):
"""Apply translation to the child tree
:param x: change of x coordinate
:type x: ``float``
:param y: change of y coordinate
:type y: ``float``
:Example:
>>> from KicadModTree import *
>>> Translation(1, 2)
"""
def __init__(self, x, y):
Node.__init__(self)
# translation information
self.offset_x = x
self.offset_y = y
def getRealPosition(self, coordinate, rotation=None):
parsed_coordinate = Vector2D(coordinate)
# calculate translation
translation_coordinate = {'x': parsed_coordinate.x + self.offset_x,
'y': parsed_coordinate.y + self.offset_y}
if not self._parent:
if rotation is None:
return translation_coordinate
else:
return translation_coordinate, rotation
else:
return self._parent.getRealPosition(translation_coordinate, rotation)
def _getRenderTreeText(self):
render_text = Node._getRenderTreeText(self)
render_text += " [x: {x}, y: {y}]".format(x=self.offset_x,
y=self.offset_y)
return render_text
|
tuskar/tuskar-ui | refs/heads/master | horizon/test/test_dashboards/cats/tigers/views.py | 121 | from horizon import views
class IndexView(views.APIView):
# A very simple class-based view...
template_name = 'cats/tigers/index.html'
def get_data(self, request, context, *args, **kwargs):
# Add data to the context here...
return context
|
mdklatt/argparse-cpp | refs/heads/master | test/lib/gtest/googlemock/scripts/generator/cpp/utils.py | 1158 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
"""Returns the contents of a file."""
try:
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
except IOError:
if print_error:
print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
return None
|
firebase/grpc-SwiftPM | refs/heads/main | tools/codegen/core/gen_stats_data.py | 5 | #!/usr/bin/env python2.7
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import ctypes
import math
import sys
import yaml
import json
with open('src/core/lib/debug/stats_data.yaml') as f:
attrs = yaml.load(f.read())
REQUIRED_FIELDS = ['name', 'doc']
def make_type(name, fields):
return (collections.namedtuple(
name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
def c_str(s, encoding='ascii'):
if isinstance(s, unicode):
s = s.encode(encoding)
result = ''
for c in s:
if not (32 <= ord(c) < 127) or c in ('\\', '"'):
result += '\\%03o' % ord(c)
else:
result += c
return '"' + result + '"'
types = (
make_type('Counter', []),
make_type('Histogram', ['max', 'buckets']),
)
inst_map = dict((t[0].__name__, t[1]) for t in types)
stats = []
for attr in attrs:
found = False
for t, lst in types:
t_name = t.__name__.lower()
if t_name in attr:
name = attr[t_name]
del attr[t_name]
lst.append(t(name=name, **attr))
found = True
break
assert found, "Bad decl: %s" % attr
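# Reinterpret the bit pattern of a double as an unsigned 64-bit integer so
# bucket boundaries can be compared and shifted as plain integers.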
def dbl2u64(d):
return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
def shift_works_until(mapped_bounds, shift_bits):
for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
a, b = ab
if (a >> shift_bits) == (b >> shift_bits):
return i
return len(mapped_bounds)
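# Search shifts from largest to smallest and keep the one that distinguishes
# the most leading bucket boundaries while the resulting lookup table stays
# within max_size (and 65536) entries; returns (shift_bits, n, table_size).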
def find_ideal_shift(mapped_bounds, max_size):
best = None
for shift_bits in reversed(range(0, 64)):
n = shift_works_until(mapped_bounds, shift_bits)
if n == 0: continue
table_size = mapped_bounds[n - 1] >> shift_bits
if table_size > max_size: continue
if table_size > 65535: continue
if best is None:
best = (shift_bits, n, table_size)
elif best[1] < n:
best = (shift_bits, n, table_size)
print best
return best
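# Build the shifted-value -> bucket-index lookup table for the range covered
# by shift_data (used by the generated C code for the mid-range buckets).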
def gen_map_table(mapped_bounds, shift_data):
tbl = []
cur = 0
print mapped_bounds
mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
print mapped_bounds
for i in range(0, mapped_bounds[shift_data[1] - 1]):
while i > mapped_bounds[cur]:
cur += 1
tbl.append(cur)
return tbl
static_tables = []
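# Register a static (type, values) table, reusing an identical table if one
# was already declared, and return its index into static_tables.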
def decl_static_table(values, type):
global static_tables
v = (type, values)
for i, vp in enumerate(static_tables):
if v == vp: return i
print "ADD TABLE: %s %r" % (type, values)
r = len(static_tables)
static_tables.append(v)
return r
def type_for_uint_table(table):
mv = max(table)
if mv < 2**8:
return 'uint8_t'
elif mv < 2**16:
return 'uint16_t'
elif mv < 2**32:
return 'uint32_t'
else:
return 'uint64_t'
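# Emit the C snippet that maps a sample to its histogram bucket: values below
# first_nontrivial index directly, the shifted lookup table covers the middle
# range, and anything else falls back to grpc_stats_histo_find_bucket_slow().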
def gen_bucket_code(histogram):
bounds = [0, 1]
done_trivial = False
done_unmapped = False
first_nontrivial = None
first_unmapped = None
while len(bounds) < histogram.buckets + 1:
if len(bounds) == histogram.buckets:
nextb = int(histogram.max)
else:
mul = math.pow(
float(histogram.max) / bounds[-1],
1.0 / (histogram.buckets + 1 - len(bounds)))
nextb = int(math.ceil(bounds[-1] * mul))
if nextb <= bounds[-1] + 1:
nextb = bounds[-1] + 1
elif not done_trivial:
done_trivial = True
first_nontrivial = len(bounds)
bounds.append(nextb)
bounds_idx = decl_static_table(bounds, 'int')
if done_trivial:
first_nontrivial_code = dbl2u64(first_nontrivial)
code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
shift_data = find_ideal_shift(code_bounds[first_nontrivial:],
256 * histogram.buckets)
#print first_nontrivial, shift_data, bounds
#if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
if first_nontrivial is None:
code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n' %
histogram.name.upper())
else:
code += 'if (value < %d) {\n' % first_nontrivial
code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n' %
histogram.name.upper())
code += 'return;\n'
code += '}'
first_nontrivial_code = dbl2u64(first_nontrivial)
if shift_data is not None:
map_table_idx = decl_static_table(map_table,
type_for_uint_table(map_table))
code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
code += '_val.dbl = value;\n'
code += 'if (_val.uint < %dull) {\n' % (
(map_table[-1] << shift_data[0]) + first_nontrivial_code)
code += 'int bucket = '
code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (
map_table_idx, first_nontrivial_code, shift_data[0],
first_nontrivial)
code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
code += 'bucket -= (_val.uint < _bkt.uint);\n'
code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper(
)
code += 'return;\n'
code += '}\n'
code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper(
)
code += 'grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_%d, %d));\n' % (
bounds_idx, histogram.buckets)
return (code, bounds_idx)
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print >> f, '/*'
for line in banner:
print >> f, ' * %s' % line
print >> f, ' */'
print >> f
with open('src/core/lib/debug/stats_data.h', 'w') as H:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H], [line[2:].rstrip() for line in copyright])
put_banner(
[H],
["Automatically generated by tools/codegen/core/gen_stats_data.py"])
print >> H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
print >> H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
print >> H
print >> H, "#include <grpc/support/port_platform.h>"
print >> H
print >> H, "#include <inttypes.h>"
print >> H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
print >> H
for typename, instances in sorted(inst_map.items()):
print >> H, "typedef enum {"
for inst in instances:
print >> H, " GRPC_STATS_%s_%s," % (typename.upper(),
inst.name.upper())
print >> H, " GRPC_STATS_%s_COUNT" % (typename.upper())
print >> H, "} grpc_stats_%ss;" % (typename.lower())
print >> H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
typename.lower(), typename.upper())
print >> H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
typename.lower(), typename.upper())
histo_start = []
histo_buckets = []
histo_bucket_boundaries = []
print >> H, "typedef enum {"
first_slot = 0
for histogram in inst_map['Histogram']:
histo_start.append(first_slot)
histo_buckets.append(histogram.buckets)
print >> H, " GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (
histogram.name.upper(), first_slot)
print >> H, " GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (
histogram.name.upper(), histogram.buckets)
first_slot += histogram.buckets
print >> H, " GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
print >> H, "} grpc_stats_histogram_constants;"
print >> H, "#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)"
for ctr in inst_map['Counter']:
print >> H, ("#define GRPC_STATS_INC_%s() " +
"GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_%s)") % (
ctr.name.upper(), ctr.name.upper())
for histogram in inst_map['Histogram']:
print >> H, "#define GRPC_STATS_INC_%s(value) grpc_stats_inc_%s( (int)(value))" % (
histogram.name.upper(), histogram.name.lower())
print >> H, "void grpc_stats_inc_%s(int x);" % histogram.name.lower()
print >> H, "#else"
for ctr in inst_map['Counter']:
print >> H, ("#define GRPC_STATS_INC_%s() ") % (ctr.name.upper())
for histogram in inst_map['Histogram']:
print >> H, "#define GRPC_STATS_INC_%s(value)" % (
histogram.name.upper())
print >> H, "#endif /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */"
for i, tbl in enumerate(static_tables):
print >> H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i,
len(tbl[1]))
print >> H, "extern const int grpc_stats_histo_buckets[%d];" % len(
inst_map['Histogram'])
print >> H, "extern const int grpc_stats_histo_start[%d];" % len(
inst_map['Histogram'])
print >> H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(
inst_map['Histogram'])
print >> H, "extern void (*const grpc_stats_inc_histogram[%d])(int x);" % len(
inst_map['Histogram'])
print >> H
print >> H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
with open('src/core/lib/debug/stats_data.cc', 'w') as C:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([C], [line[2:].rstrip() for line in copyright])
put_banner(
[C],
["Automatically generated by tools/codegen/core/gen_stats_data.py"])
print >> C, "#include <grpc/support/port_platform.h>"
print >> C
print >> C, "#include \"src/core/lib/debug/stats.h\""
print >> C, "#include \"src/core/lib/debug/stats_data.h\""
print >> C, "#include \"src/core/lib/gpr/useful.h\""
print >> C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
print >> C
histo_code = []
for histogram in inst_map['Histogram']:
code, bounds_idx = gen_bucket_code(histogram)
histo_bucket_boundaries.append(bounds_idx)
histo_code.append(code)
for typename, instances in sorted(inst_map.items()):
print >> C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
typename.lower(), typename.upper())
for inst in instances:
print >> C, " %s," % c_str(inst.name)
print >> C, "};"
print >> C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
typename.lower(), typename.upper())
for inst in instances:
print >> C, " %s," % c_str(inst.doc)
print >> C, "};"
for i, tbl in enumerate(static_tables):
print >> C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
for histogram, code in zip(inst_map['Histogram'], histo_code):
print >> C, ("void grpc_stats_inc_%s(int value) {%s}") % (
histogram.name.lower(), code)
print >> C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (len(
inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
print >> C, "const int grpc_stats_histo_start[%d] = {%s};" % (len(
inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
print >> C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
len(inst_map['Histogram']), ','.join(
'grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
print >> C, "void (*const grpc_stats_inc_histogram[%d])(int x) = {%s};" % (
len(inst_map['Histogram']), ','.join(
'grpc_stats_inc_%s' % histogram.name.lower()
for histogram in inst_map['Histogram']))
# patch qps_test bigquery schema
RECORD_EXPLICIT_PERCENTILES = [50, 95, 99]
with open('tools/run_tests/performance/scenario_result_schema.json', 'r') as f:
qps_schema = json.loads(f.read())
def FindNamed(js, name):
for el in js:
if el['name'] == name:
return el
def RemoveCoreFields(js):
new_fields = []
for field in js['fields']:
if not field['name'].startswith('core_'):
new_fields.append(field)
js['fields'] = new_fields
RemoveCoreFields(FindNamed(qps_schema, 'clientStats'))
RemoveCoreFields(FindNamed(qps_schema, 'serverStats'))
def AddCoreFields(js):
for counter in inst_map['Counter']:
js['fields'].append({
'name': 'core_%s' % counter.name,
'type': 'INTEGER',
'mode': 'NULLABLE'
})
for histogram in inst_map['Histogram']:
js['fields'].append({
'name': 'core_%s' % histogram.name,
'type': 'STRING',
'mode': 'NULLABLE'
})
js['fields'].append({
'name': 'core_%s_bkts' % histogram.name,
'type': 'STRING',
'mode': 'NULLABLE'
})
for pctl in RECORD_EXPLICIT_PERCENTILES:
js['fields'].append({
'name': 'core_%s_%dp' % (histogram.name, pctl),
'type': 'FLOAT',
'mode': 'NULLABLE'
})
AddCoreFields(FindNamed(qps_schema, 'clientStats'))
AddCoreFields(FindNamed(qps_schema, 'serverStats'))
with open('tools/run_tests/performance/scenario_result_schema.json', 'w') as f:
f.write(json.dumps(qps_schema, indent=2, sort_keys=True))
# and generate a helper script to massage scenario results into the format we'd
# like to query
with open('tools/run_tests/performance/massage_qps_stats.py', 'w') as P:
with open(sys.argv[0]) as my_source:
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
print >> P, line.rstrip()
break
for line in my_source:
if line[0] != '#':
break
print >> P, line.rstrip()
print >> P
print >> P, '# Autogenerated by tools/codegen/core/gen_stats_data.py'
print >> P
print >> P, 'import massage_qps_stats_helpers'
print >> P, 'def massage_qps_stats(scenario_result):'
print >> P, ' for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:'
print >> P, ' if "coreStats" in stats:'
print >> P, ' # Get rid of the "coreStats" element and replace it by statistics'
print >> P, ' # that correspond to columns in the bigquery schema.'
print >> P, ' core_stats = stats["coreStats"]'
print >> P, ' del stats["coreStats"]'
for counter in inst_map['Counter']:
print >> P, ' stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (
counter.name, counter.name)
for i, histogram in enumerate(inst_map['Histogram']):
print >> P, ' h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name
print >> P, ' stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name
print >> P, ' stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name
for pctl in RECORD_EXPLICIT_PERCENTILES:
print >> P, ' stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % (
histogram.name, pctl, pctl)
with open('src/core/lib/debug/stats_data_bq_schema.sql', 'w') as S:
columns = []
for counter in inst_map['Counter']:
columns.append(('%s_per_iteration' % counter.name, 'FLOAT'))
print >> S, ',\n'.join('%s:%s' % x for x in columns)
|
benfinke/ns_python | refs/heads/master | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsglobal_dnspolicy_binding.py | 3 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnsglobal_dnspolicy_binding(base_resource) :
""" Binding class showing the dnspolicy that can be bound to dnsglobal.
"""
def __init__(self) :
self._policyname = ""
self._type = ""
self._priority = 0
self._gotopriorityexpression = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self._numpol = 0
self._flowtype = 0
self.___count = 0
@property
def priority(self) :
ur"""Specifies the priority of the policy with which it is bound. Maximum allowed priority should be less than 65535.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Specifies the priority of the policy with which it is bound. Maximum allowed priority should be less than 65535.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Name of the dns policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Name of the dns policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.<br/>Minimum length = 1.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.<br/>Minimum length = 1
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke flag.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke flag.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def type(self) :
ur"""Type of global bind point for which to show bound policies.<br/>Possible values = REQ_OVERRIDE, REQ_DEFAULT, RES_OVERRIDE, RES_DEFAULT.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
ur"""Type of global bind point for which to show bound policies.<br/>Possible values = REQ_OVERRIDE, REQ_DEFAULT, RES_OVERRIDE, RES_DEFAULT
"""
try :
self._type = type
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label invocation.<br/>Possible values = policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of policy label invocation.<br/>Possible values = policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def flowtype(self) :
ur"""flowtype of the bound rewrite policy.
"""
try :
return self._flowtype
except Exception as e:
raise e
@property
def numpol(self) :
ur"""The number of policies bound to the bindpoint.
"""
try :
return self._numpol
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dnsglobal_dnspolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dnsglobal_dnspolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = dnsglobal_dnspolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.type = resource.type
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [dnsglobal_dnspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].type = resource[i].type
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = dnsglobal_dnspolicy_binding()
deleteresource.policyname = resource.policyname
deleteresource.type = resource.type
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [dnsglobal_dnspolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].type = resource[i].type
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
ur""" Use this API to fetch a dnsglobal_dnspolicy_binding resources.
"""
try :
obj = dnsglobal_dnspolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
ur""" Use this API to fetch filtered set of dnsglobal_dnspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnsglobal_dnspolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
ur""" Use this API to count dnsglobal_dnspolicy_binding resources configued on NetScaler.
"""
try :
obj = dnsglobal_dnspolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
ur""" Use this API to count the filtered set of dnsglobal_dnspolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnsglobal_dnspolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Type:
REQ_OVERRIDE = "REQ_OVERRIDE"
REQ_DEFAULT = "REQ_DEFAULT"
RES_OVERRIDE = "RES_OVERRIDE"
RES_DEFAULT = "RES_DEFAULT"
class Labeltype:
policylabel = "policylabel"
class dnsglobal_dnspolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.dnsglobal_dnspolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dnsglobal_dnspolicy_binding = [dnsglobal_dnspolicy_binding() for _ in range(length)]
|
Fusion-Rom/android_external_chromium_org | refs/heads/lp5.1 | third_party/closure_linter/closure_linter/common/tokenizer.py | 127 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based lexer."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
# Shorthand
Type = tokens.TokenType
class Tokenizer(object):
"""General purpose tokenizer.
Attributes:
mode: The latest mode of the tokenizer. This allows patterns to distinguish
if they are mid-comment, mid-parameter list, etc.
matchers: Dictionary of modes to sequences of matchers that define the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
def __init__(self, starting_mode, matchers, default_types):
"""Initialize the tokenizer.
Args:
starting_mode: Mode to start in.
matchers: Dictionary of modes to sequences of matchers that defines the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
self.__starting_mode = starting_mode
self.matchers = matchers
self.default_types = default_types
def TokenizeFile(self, file):
"""Tokenizes the given file.
Args:
file: An iterable that yields one line of the file at a time.
Returns:
The first token in the file
"""
# The current mode.
self.mode = self.__starting_mode
# The first token in the stream.
self.__first_token = None
# The last token added to the token stream.
self.__last_token = None
# The current line number.
self.__line_number = 0
for line in file:
self.__line_number += 1
self.__TokenizeLine(line)
return self.__first_token
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new Token object (or subclass).
Args:
string: The string of input the token represents.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
Returns:
The newly created Token object.
"""
return tokens.Token(string, token_type, line, line_number, values,
line_number)
def __TokenizeLine(self, line):
"""Tokenizes the given line.
Args:
line: The contents of the line.
"""
string = line.rstrip('\n\r\f')
line_number = self.__line_number
self.__start_index = 0
if not string:
self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
return
normal_token = ''
index = 0
while index < len(string):
for matcher in self.matchers[self.mode]:
if matcher.line_start and index > 0:
continue
match = matcher.regex.match(string, index)
if match:
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line,
line_number))
normal_token = ''
# Add the match.
self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
line_number, match.groupdict()))
# Change the mode to the correct one for after this match.
self.mode = matcher.result_mode or self.mode
# Shorten the string to be matched.
index = match.end()
break
else:
# If the for loop finishes naturally (i.e. no matches) we just add the
# first character to the string of consecutive non match characters.
# These will constitute a NORMAL token.
if string:
normal_token += string[index:index + 1]
index += 1
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line, line_number))
def __CreateNormalToken(self, mode, string, line, line_number):
"""Creates a normal token.
Args:
mode: The current mode.
string: The string to tokenize.
line: The line of text.
line_number: The line number within the file.
Returns:
A Token object, of the default type for the current mode.
"""
type = Type.NORMAL
if mode in self.default_types:
type = self.default_types[mode]
return self._CreateToken(string, type, line, line_number)
def __AddToken(self, token):
"""Add the given token to the token stream.
Args:
token: The token to add.
"""
# Store the first token, or point the previous token to this one.
if not self.__first_token:
self.__first_token = token
else:
self.__last_token.next = token
# Establish the doubly linked list
token.previous = self.__last_token
self.__last_token = token
# Compute the character indices
token.start_index = self.__start_index
self.__start_index += token.length
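# Illustrative sketch (not part of the original module): a concrete tokenizer
# supplies per-mode matcher sequences, e.g.
#
#     matchers = {'normal': [some_matcher]}   # hypothetical matcher objects
#     tokenizer = Tokenizer('normal', matchers, default_types={})
#     first_token = tokenizer.TokenizeFile(open('input.js'))
#
# where each matcher exposes the regex, type, line_start and result_mode
# attributes read in __TokenizeLine above.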
|
le9i0nx/ansible | refs/heads/devel | lib/ansible/module_utils/openstack.py | 76 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from ansible.module_utils.six import iteritems
def openstack_argument_spec():
# DEPRECATED: This argument spec is only used for the deprecated old
# OpenStack modules. It turns out that modern OpenStack auth is WAY
# more complex than this.
# Consume standard OpenStack environment variables.
# This is mainly only useful for ad-hoc command line operation as
# in playbooks one would assume variables would be used appropriately
OS_AUTH_URL = os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
OS_PASSWORD = os.environ.get('OS_PASSWORD', None)
OS_REGION_NAME = os.environ.get('OS_REGION_NAME', None)
OS_USERNAME = os.environ.get('OS_USERNAME', 'admin')
OS_TENANT_NAME = os.environ.get('OS_TENANT_NAME', OS_USERNAME)
spec = dict(
login_username=dict(default=OS_USERNAME),
auth_url=dict(default=OS_AUTH_URL),
region_name=dict(default=OS_REGION_NAME),
availability_zone=dict(),
)
if OS_PASSWORD:
spec['login_password'] = dict(default=OS_PASSWORD)
else:
spec['login_password'] = dict(required=True)
if OS_TENANT_NAME:
spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
else:
spec['login_tenant_name'] = dict(required=True)
return spec
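# Collect addresses from a Nova "addresses" dict: every address on the network
# named key_name, otherwise every address whose OS-EXT-IPS:type matches
# ext_tag (e.g. 'fixed' or 'floating').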
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
ret = []
for (k, v) in iteritems(addresses):
if key_name and k == key_name:
ret.extend([addrs['addr'] for addrs in v])
else:
for interface_spec in v:
if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
ret.append(interface_spec['addr'])
return ret
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
auth_type=dict(default=None),
auth=dict(default=None, type='dict', no_log=True),
region_name=dict(default=None),
availability_zone=dict(default=None),
verify=dict(default=None, type='bool', aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None, no_log=True),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
api_timeout=dict(default=None, type='int'),
endpoint_type=dict(
default='public', choices=['public', 'internal', 'admin']
)
)
spec.update(kwargs)
return spec
def openstack_module_kwargs(**kwargs):
ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
ret[key].extend(kwargs[key])
else:
ret[key] = kwargs[key]
return ret
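# Illustrative usage (not part of the original module): OpenStack modules
# typically combine these helpers roughly as
#
#     argument_spec = openstack_full_argument_spec(name=dict(required=True))
#     module = AnsibleModule(argument_spec, **openstack_module_kwargs())
#
# so that auth, region and timeout options are shared across modules.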
|
0k/odoo | refs/heads/master | addons/hr_gamification/__openerp__.py | 320 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'website': 'https://www.odoo.com/page/employees',
'depends': ['gamification', 'hr'],
    'description': """Use the HR resources for the gamification process.
The HR officer can now manage challenges and badges.
This allows the user to send badges to employees instead of simple users.
Badges received are displayed on the user profile.
""",
'data': [
'security/ir.model.access.csv',
'security/gamification_security.xml',
'wizard/grant_badge.xml',
'views/gamification.xml',
'views/hr_gamification.xml',
],
'auto_install': True,
}
|
wrapp/AutobahnPython | refs/heads/master | examples/wamp/rpc/simple/example2/client.py | 27 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from autobahn.websocket import connectWS
from autobahn.wamp import WampClientFactory, WampClientProtocol
class SimpleClientProtocol(WampClientProtocol):
"""
Demonstrates simple Remote Procedure Calls (RPC) with
AutobahnPython and Twisted Deferreds.
"""
def show(self, result):
print "SUCCESS:", result
def logerror(self, e):
erroruri, errodesc, errordetails = e.value.args
print "ERROR: %s ('%s') - %s" % (erroruri, errodesc, errordetails)
def done(self, *args):
self.sendClose()
reactor.stop()
def onSessionOpen(self):
self.prefix("calc", "http://example.com/simple/calc#")
d1 = self.call("calc:square", 23).addCallback(self.show)
d2 = self.call("calc:add", 23, 7).addCallback(self.show)
d3 = self.call("calc:sum", [1, 2, 3, 4, 5]).addCallback(self.show)
d4 = self.call("calc:square", 23).addCallback(lambda res: \
self.call("calc:sqrt", res)).addCallback(self.show)
d5 = self.call("calc:sqrt", -1).addCallbacks(self.show,
self.logerror)
d6 = self.call("calc:square", 1001).addCallbacks(self.show,
self.logerror)
d7 = self.call("calc:asum", [1, 2, 3]).addCallback(self.show)
d8 = self.call("calc:sum", [4, 5, 6]).addCallback(self.show)
d9 = self.call("calc:pickySum", range(0, 30)).addCallbacks(self.show,
self.logerror)
## we want to shutdown the client exactly when all deferreds are finished
DeferredList([d1, d2, d3, d4, d5, d6, d7, d8, d9]).addCallback(self.done)
if __name__ == '__main__':
log.startLogging(sys.stdout)
factory = WampClientFactory("ws://localhost:9000", debugWamp = True)
factory.protocol = SimpleClientProtocol
connectWS(factory)
reactor.run()
|
jvkops/django | refs/heads/master | tests/model_meta/models.py | 192 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Relation(models.Model):
pass
class AbstractPerson(models.Model):
# DATA fields
data_abstract = models.CharField(max_length=10)
fk_abstract = models.ForeignKey(Relation, models.CASCADE, related_name='fk_abstract_rel')
# M2M fields
m2m_abstract = models.ManyToManyField(Relation, related_name='m2m_abstract_rel')
friends_abstract = models.ManyToManyField('self', related_name='friends_abstract', symmetrical=True)
following_abstract = models.ManyToManyField('self', related_name='followers_abstract', symmetrical=False)
# VIRTUAL fields
data_not_concrete_abstract = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=['abstract_non_concrete_id'],
to_fields=['id'],
related_name='fo_abstract_rel',
)
# GFK fields
content_type_abstract = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
object_id_abstract = models.PositiveIntegerField()
content_object_abstract = GenericForeignKey('content_type_abstract', 'object_id_abstract')
# GR fields
generic_relation_abstract = GenericRelation(Relation)
class Meta:
abstract = True
class BasePerson(AbstractPerson):
# DATA fields
data_base = models.CharField(max_length=10)
fk_base = models.ForeignKey(Relation, models.CASCADE, related_name='fk_base_rel')
# M2M fields
m2m_base = models.ManyToManyField(Relation, related_name='m2m_base_rel')
friends_base = models.ManyToManyField('self', related_name='friends_base', symmetrical=True)
following_base = models.ManyToManyField('self', related_name='followers_base', symmetrical=False)
# VIRTUAL fields
data_not_concrete_base = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=['base_non_concrete_id'],
to_fields=['id'],
related_name='fo_base_rel',
)
# GFK fields
content_type_base = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
object_id_base = models.PositiveIntegerField()
content_object_base = GenericForeignKey('content_type_base', 'object_id_base')
# GR fields
generic_relation_base = GenericRelation(Relation)
class Person(BasePerson):
# DATA fields
data_inherited = models.CharField(max_length=10)
fk_inherited = models.ForeignKey(Relation, models.CASCADE, related_name='fk_concrete_rel')
# M2M Fields
m2m_inherited = models.ManyToManyField(Relation, related_name='m2m_concrete_rel')
friends_inherited = models.ManyToManyField('self', related_name='friends_concrete', symmetrical=True)
following_inherited = models.ManyToManyField('self', related_name='followers_concrete', symmetrical=False)
# VIRTUAL fields
data_not_concrete_inherited = models.ForeignObject(
Relation,
on_delete=models.CASCADE,
from_fields=['model_non_concrete_id'],
to_fields=['id'],
related_name='fo_concrete_rel',
)
# GFK fields
content_type_concrete = models.ForeignKey(ContentType, models.CASCADE, related_name='+')
object_id_concrete = models.PositiveIntegerField()
content_object_concrete = GenericForeignKey('content_type_concrete', 'object_id_concrete')
# GR fields
generic_relation_concrete = GenericRelation(Relation)
class ProxyPerson(Person):
class Meta:
proxy = True
class Relating(models.Model):
# ForeignKey to BasePerson
baseperson = models.ForeignKey(BasePerson, models.CASCADE, related_name='relating_baseperson')
baseperson_hidden = models.ForeignKey(BasePerson, models.CASCADE, related_name='+')
# ForeignKey to Person
person = models.ForeignKey(Person, models.CASCADE, related_name='relating_person')
person_hidden = models.ForeignKey(Person, models.CASCADE, related_name='+')
# ForeignKey to ProxyPerson
proxyperson = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='relating_proxyperson')
proxyperson_hidden = models.ForeignKey(ProxyPerson, models.CASCADE, related_name='+')
# ManyToManyField to BasePerson
basepeople = models.ManyToManyField(BasePerson, related_name='relating_basepeople')
basepeople_hidden = models.ManyToManyField(BasePerson, related_name='+')
# ManyToManyField to Person
people = models.ManyToManyField(Person, related_name='relating_people')
people_hidden = models.ManyToManyField(Person, related_name='+')
# ParentListTests models
class CommonAncestor(models.Model):
pass
class FirstParent(CommonAncestor):
first_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class SecondParent(CommonAncestor):
second_ancestor = models.OneToOneField(CommonAncestor, models.SET_NULL, primary_key=True, parent_link=True)
class Child(FirstParent, SecondParent):
pass
|
ClearCorp/account-financial-reporting | refs/heads/8.0 | account_financial_report/wizard/wizard.py | 32 | # -*- encoding: utf-8 -*-
###########################################################################
#    Module Written for OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Humberto Arocha humberto@openerp.com.ve
# Angelica Barrios angelicaisabelb@gmail.com
# Jordi Esteve <jesteve@zikzakmedia.com>
# Javier Duran <javieredm@gmail.com>
#              Planned by: Humberto Arocha
#              Financed by: LUBCAN COL S.A.S http://www.lubcancol.com
# Audited by: Humberto Arocha humberto@openerp.com.ve
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from openerp.osv import osv, fields
import time
from openerp.tools.translate import _
class wizard_report(osv.osv_memory):
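    """Wizard collecting the options used to run the afr financial reports."""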
_name = "wizard.report"
_columns = {
'afr_id': fields.many2one(
'afr', 'Custom Report',
            help='If you have already set up a Custom Report, select it here.'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'currency_id': fields.many2one(
'res.currency', 'Currency',
help="Currency at which this report will be expressed. If not \
selected will be used the one set in the company"),
'inf_type': fields.selection([('BS', 'Balance Sheet'),
('IS', 'Income Statement')],
'Type',
required=True),
'columns': fields.selection(
[('one', 'End. Balance'),
('two', 'Debit | Credit'),
('four', 'Initial | Debit | Credit | YTD'),
('five', 'Initial | Debit | Credit | Period | YTD'),
('qtr', "4 QTR's | YTD"), ('thirteen', '12 Months | YTD')],
'Columns', required=True),
'display_account': fields.selection(
[('all', 'All Accounts'),
('bal', 'With Balance'),
('mov', 'With movements'),
('bal_mov', 'With Balance / Movements')],
'Display accounts'),
'display_account_level': fields.integer(
'Up to level',
help='Display accounts up to this level (0 to show all)'),
'account_list': fields.many2many('account.account',
'rel_wizard_account',
'account_list',
'account_id',
'Root accounts',
required=True),
'fiscalyear': fields.many2one('account.fiscalyear', 'Fiscal year',
help='Fiscal Year for this report',
required=True),
'periods': fields.many2many(
'account.period', 'rel_wizard_period',
'wizard_id', 'period_id', 'Periods',
help='All periods in the fiscal year if empty'),
'analytic_ledger': fields.boolean(
'Analytic Ledger',
help="Allows to Generate an Analytic Ledger for accounts with \
moves. Available when Balance Sheet and 'Initial | Debit | Credit \
| YTD' are selected"),
'journal_ledger': fields.boolean(
'Journal Ledger',
help="Allows to Generate an Journal Ledger for accounts with \
moves. Available when Balance Sheet and 'Initial | Debit | Credit \
| YTD' are selected"),
'partner_balance': fields.boolean(
'Partner Balance',
help="Allows to Generate a Partner Balance for accounts with \
moves. Available when Balance Sheet and 'Initial | Debit | Credit \
| YTD' are selected"),
'tot_check': fields.boolean('Summarize?',
                                    help='If checked, a line summarizing the \
                report columns is added at the end of the report'),
'lab_str': fields.char('Description',
help='Description for the Summary', size=128),
'target_move': fields.selection(
[('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Entries to Include',
required=True,
help='Print All Accounting Entries or just Posted Accounting \
Entries'),
# ~ Deprecated fields
'filter': fields.selection([('bydate', 'By Date'),
('byperiod', 'By Period'),
('all', 'By Date and Period'),
('none', 'No Filter')],
'Date/Period Filter'),
'date_to': fields.date('End date'),
'date_from': fields.date('Start date'),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-%d'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
'filter': lambda *a: 'byperiod',
'display_account_level': lambda *a: 0,
'inf_type': lambda *a: 'BS',
'company_id': lambda self, cr, uid, c: self.pool['res.company'].
_company_default_get(cr, uid, 'account.invoice', context=c),
'fiscalyear': lambda self, cr, uid, c: self.
pool['account.fiscalyear'].find(cr, uid),
'display_account': lambda *a: 'bal_mov',
'columns': lambda *a: 'five',
'target_move': 'posted',
}
def onchange_inf_type(self, cr, uid, ids, inf_type, context=None):
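        """Disable the Analytic Ledger option for non Balance Sheet reports."""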
if context is None:
context = {}
res = {'value': {}}
if inf_type != 'BS':
res['value'].update({'analytic_ledger': False})
return res
def onchange_columns(self, cr, uid, ids, columns, fiscalyear, periods,
context=None):
if context is None:
context = {}
res = {'value': {}}
p_obj = self.pool.get("account.period")
all_periods = p_obj.search(cr, uid,
[('fiscalyear_id', '=', fiscalyear),
('special', '=', False)], context=context)
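        # go is truthy only when at least one period is already selected and
        # every selected period belongs to the chosen fiscal year.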
s = set(periods[0][2])
t = set(all_periods)
go = periods[0][2] and s.issubset(t) or False
if columns != 'four':
res['value'].update({'analytic_ledger': False})
if columns in ('qtr', 'thirteen'):
res['value'].update({'periods': all_periods})
else:
if go:
res['value'].update({'periods': periods})
else:
res['value'].update({'periods': []})
return res
def onchange_analytic_ledger(self, cr, uid, ids, company_id,
analytic_ledger, context=None):
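        """Default the report currency to the selected company's currency."""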
if context is None:
context = {}
context['company_id'] = company_id
res = {'value': {}}
cur_id = self.pool.get('res.company').browse(
cr, uid, company_id, context=context).currency_id.id
res['value'].update({'currency_id': cur_id})
return res
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
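        """Reset currency and fiscal year and clear the account, period and
        custom report selections when the company changes.
        """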
if context is None:
context = {}
context['company_id'] = company_id
res = {'value': {}}
if not company_id:
return res
cur_id = self.pool.get('res.company').browse(
cr, uid, company_id, context=context).currency_id.id
fy_id = self.pool.get('account.fiscalyear').find(
cr, uid, context=context)
res['value'].update({'fiscalyear': fy_id})
res['value'].update({'currency_id': cur_id})
res['value'].update({'account_list': []})
res['value'].update({'periods': []})
res['value'].update({'afr_id': None})
return res
def onchange_afr_id(self, cr, uid, ids, afr_id, context=None):
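        """Pre-fill the wizard fields from the selected custom report (afr)
        record, falling back to sensible defaults.
        """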
if context is None:
context = {}
res = {'value': {}}
if not afr_id:
return res
afr_brw = self.pool.get('afr').browse(cr, uid, afr_id, context=context)
res['value'].update({
'currency_id': afr_brw.currency_id
and afr_brw.currency_id.id
or afr_brw.company_id.currency_id.id})
res['value'].update({'inf_type': afr_brw.inf_type or 'BS'})
res['value'].update({'columns': afr_brw.columns or 'five'})
res['value'].update({
'display_account': afr_brw.display_account
or 'bal_mov'})
res['value'].update({
'display_account_level': afr_brw.
display_account_level or 0})
res['value'].update({
'fiscalyear': afr_brw.fiscalyear_id
and afr_brw.fiscalyear_id.id})
res['value'].update({'account_list': [
acc.id for acc in afr_brw.account_ids]})
res['value'].update({'periods': [p.id for p in afr_brw.period_ids]})
res['value'].update({
'analytic_ledger':
afr_brw.analytic_ledger or False})
res['value'].update({'tot_check': afr_brw.tot_check or False})
res['value'].update({'lab_str': afr_brw.lab_str or _(
'Write a Description for your Summary Total')})
return res
def _get_defaults(self, cr, uid, data, context=None):
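        """Fill the form with the user's company (or the first top-level
        company) and the current fiscal year.
        """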
if context is None:
context = {}
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if user.company_id:
company_id = user.company_id.id
else:
company_id = self.pool['res.company'].search(
cr, uid, [('parent_id', '=', False)])[0]
data['form']['company_id'] = company_id
fiscalyear_obj = self.pool['account.fiscalyear']
data['form']['fiscalyear'] = fiscalyear_obj.find(cr, uid)
data['form']['context'] = context
return data['form']
def _check_state(self, cr, uid, data, context=None):
if context is None:
context = {}
if data['form']['filter'] == 'bydate':
self._check_date(cr, uid, data, context)
return data['form']
def _check_date(self, cr, uid, data, context=None):
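        """Ensure the date range is consistent and falls within the selected
        fiscal year.
        """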
if context is None:
context = {}
if data['form']['date_from'] > data['form']['date_to']:
            raise osv.except_osv(_('Error!'), _(
                'The end date must be later than the start date'))
sql = """SELECT f.id, f.date_start, f.date_stop
FROM account_fiscalyear f
WHERE '%s' = f.id """ % (data['form']['fiscalyear'])
cr.execute(sql)
res = cr.dictfetchall()
if res:
if (data['form']['date_to'] > res[0]['date_stop']
or data['form']['date_from'] < res[0]['date_start']):
                raise osv.except_osv(_('UserError'),
                                     _('Dates must be between %s and %s')
                                     % (res[0]['date_start'],
                                        res[0]['date_stop']))
else:
return 'report'
else:
            raise osv.except_osv(_('UserError'), _('No fiscal year was found'))
def period_span(self, cr, uid, ids, fy_id, context=None):
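        """Return the ids of every regular (non-special) period of fiscal year
        fy_id covering the date span of the given period ids; when no period
        ids are given, return all regular periods of the fiscal year.
        """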
if context is None:
context = {}
ap_obj = self.pool.get('account.period')
fy_id = fy_id and type(fy_id) in (list, tuple) and fy_id[0] or fy_id
if not ids:
            # ~ No periods given: fall back to every regular period of the
            # ~ fiscal year
return ap_obj.search(cr, uid, [('fiscalyear_id', '=', fy_id),
('special', '=', False)],
order='date_start asc')
ap_brws = ap_obj.browse(cr, uid, ids, context=context)
date_start = min([period.date_start for period in ap_brws])
date_stop = max([period.date_stop for period in ap_brws])
return ap_obj.search(cr, uid, [('fiscalyear_id', '=', fy_id),
('special', '=', False),
('date_start', '>=', date_start),
('date_stop', '<=', date_stop)],
order='date_start asc')
def print_report(self, cr, uid, ids, data, context=None):
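        """Read the wizard values, normalize the period/date filters and
        return the report action matching the selected column layout.
        """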
if context is None:
context = {}
data = {}
data['ids'] = context.get('active_ids', [])
data['model'] = context.get('active_model', 'ir.ui.menu')
data['form'] = self.read(cr, uid, ids[0])
if data['form']['filter'] == 'byperiod':
del data['form']['date_from']
del data['form']['date_to']
data['form']['periods'] = self.period_span(
cr, uid,
data['form']['periods'],
data['form']['fiscalyear'])
elif data['form']['filter'] == 'bydate':
self._check_date(cr, uid, data)
del data['form']['periods']
elif data['form']['filter'] == 'none':
del data['form']['date_from']
del data['form']['date_to']
del data['form']['periods']
else:
self._check_date(cr, uid, data)
            sqlmm = """select min(p.date_start) as inicio,
                    max(p.date_stop) as fin
                    from account_period p
                    where p.id in %s"""
            cr.execute(sqlmm, (tuple(data['form']['periods']),))
minmax = cr.dictfetchall()
if minmax:
if (data['form']['date_to'] < minmax[0]['inicio']) \
or (data['form']['date_from'] > minmax[0]['fin']):
                raise osv.except_osv(_('Error!'), _(
                    'The intersection between the selected periods and the '
                    'date range is empty'))
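        # Map the selected column layout (and ledger options) to the matching
        # report service name.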
if data['form']['columns'] == 'one':
name = 'afr.1cols'
if data['form']['columns'] == 'two':
name = 'afr.2cols'
if data['form']['columns'] == 'four':
if data['form']['analytic_ledger'] \
and data['form']['inf_type'] == 'BS':
name = 'afr.analytic.ledger'
elif data['form']['journal_ledger'] \
and data['form']['inf_type'] == 'BS':
name = 'afr.journal.ledger'
elif data['form']['partner_balance'] \
and data['form']['inf_type'] == 'BS':
name = 'afr.partner.balance'
else:
name = 'afr.4cols'
if data['form']['columns'] == 'five':
name = 'afr.5cols'
if data['form']['columns'] == 'qtr':
name = 'afr.qtrcols'
if data['form']['columns'] == 'thirteen':
name = 'afr.13cols'
return {'type': 'ir.actions.report.xml',
'report_name': name,
'datas': data}
wizard_report()
|